kombu-5.5.3/.bumpversion.cfg
[bumpversion]
current_version = 5.5.3
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z]+)?
serialize =
    {major}.{minor}.{patch}{releaselevel}
    {major}.{minor}.{patch}

[bumpversion:file:kombu/__init__.py]

[bumpversion:file:docs/includes/introduction.txt]

[bumpversion:file:README.rst]

kombu-5.5.3/.cookiecutterrc
default_context:
    email: 'ask@celeryproject.org'
    full_name: 'Ask Solem'
    github_username: 'celery'
    project_name: 'Kombu'
    project_short_description: 'Messaging library for Python.'
    project_slug: 'kombu'
    version: '1.0.0'
    year: '2009-2016'

kombu-5.5.3/.coveragerc
[run]
branch = 1
cover_pylib = 0
include=*kombu/*
omit = kombu.tests.*

[report]
omit =
    */python?.?/*
    */site-packages/*
    */pypy/*
    *kombu/async/http/urllib3_client.py
    *kombu/five.py
    *kombu/transport/mongodb.py
    *kombu/transport/filesystem.py
    *kombu/transport/sqlalchemy/*
    *kombu/utils.compat.py
    *kombu/utils/eventio.py
    *kombu/async/debug.py
    *kombu/transport/amqplib.py
    *kombu/transport/couchdb.py
    *kombu/transport/beanstalk.py
    *kombu/transport/sqlalchemy.py
    *kombu/transport/zookeeper.py
    *kombu/transport/zmq.py
    *kombu/transport/django.py
    *kombu/transport/pyro.py
    *kombu/transport/azurestoragequeues.py
    *kombu/transport/qpid*
exclude_lines =
    pragma: no cover
    for infinity

kombu-5.5.3/.editorconfig
# http://editorconfig.org
root = true

[*]
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
charset = utf-8
end_of_line = lf

[Makefile]
indent_style = tab

kombu-5.5.3/.github/FUNDING.yml
# These are supported funding model platforms

patreon: auvipy
open_collective: celery
ko_fi: # Replace with a single Ko-fi username
tidelift: "pypi/kombu"
custom: # Replace with a single custom sponsorship URL

kombu-5.5.3/.github/dependabot.yml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"

kombu-5.5.3/.github/opencollective.yml
collective: celery
tiers:
  - tiers: '*'
    labels: ['Backer ❤️']
    message: 'Hey . Thank you for supporting the project!:heart:'
  - tiers: ['Basic Sponsor', 'Sponsor', 'Silver Sponsor', 'Gold Sponsor']
    labels: ['Sponsor ❤️']
    message: |
      Thank you for sponsoring the project!:heart::heart::heart:
      Resolving this issue is one of our top priorities.
      One of @celery/core-developers will triage it shortly.
invitation: |
  Hey :wave:,
  Thank you for opening an issue. We will get back to you as soon as we can.
  Also, check out our [Open Collective]() and consider backing us - every little helps!
  We also offer priority support for our sponsors.
If you require immediate assistance please consider sponsoring us. kombu-5.5.3/.github/tidelift.yml000066400000000000000000000007571477772317200165620ustar00rootroot00000000000000name: Tidelift Alignment on: push: jobs: build: name: Run Tidelift to ensure approved open source packages are in use runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - name: Alignment uses: tidelift/alignment-action@main env: TIDELIFT_API_KEY: ${{ secrets.TIDELIFT_API_KEY }} TIDELIFT_ORGANIZATION: ${{ secrets.TIDELIFT_ORGANIZATION }} TIDELIFT_PROJECT: ${{ secrets.TIDELIFT_PROJECT }} kombu-5.5.3/.github/workflows/000077500000000000000000000000001477772317200162575ustar00rootroot00000000000000kombu-5.5.3/.github/workflows/codeql-analysis.yml000066400000000000000000000044071477772317200220770ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ main ] pull_request: # The branches below must be a subset of the branches above branches: [ main ] jobs: analyze: name: Analyze runs-on: blacksmith-4vcpu-ubuntu-2204 permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ 'python' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://git.io/codeql-language-support steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 kombu-5.5.3/.github/workflows/linter.yml000066400000000000000000000021161477772317200202770ustar00rootroot00000000000000name: Linter on: [pull_request, workflow_dispatch] jobs: pre-commit: runs-on: blacksmith-4vcpu-ubuntu-2204 steps: - name: Checkout branch uses: actions/checkout@v4 - name: Run pre-commit uses: pre-commit/action@v3.0.1 lint: runs-on: blacksmith-4vcpu-ubuntu-2204 strategy: matrix: python-version: ["3.13"] steps: - name: Install system packages run: sudo apt-get update && sudo apt-get install libssl-dev - name: Check out code from GitHub uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} id: python uses: useblacksmith/setup-python@v6 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: pip install --upgrade pip wheel tox tox-docker - name: Run flake8 run: tox -v -e flake8 -- -v - name: Run pydocstyle run: tox -v -e pydocstyle -- -v - name: Run apicheck run: tox -v -e apicheck -- -v - name: Run mypy run: tox -v -e mypy -- -v kombu-5.5.3/.github/workflows/python-package.yml000066400000000000000000000072471477772317200217260ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: Kombu on: push: branches: [ 'main'] paths: - '**.py' - '**.txt' - '.github/workflows/python-package.yml' - '**.toml' - "tox.ini" pull_request: branches: [ 'main' ] paths: - '**.py' - '**.txt' - '**.toml' - '.github/workflows/python-package.yml' - "tox.ini" workflow_dispatch: permissions: contents: read # to fetch code (actions/checkout) jobs: Unit: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] os: ["blacksmith-4vcpu-ubuntu-2204"] steps: - name: Install apt packages if: startsWith(matrix.os, 'blacksmith-4vcpu-ubuntu') run: sudo apt-get update && sudo apt-get install libssl-dev - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: useblacksmith/setup-python@v6 with: python-version: ${{ matrix.python-version }} allow-prereleases: true cache: 'pip' cache-dependency-path: '**/setup.py' - name: Install tox run: python -m pip install --upgrade pip wheel tox tox-docker - name: > Run tox for "${{ matrix.python-version }}-unit" timeout-minutes: 5 run: > tox --verbose --verbose -e "${{ matrix.python-version }}-unit" -vv - uses: codecov/codecov-action@v5 with: fail_ci_if_error: false # optional (default = false) token: ${{ secrets.CODECOV_TOKEN }} verbose: true # optional (default = false) Integration: needs: - Unit if: needs.Unit.result == 'success' runs-on: blacksmith-4vcpu-ubuntu-2204 strategy: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] toxenv: [ 'py-amqp', 'py-redis', 'py-mongodb', 'py-kafka' ] experimental: [false] include: - python-version: pypy3.10 toxenv: 'py-amqp' experimental: true - python-version: pypy3.10 toxenv: 'py-redis' experimental: true - python-version: pypy3.10 toxenv: 'py-mongodb' experimental: true # - python-version: pypy3.10 # toxenv: 'py-kafka' # experimental: true 
steps: - name: Install apt packages run: sudo apt-get update && sudo apt-get install libssl-dev - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} uses: useblacksmith/setup-python@v6 with: python-version: ${{ matrix.python-version }} allow-prereleases: true cache: 'pip' cache-dependency-path: '**/setup.py' - name: Install tox run: python -m pip install --upgrade pip wheel tox tox-docker - name: > Run tox for "${{ matrix.python-version }}-linux-integration-${{ matrix.toxenv }}" timeout-minutes: 30 run: > tox --verbose --verbose -e "${{ matrix.python-version }}-linux-integration-${{ matrix.toxenv }}" -vv kombu-5.5.3/.github/workflows/semgrep.yml000066400000000000000000000007451477772317200204520ustar00rootroot00000000000000on: pull_request: {} push: branches: - main - master paths: - .github/workflows/semgrep.yml schedule: # random HH:MM to avoid a load spike on GitHub Actions at 00:00 - cron: 41 19 * * * name: Semgrep jobs: semgrep: name: Scan runs-on: blacksmith-4vcpu-ubuntu-2204 env: SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} container: image: returntocorp/semgrep steps: - uses: actions/checkout@v4 - run: semgrep ci kombu-5.5.3/.gitignore000066400000000000000000000006401477772317200146520ustar00rootroot00000000000000.DS_Store *.pyc *$py.class *~ *.sqlite *.sqlite-journal settings_local.py build/ .build/ _build/ .*.sw* dist/ *.egg-info pip-log.txt devdatabase.db ^parts ^eggs ^bin developer-eggs downloads Documentation/* .tox/ nosetests.xml kombu/tests/cover kombu/tests/coverage.xml .coverage dump.rdb .idea/ .vscode/ .cache/ .pytest_cache/ htmlcov/ test.db coverage.xml venv/ env .eggs .python-version .coverage.* control/ .env kombu-5.5.3/.pre-commit-config.yaml000066400000000000000000000021611477772317200171430ustar00rootroot00000000000000repos: - repo: https://github.com/asottile/pyupgrade rev: v3.19.1 hooks: - id: pyupgrade args: ["--py38-plus"] - repo: https://github.com/PyCQA/autoflake rev: v2.3.1 hooks: - id: autoflake args: ["--in-place", "--ignore-pass-after-docstring", "--imports"] - repo: https://github.com/PyCQA/flake8 rev: 7.1.1 hooks: - id: flake8 - repo: https://github.com/asottile/yesqa rev: v1.5.0 hooks: - id: yesqa - repo: https://github.com/codespell-project/codespell rev: v2.3.0 hooks: - id: codespell # See pyproject.toml for args args: [--toml, pyproject.toml, --write-changes] additional_dependencies: - tomli - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-merge-conflict - id: check-toml - id: check-yaml - id: mixed-line-ending - repo: https://github.com/pycqa/isort rev: 5.13.2 hooks: - id: isort - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.14.1 hooks: - id: mypy pass_filenames: false kombu-5.5.3/.readthedocs.yaml000066400000000000000000000011471477772317200161140ustar00rootroot00000000000000# Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the version of Python and other tools you might need build: os: ubuntu-20.04 tools: python: "3.9" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py # If using Sphinx, optionally build your docs in additional formats such as PDF # formats: # - pdf # Optionally declare the Python requirements required to build your docs python: install: - method: pip path: . 
- requirements: requirements/docs.txt kombu-5.5.3/AUTHORS000066400000000000000000000130101477772317200137250ustar00rootroot00000000000000========= AUTHORS ========= :order: sorted (`.,$!sort -uf`) Adam Gaca Adam Nelson Adam Wentz Alan Justino Alex Koshelev Alexandre Bourget Anastasis Andronidis Andrew Watts Andrey Antukh Andrii Kostenko Andy McCurdy Anthony Lukach Antoine Legrand Anton Gyllenberg Ask Solem Asif Saif Uddin Basil Mironenko Bobby Beever Brian Bernstein Brian Bouterse Bruno Alla C Anthony Risinger Chris Erway Christophe Chauvet Christopher Duryee Christopher Grebs Clay Gerrard Corentin Ardeois Dan LaMotte Dan McGee Dane Guempel Davanum Srinivas David Clymer David Gelvin David Strauss David Ziegler Dhananjay Nene Dima Kurguzov Dmitry Malinovsky Dustin J. Mitchell Emmanuel Cazenave Ephemera Eric Reynolds Fabrice Rabaute Federico Ficarelli Felix Schwarz Felix Yan Fernando Jorge Mota Flavio [FlaPer87] Percoco Premoli Florian Munz Franck Cuny Gábor Boros Germán M. Bravo Gregory Haskins Hank John haridsv Hong Minhee Hunter Fernandes Ian Eure Ian Struble Ionel Maries Cristian iSlava James Saryerwinnie James Turk Jason Cater Jasper Bryant-Greene Jeff Balogh Jesper Thomschütz Jesse Dhillon John Shuping John Spray John Watson Jonathan Halcrow Joseph Crosland Joshua Harlow Juan Carlos Ferrer Kai Groner Keith Fitzgerald Kevin Fox Kevin McCarthy Kevin McDonald Latitia M. Haskins Len Buckens Lorenzo Mancini Luyun Xie <2304310@qq.com> Mads Jensen Mahendra M Manuel Vazquez Acosta Marcin Lulek (ergo) Marcin Puhacz Mark Lavin markow Matt Wise Maxime Rouyrre mdk Mher Movsisyan Michael Barrett Michael Nelson Nathan Van Gheem Nicolas Mota Nitzan Miron Noah Kantrowitz Ollie Walsh Pascal Hartig Patrick Schneider Paul McLanahan Petar Radosevic Peter Hoffmann Pierre Riteau Radek Czajka Rafael Duran Castaneda Rafal Malinowski Ralf Nyren Randy Barlow Raphael Michel Rob Ottaway Robert Kopaczewski Roger Hu Rumyana Neykova Rune Halvorsen Ryan Petrello Sam Stavinoha Sascha Peilicke Scott Lyons Sean Bleier Sean Creeley Seb Insua Sergey Azovskov Sergey Tikhonov Shane Caraveo Steeve Morin Stefan Eletzhofer Stephan Jaekel Stephen Day Stuart Axon Tareque Hossain Thomas Johansson Tobias Schottdorf Tomaž Muraus Tommie McAfee Travis Cline Travis Swicegood Victor Garcia Viet Hung Nguyen Vinay Karanam Vince Gonzalez Vincent Driessen Wido den Hollander Zach Smith Zhao Xiaohong Arcadiy Ivanov kombu-5.5.3/Changelog.rst000066400000000000000000004574501477772317200153220ustar00rootroot00000000000000.. _changelog: ================ Change history ================ .. _version-5.5.3: 5.5.3 ===== :release-date: 16 Apr, 2025 :release-by: Tomer Nosrati What's Changed ~~~~~~~~~~~~~~ - Fix error of double normalization options from URI in mongodb transport (#2282) - Move use of typing_extensions into TYPE_CHECKING block (#2283) - Do not pin package tzdata anymore (#2274) - Typo in pools's documentation (#2285) - Prepare for release: v5.5.3 (#2288) .. _version-5.5.2: 5.5.2 ===== :release-date: 31 Mar, 2025 :release-by: Tomer Nosrati What's Changed ~~~~~~~~~~~~~~ - Bump tzdata from 2025.1 to 2025.2 (#2268) - Downgrad kafka version to make the CI green again (#2271) - Attempt to fix retry loop in `maybe_declare()` when broken connection (#2275) - Prepare for release: v5.5.2 (#2278) .. 
_version-5.5.1: 5.5.1 ===== :release-date: 24 Mar, 2025 :release-by: Tomer Nosrati What's Changed ~~~~~~~~~~~~~~ - Update trove classifier with missing python 3.13 (#2262) - Fix native_delayed_delivery for queue with explicit bindings (#2263) - SQS: Fix Query Protocol Content-Type header (#2266) - SQS: More Protocol Tests (#2267) - Prepare for release: v5.5.1 (#2270) .. _version-5.5.0: 5.5.0 ===== :release-date: 13 Mar, 2025 :release-by: Tomer Nosrati Key Highlights ~~~~~~~~~~~~~~ Native Delayed Delivery ----------------------- Official support to `RabbitMQ Delayed Delivery `_, which is required to enable ETA tasks with quorum queues in Celery. urllib3 instead of curl ----------------------- We can finally say goodbye to the :pypi:`pycurl` dependency and use :pypi:`urllib3` instead. Transport: Google Pub/Sub ------------------------- New support for Google Pub/Sub as a transport broker. What's Changed ~~~~~~~~~~~~~~ - Pin tzdata to latest version 2024.2 (#2121) - Refactored CI (#2122) - Added unit tests to "tox -e parallel" environment (#2123) - Improve pre-commit checks and fix all new lint errors (#2124) - Removed codecov flags (#2126) - Pin pre-commit to latest version 3.8.0 (#2125) - Limit redis-py <5.1.0 (#2132) - Removed "on push" from the linter GitHub Actions workflow (#2133) - Bump pymongo from 4.9.1 to 4.10.0 (#2130) - Update pymongo to 4.10.1 (#2136) - fix(pytest): skip file-lock test on Windows (#2138) - Apply errback and callback when retry occurs (#2129) - Pin pre-commit to latest version 4.0.0 (Python 3.9+) (#2140) - Pin pre-commit to latest version 4.0.1 (#2144) - Add native delayed delivery API to kombu (#2128) - Add support for Google Pub/Sub as transport broker (#2147) - Update the transport options according to the retry policy (#2148) - Feature: urllib3 instead of curl (#2134) - Update mypy to 1.12.0 (#2149) - Prepare for (pre) release: v5.5.0rc1 (#2150) - Added missing changelog highlight for Google Pub/Sub (#2151) - Bump grpcio from 1.66.2 to 1.67.0 (#2158) - Fix: restrict google-cloud-pubsub version (#2160) - Update mypy to 1.12.1 (#2164) - Added a unit test that proves timeout is used when retry policy is specified (#2166) - Fix regression from #2148 (#2165) - Update google-cloud-pubsub requirement from <=2.20.3,>=2.18.4 to >=2.18.4,<=2.25.2 (#2168) - Revert "Update google-cloud-pubsub requirement from <=2.20.3,>=2.18.4 to >=2.18.4,<=2.25.2" (#2169) - Update mypy to 1.13.0 (#2172) - Fix: restrict google protobuf version (#2175) - Add support for Python 3.13 (#2052) - Apply retry policy to maybe_declare() (#2174) - Pin redis to >=4.5.2,!=4.5.5,!=5.0.2,<=5.2.0 (#2178) - Added confirm_timeout argument to publish() (#2167) - Prepare for (pre) release: v5.5.0rc2 (#2181) - Bump pytest-cov from 5.0.0 to 6.0.0 (#2183) - Add documentation for debug logs environment variables (#2186) - Add documentation for py-amqp transport options (#2187) - Bump codecov/codecov-action from 4 to 5 (#2189) - Update pytest to 8.3.4 (#2196) - Update redis requirement from !=4.5.5,!=5.0.2,<=5.2.0,>=4.5.2 to >=4.5.2,!=4.5.5,!=5.0.2,<=5.2.1 (#2203) - Close connections in case of an exception (#2201) - Bump pytest-freezer from 0.4.8 to 0.4.9 (#2205) - Bump mypy from 1.13.0 to 1.14.0 (#2211) - fix(sqs): don't crash on multiple predefined queues with aws sts session (#2224) - Bump mypy from 1.14.0 to 1.14.1 (#2215) - Bump tzdata from 2024.2 to 2025.1 (#2230) - fix: interpret the ssl_check_hostname as a boolean (#2229) - Revert "Resolving TypeError, during version unpacking " (#2225) - Bump flake8 
from 7.1.1 to 7.1.2 (#2244) - Prepare for (pre) release: v5.5.0rc3 (#2247) - Bump pytest from 8.3.4 to 8.3.5 (#2253) - Update delayed_infra example for better clarity (#2252) - SQS: Support Async JSON SQS Protocol & Message Attributes (#2226) - Prepare for release: v5.5.0 (#2255) Changes since 5.5.0rc3 ---------------------- - Bump pytest from 8.3.4 to 8.3.5 (#2253) - Update delayed_infra example for better clarity (#2252) - SQS: Support Async JSON SQS Protocol & Message Attributes (#2226) .. _version-5.5.0rc3: 5.5.0rc3 ======== :release-date: 20 Feb, 2025 :release-by: Tomer Nosrati Key Highlights ~~~~~~~~~~~~~~ Native Delayed Delivery ----------------------- Official support to `RabbitMQ Delayed Delivery `_, which is required to enable ETA tasks with quorum queues in Celery. urllib3 instead of curl ----------------------- We can finally say goodbye to the :pypi:`pycurl` dependency and use :pypi:`urllib3` instead. Transport: Google Pub/Sub ------------------------- New support for Google Pub/Sub as a transport broker. What's Changed ~~~~~~~~~~~~~~ - Bump pytest-cov from 5.0.0 to 6.0.0 (#2183) - Add documentation for debug logs environment variables (#2186) - Add documentation for py-amqp transport options (#2187) - Bump codecov/codecov-action from 4 to 5 (#2189) - Update pytest to 8.3.4 (#2196) - Update redis requirement from !=4.5.5,!=5.0.2,<=5.2.0,>=4.5.2 to >=4.5.2,!=4.5.5,!=5.0.2,<=5.2.1 (#2203) - Close connections in case of an exception (#2201) - Bump pytest-freezer from 0.4.8 to 0.4.9 (#2205) - Bump mypy from 1.13.0 to 1.14.0 (#2211) - fix(sqs): don't crash on multiple predefined queues with aws sts session (#2224) - Bump mypy from 1.14.0 to 1.14.1 (#2215) - Bump tzdata from 2024.2 to 2025.1 (#2230) - fix: interpret the ssl_check_hostname as a boolean (#2229) - Revert "Resolving TypeError, during version unpacking " (#2225) - Bump flake8 from 7.1.1 to 7.1.2 (#2244) - Prepare for (pre) release: v5.5.0rc3 (#2247) .. _version-5.5.0rc2: 5.5.0rc2 ======== :release-date: 29 Oct, 2024 :release-by: Tomer Nosrati Key Highlights ~~~~~~~~~~~~~~ Native Delayed Delivery ----------------------- Official support to `RabbitMQ Delayed Delivery `_, which is required to enable ETA tasks with quorum queues in Celery. urllib3 instead of curl ----------------------- We can finally say goodbye to the :pypi:`pycurl` dependency and use :pypi:`urllib3` instead. Transport: Google Pub/Sub ------------------------- New support for Google Pub/Sub as a transport broker. What's Changed ~~~~~~~~~~~~~~ - Added missing changelog highlight for Google Pub/Sub (#2151) - Bump grpcio from 1.66.2 to 1.67.0 (#2158) - Fix: restrict google-cloud-pubsub version (#2160) - Update mypy to 1.12.1 (#2164) - Added a unit test that proves timeout is used when retry policy is specified (#2166) - Fix regression from #2148 (#2165) - Update google-cloud-pubsub requirement from <=2.20.3,>=2.18.4 to >=2.18.4,<=2.25.2 (#2168) - Revert "Update google-cloud-pubsub requirement from <=2.20.3,>=2.18.4 to >=2.18.4,<=2.25.2" (#2169) - Update mypy to 1.13.0 (#2172) - Fix: restrict google protobuf version (#2175) - Add support for Python 3.13 (#2052) - Apply retry policy to maybe_declare() (#2174) - Pin redis to >=4.5.2,!=4.5.5,!=5.0.2,<=5.2.0 (#2178) - Added confirm_timeout argument to publish() (#2167) - Prepare for (pre) release: v5.5.0rc2 (#2181) .. 
_version-5.5.0rc1: 5.5.0rc1 ======== :release-date: 14 Oct, 2024 :release-by: Tomer Nosrati Key Highlights ~~~~~~~~~~~~~~ Native Delayed Delivery ----------------------- Official support to `RabbitMQ Delayed Delivery `_, which is required to enable ETA tasks with quorum queues in Celery. urllib3 instead of curl ----------------------- We can finally say goodbye to the :pypi:`pycurl` dependency and use :pypi:`urllib3` instead. Transport: Google Pub/Sub ------------------------- New support for Google Pub/Sub as a transport broker. What's Changed ~~~~~~~~~~~~~~ - Pin tzdata to latest version 2024.2 (#2121) - Refactored CI (#2122) - Added unit tests to "tox -e parallel" environment (#2123) - Improve pre-commit checks and fix all new lint errors (#2124) - Removed codecov flags (#2126) - Pin pre-commit to latest version 3.8.0 (#2125) - Limit redis-py <5.1.0 (#2132) - Removed "on push" from the linter GitHub Actions workflow (#2133) - Bump pymongo from 4.9.1 to 4.10.0 (#2130) - Update pymongo to 4.10.1 (#2136) - fix(pytest): skip file-lock test on Windows (#2138) - Apply errback and callback when retry occurs (#2129) - Pin pre-commit to latest version 4.0.0 (Python 3.9+) (#2140) - Pin pre-commit to latest version 4.0.1 (#2144) - Add native delayed delivery API to kombu (#2128) - Add support for Google Pub/Sub as transport broker (#2147) - Update the transport options according to the retry policy (#2148) - Feature: urllib3 instead of curl (#2134) - Update mypy to 1.12.0 (#2149) - Prepare for (pre) release: v5.5.0rc1 (#2150) .. _version-5.4.2: 5.4.2 ===== :release-date: 19 Sep, 2024 :release-by: Tomer Nosrati Fixed issue with SQS #2113 - tzdata is required for machines without IANA database (#2107) - blacksmith.sh: Migrate workflows to Blacksmith (#2112) - Revert "Validate SQS SSL certs by default" (#2114) - Update pymongo to 4.9 (#2115) - Bump pymongo from 4.9 to 4.9.1 (#2118) - Prepare for release: v5.4.2 (#2119) .. _version-5.4.1: 5.4.1 ===== :release-date: 11 Sep, 2024 :release-by: Tomer Nosrati - Update mypy to 1.11.2 (#2101) - SQS: Fixes #2091 queue_delete() method doesn't actually delete the queue (#2099) - Validate SQS SSL certs by default (#2094) - Resolving TypeError, during version unpacking (#2098) - Revert: allow users to disable broker heartbeats by not providing a timeout (#2097, #2016) (#2104) - Update msgpack to 1.1.0 (#2105) - Update pytest to 8.3.3 (#2108) - Prepare for release: v5.4.1 (#2109) .. _version-5.4.0: 5.4.0 ===== :release-date: 6 August, 2024 :release-by: Tomer Nosrati We want to add a special thanks to contribution `#2007 `_ by @awmackowiak for fixing the Redis reconnection bug. Restoring Redis stability has been an essential improvement - thank you! The rest of the changes are listed below. 
Changes ------- - fix: Fanout exchange messages mixed across virtual databases in Redis sentinel (#1986) - Pin pymongo to latest version 4.7.2 (#1994) - enable/fix test_etcd.py (resolves #2001) (#2002) - Limit requests<2.32.0 due to docker-py issue 3256 (#2011) - enhance: allow users to disable broker heartbeats (#1998) - enhance: allow uses to disable broker heartbeats by not providing a timeout (#1997, #1998) (#2016) - chore(typing): annotate `utils/debug.py` (#1714) - ConnectionPool can't be used after .resize(..., reset=True) (resolves #2018) (#2024) - Fix Redis connections after reconnect - consumer starts consuming the tasks after crash (#2007) - Add support for mongodb+srv scheme (#1976) - Added Changelog for v5.4.0rc1 (#2033) - Fixed bumpversion bug with RC versions (#2034) - Fix typo in README.rst (#2036) - Reverted limiting requests<2.32.0 in requirements/default.txt but kept in tox.ini due to docker-py issue 3256 (#2041) - Redis transport - Redelivered messages should respect the original priority (#2026) - Exclude Unit 3.9 from CI (#2046) - Fixed CI error from excluding Python 3.9 unit tests (#2047) - Fixed flaky integration test: test_publish_requeue_consume() (#2048) - fix: don't crash on `properties`.`body_encoding`: `utf-8` (#1690) - chore: handle kafka transport with confluentkafka ✨ (#1574) - Revert "Exclude Unit 3.9 from CI #2046" (#2054) - fix azure service bus isinstance checks when None (#2053) - Added Changelog for v5.4.0rc2 (#2056) - Fixed typo in Changelog for v5.4.0rc2 (#2057) - Use logging.Logger.warning (#2058) - SQS: add support for passing MessageAttributes (#2059) - Added Changelog for v5.4.0rc3 (#2064) - Prepare for release: v5.4.0 (#2095) Dependencies Updates -------------------- - Update mypy to 1.10.0 (#1988) - Update pytest to 8.2.0 (#1990) - Bump pytest from 8.2.0 to 8.2.1 (#2005) - Pin typing_extensions to latest version 4.12.1 (#2017) - Bump pytest from 8.2.1 to 8.2.2 (#2021) - Bump pymongo from 4.7.2 to 4.7.3 (#2022) - Update flake8 to 7.1.0 (#2028) - Bump mypy from 1.10.0 to 1.10.1 (#2039) - Bump pymongo from 4.7.3 to 4.8.0 (#2044) - Pin zstandard to latest version 0.23.0 (#2060) - Update mypy to 1.11.0 (#2062) - Update pytest to 8.3.1 (#2063) - Update typing_extensions to 4.12.2 (#2066) - Pin vine to latest version 5.1.0 (#2067) - Update pytest to 8.3.2 (#2076) - Pin codecov to latest version 2.1.13 (#2084) - Pin pytest-freezer to latest version 0.4.8 (#2085) - Pin msgpack to latest version 1.0.8 (#2080) - Pin python-consul2 to latest version 0.1.5 (#2078) - Pin pycouchdb to latest version 1.16.0 (#2079) - Pin bumpversion to latest version 0.6.0 (#2083) - Pin kazoo to latest version 2.10.0 (#2082) - Pin pyro4 to latest version 4.82 (#2081) - Bump mypy from 1.11.0 to 1.11.1 (#2087) - Bump flake8 from 7.1.0 to 7.1.1 (#2090) Changes since 5.4.0rc3 ---------------------- - Update typing_extensions to 4.12.2 (#2066) - Pin vine to latest version 5.1.0 (#2067) - Update pytest to 8.3.2 (#2076) - Pin codecov to latest version 2.1.13 (#2084) - Pin pytest-freezer to latest version 0.4.8 (#2085) - Pin msgpack to latest version 1.0.8 (#2080) - Pin python-consul2 to latest version 0.1.5 (#2078) - Pin pycouchdb to latest version 1.16.0 (#2079) - Pin bumpversion to latest version 0.6.0 (#2083) - Pin kazoo to latest version 2.10.0 (#2082) - Pin pyro4 to latest version 4.82 (#2081) - Bump mypy from 1.11.0 to 1.11.1 (#2087) - Bump flake8 from 7.1.0 to 7.1.1 (#2090) - Prepare for release: v5.4.0 (#2095) .. 
_version-5.4.0rc3: 5.4.0rc3 ======== :release-date: 22 July, 2024 :release-by: Tomer Nosrati - Fixed typo in Changelog for v5.4.0rc2 (#2057) - Use logging.Logger.warning (#2058) - Pin zstandard to latest version 0.23.0 (#2060) - Update mypy to 1.11.0 (#2062) - Update pytest to 8.3.1 (#2063) - SQS: add support for passing MessageAttributes (#2059) .. _version-5.4.0rc2: 5.4.0rc2 ======== :release-date: 11 July, 2024 :release-by: Tomer Nosrati The ``requests`` package is no longer limited to <2.32.0 per #2041. Contribution #2007 by @awmackowiak was confirmed to have solved the Redis reconnection bug. - Bump mypy from 1.10.0 to 1.10.1 (#2039) - Fix typo in README.rst (#2036) - Reverted limiting requests<2.32.0 in requirements/default.txt but kept in tox.ini due to docker-py issue 3256 (#2041) - Redis transport - Redelivered messages should respect the original priority (#2026) - Exclude Unit 3.9 from CI (#2046) - Fixed CI error from excluding Python 3.9 unit tests (#2047) - Fixed flaky integration test: test_publish_requeue_consume() (#2048) - Bump pymongo from 4.7.3 to 4.8.0 (#2044) - fix: don't crash on `properties`.`body_encoding`: `utf-8` (#1690) - chore: handle kafka transport with confluentkafka ✨ (#1574) - Revert "Exclude Unit 3.9 from CI #2046" (#2054) - fix azure service bus isinstance checks when None (#2053) .. _version-5.4.0rc1: 5.4.0rc1 ======== :release-date: 22 June, 2024 :release-by: Tomer Nosrati We want to add a special thanks to contribution #2007 by @awmackowiak for fixing the Redis reconnection bug. This release candidate aims to allow the community to test the changes and provide feedback. Please let us know if Redis is stable again! New: #1998, #2016, #2024, #1976 The rest of the changes are bug fixes and dependency updates. Lastly, ``requests`` is limited to <2.32.0 per #2011. - Update mypy to 1.10.0 (#1988) - Update pytest to 8.2.0 (#1990) - fix: Fanout exchange messages mixed across virtual databases in Redis sentinel (#1986) - Pin pymongo to latest version 4.7.2 (#1994) - enable/fix test_etcd.py (resolves #2001) (#2002) - Bump pytest from 8.2.0 to 8.2.1 (#2005) - Limit requests<2.32.0 due to docker-py issue 3256 (#2011) - enhance: allow users to disable broker heartbeats (#1998) - enhance: allow uses to disable broker heartbeats by not providing a timeout (#1997,#1998) (#2016) - Pin typing_extensions to latest version 4.12.1 (#2017) - chore(typing): annotate `utils/debug.py` (#1714) - Bump pytest from 8.2.1 to 8.2.2 (#2021) - Bump pymongo from 4.7.2 to 4.7.3 (#2022) - ConnectionPool can't be used after .resize(..., reset=True) (resolves #2018) (#2024) - Fix Redis connections after reconnect - consumer starts consuming the tasks after crash. (#2007) - Update flake8 to 7.1.0 (#2028) - Add support for mongodb+srv scheme (#1976) .. _version-5.3.7: 5.3.7 ===== :release-date: 11 April, 2024 :release-by: Tomer Nosrati The release of v5.3.6 was missing the bumbversion commit so v5.3.7 is only released to sync it back. .. 
_version-5.3.6: 5.3.6 ===== :release-date: 27 Mar, 2024 :release-by: Tomer Nosrati - boto3>=1.26.143 (#1890) - Always convert azureservicebus namespace to fully qualified (#1892) - Pin pytest-sugar to latest version 1.0.0 (#1912) - Upgrade to pytest v8 that removed nose compatibility (#1914) - fix warning for usage of utcfromtimestamp (#1926) - Update pytest to 8.0.2 (#1942) - Hotfix: Fix CI failures (limit redis to <5.0.2 instead of <6.0.0) (#1961) - Expose cancel callback from py-amqp channel.basic_consume (#1953) - Update mypy to 1.9.0 (#1963) - Update pytest to 8.1.1 (#1965) - Pin hypothesis to hypothesis<7 (#1966) - redis>=4.5.2,<5.0.2,!=4.5.5 -> redis>=4.5.2,!=5.0.2,!=4.5.5 (#1969) - add escape hatch for custom JSON serialization (#1955) - Pin pytest-cov to latest version 5.0.0 (#1972) .. _version-5.3.5: 5.3.5 ===== :release-date: 12 Jan, 2024 :release-by: Tomer Nosrati - Fix ReadTheDocs CI (#1827). - fix(docs): add Redis to the list of transports where SSL is supported (#1826). - Fixed Improper Method Call: Replaced `mktemp` (#1828). - Bump actions/setup-python from 4 to 5 (#1829). - Bump github/codeql-action from 2 to 3 (#1832). - fix: freeze set during ticks iter in async hub (#1830). - azure service bus: fix TypeError when using Managed Identities (#1825). - Fix unacknowledge typo in restore_visible() (#1839). - Changed pyup -> dependabot for updating dependencies (#1842). - Bump pytest from 7.4.3 to 7.4.4 (#1843). - Bump flake8 from 6.0.0 to 7.0.0 (#1845). - Bump mypy from 1.3.0 to 1.8.0 (#1844). - Fix crash when using global_keyprefix with a sentinel connection (#1838) - Fixed version_dev in docs/conf.py (#1875). .. _version-5.3.4: 5.3.4 ===== :release-date: 16 Nov, 2023 :release-by: Asif Saif Uddin - Use the correct protocol for SQS requests (#1807). .. _version-5.3.3: 5.3.3 ===== :release-date: 6 Nov, 2023 :release-by: Asif Saif Uddin - Raise access denied error when ack. - test redis 5.0.0. - fix azure servicebus using managed identity support (#1801). - Added as_uri method to MongoDB transport - Fixes #1795 (#1796). - Revert "[fix #1726] Use boto3 for SQS async requests (#1759)" (#1799). - Create a lock on cached_property if not present (#1811). - Bump kafka deps versions & fix integration test failures (#1818). - Added Python 3.12 support. - Fix: redis requeue concurrency bug #1800 (#1805). .. _version-5.3.2: 5.3.2 ===== :release-date: 31 Aug, 2023 :release-by: Tomer Nosrati - Reverted unwanted constraint introduced in #1629 with max_retries (#1755) - Doc fix (hotfix for #1755) (#1758) - Python3.12: fix imports in kombu/utils/objects.py (#1756) - [fix #1726] Use boto3 for SQS async requests (#1759) - docs: Remove SimpleQueue import (#1764) - Fixed pre-commit issues (#1773) - azure service bus: add managed identity support (#1641) - fix: Prevent redis task loss when closing connection while in poll (#1733) - Kombu & celery with SQS #222 (#1779) - syntax correction (#1780) .. _version-5.3.1: 5.3.1 ===== :release-date: 15 Jun, 2023 :release-by: Asif Saif Uddin - Update pycurl version. - Declare python 3.11 support (#1425). - Fix: allow deserializing any version of UUID. - Update PyCurl version in SQS (#1747). .. _version-5.3.0: 5.3.0 ===== :release-date: 03 Jun, 2023 :release-by: Asif Saif Uddin - Support for Kafka as transport. - Add fanout to filesystem (#1499). - Added possibility to serialize and deserialize binary messages in json (#1516). - Support pymongo 4.x (#1536). - Support redis-py 4.5.x. - Upgrade Azure Storage Queues transport to version 12 (#1539). 
- Add support to SQS DelaySeconds (#1567). - Add WATCH to prefixed complex commands. - Avoid losing type of UUID when serializing/deserializing (#1575). - Added HLEN to the list of prefixed redis commands (#1540). - Add managed identity support to azure storage queue (#1631). - Add Support of sqlalchemy v2.0. - Deprecate pytz and use zoneinfo (#1680) .. _version-5.3.0rc2: 5.3.0rc2 ======== :release-date: 31 May, 2023 :release-by: Asif Saif Uddin - add missing zoneinfo dependency (#1732). - Support redis >= 4.5.2 - Loosen urlib3 version range for botocore compat .. _version-5.3.0rc1: 5.3.0rc1 ======== :release-date: 24 May, 2023 :release-by: Asif Saif Uddin - Moved to pytest-freezer (#1683). - Deprecate pytz and use zoneinfo (#1680). - handle keyerror in azureservicebus transport when message is not found in qos and perform basic_ack (#1691). - fix mongodb transport obsolete calls (#1694). - SQS: avoid excessive GetQueueURL calls by using cached queue url (#1621). - Update confluentkafka.txt version (#1727). - Revert back to pyro4 for now. .. _version-5.3.0b3: 5.3.0b3 ======= :release-date: 20 Mar, 2023 :release-by: Asif Saif Uddin - Use SPDX license expression in project metadata. - Allowing Connection.ensure() to retry on specific exceptions given by policy (#1629). - Redis==4.3.4 temporarilly in an attempt to avoid BC (#1634). - Add managed identity support to azure storage queue (#1631). - Support sqla v2.0 (#1651). - Switch to Pyro5 (#1655). - Remove unused _setupfuns from serialization.py. - Refactor: Refactor utils/json (#1659). - Adapt the mock to correctly mock the behaviors as implemented on Python 3.10. (Ref #1663). .. _version-5.3.0b2: 5.3.0b2 ======= :release-date: 19 Oct, 2022 :release-by: Asif Saif Uddin - fix: save QueueProperties to _queue_name_cache instead of QueueClient. - hub: tick delay fix (#1587). - Fix incompatibility with redis in disconnect() (#1589). - Solve Kombu filesystem transport not thread safe. - importlib_metadata remove deprecated entry point interfaces (#1601). - Allow azurestoragequeues transport to be used with Azurite emulator in docker-compose (#1611). .. _version-5.3.0b1: 5.3.0b1 ======= :release-date: 1 Aug, 2022 :release-by: Asif Saif Uddin - Add ext.py files to setup.cfg. - Add support to SQS DelaySeconds (#1567). - Add WATCH to prefixed complex commands. - Avoid losing type of UUID when serializing/deserializing (#1575). - chore: add confluentkafka to extras. .. _version-5.3.0a1: 5.3.0a1 ======= :release-date: 29 Jun, 2022 :release-by: Asif Saif Uddin - Add fanout to filesystem (#1499). - Protect set of ready tasks by lock to avoid concurrent updates. (#1489). - Correct documentation stating kombu uses pickle protocol version 2. - Use new entry_points interface. - Add mypy to the pipeline (#1512). - Added possibility to serialize and deserialize binary messages in json (#1516). - Bump pyupgrade version and add __future__.annotations import. - json.py cleaning from outdated libs (#1533). - bump new py-amqp to 5.1.1 (#1534). - add GitHub URL for PyPi. - Upgrade pytest to ~=7.1.1. - Support pymongo 4.x (#1536). - Initial Kafka support (#1506). - Upgrade Azure Storage Queues transport to version 12 (#1539). - move to consul2 (#1544). - Datetime serialization and deserialization fixed (#1515). - Bump redis>=4.2.2 (#1546). - Update sqs dependencies (#1547). - Added HLEN to the list of prefixed redis commands (#1540). - Added some type annotations. .. 
_version-5.2.4: 5.2.4 ===== :release-date: 06 Mar, 2022 :release-by: Asif Saif Uddin - Allow getting recoverable_connection_errors without an active transport. - Prevent KeyError: 'purelib' by removing INSTALLED_SCHEME hack from setup.py. - Revert "try pining setuptools (#1466)" (#1481). - Fix issue #789: Async http code not allowing for proxy config (#790). - Fix The incorrect times of retrying. - Set redelivered property for Celery with Redis (#1484). - Remove use of OrderedDict in various places (#1483). - Warn about missing hostname only when default one is available (#1488). - All supported versions of Python define __package__. - Added global_keyprefix support for pubsub clients (#1495). - try pytest 7 (#1497). - Add an option to not base64-encode SQS messages. - Fix SQS extract_task_name message reference. .. _version-5.2.3: 5.2.3 ===== :release-date: 29 Dec, 2021 :release-by: Asif Saif Uddin - Allow redis >= 4.0.2. - Fix PyPy CI jobs. - SQS transport: detect FIFO queue properly by checking queue URL (#1450). - Ensure that restore is atomic in redis transport (#1444). - Restrict setuptools>=59.1.1,<59.7.0. - Bump minimum py-amqp to v5.0.9 (#1462). - Reduce memory usage of Transport (#1470). - Prevent event loop polling on closed redis transports (and causing leak). - Respect connection timeout (#1458) - prevent redis event loop stopping on 'consumer: Cannot connect' (#1477). .. _version-5.2.2: 5.2.2 ===== :release-date: 16 Nov, 2021 :release-by: Asif Saif Uddin - Pin redis version to >= 3.4.1<4.0.0 as it is not fully compatible yet. .. _version-5.2.1: 5.2.1 ===== :release-date: 8 Nov, 2021 :release-by: Asif Saif Uddin - Bump redis version to >= 3.4.1. - try latest sqs dependencies ti fix security warning. - Tests & dependency updates .. _version-5.2.0: 5.2.0 ===== :release-date: 5 Nov, 2021 :release-by: Naomi Elstein - v 1.4.x (#1338). - stop mentioning librabbitmq (#1381). - Merge branch 'master' of https://github.com/celery/kombu - test new pytest version (#1383). - drop python 3.6 from CI (#1382). - Use ANY from unittest instead of case.mock. - Fix missing dependency to redis in docs requirements. - [pre-commit.ci] pre-commit autoupdate. - Remove dependency to case (#1389). - Fix: check redis response type. - [pre-commit.ci] pre-commit autoupdate (#1393). - py3.7+ on setup (#1392). - Prevent caching of oid in pidbox (#1394). - Added unittests for #1394 . - fix flake8 in kombu/asynchronous/aws/connection.py (#1397). - [pre-commit.ci] pre-commit autoupdate. - Fix test_pidbox unittests to support non-linux platforms (#1398). - [pre-commit.ci] pre-commit autoupdate. - removre bdist. - add python 3.10 to CI & fix other issues (#1402). - try to fix CI (#1407). - Dont failfast when pypy3 tests fail (#1408). - Return empty list instead of InconsistencyError when exchange table is empty (#1404). - [pre-commit.ci] pre-commit autoupdate. .. _version-5.2.0rc1: 5.2.0rc1 ======== :release-date: 2021-09-07 7:00 P.M UTC+6:00 :release-by: Asif Saif Uddin - Remove backward compatible code not used anymore (#1344). - Add support for setting redis username (#1351). - Add support for Python 3.9. - Use hostname from URI when server_host is None. - Use Python's built-in json module by default, instead of simplejson. - SQS Channel.predefined_queues should be {} if not defined. - Add global key prefix for keys set by Redis transporter (#1349). - fix: raise BrokenPipeError (#1231). - fix: add missing commands to prefix. - Make BrokerState Transport specific. - Tests & Docs cleanup. .. 
_version-5.1.0: 5.1.0 ===== :release-date: 2021-05-23 7:00 P.M UTC+3:00 :release-by: Omer Katz - Fix queue names special characters replacement for Azure Service Bus. (#1324) - Add support for SQLAlchemy 1.4. (#1328) - Coerce seconds argument to a floating point number in ``Timer.enter_after``. (#1330) - Add accept parameter to SimpleQueue class. (#1140) - ``prepare_accept_content()`` now raises ``SerializerNotInstalled`` instead of ``KeyError``. (#1343) .. _version-5.1.0b1: 5.1.0b1 ======= :release-date: 2021-04-01 10:30 P.M UTC+6:00 :release-by: Asiff Saif Uddin - Wheels are no longer universal. - Revert "Added redis transport key_prefix from envvars". - Redis Transport: Small improvements of `SentinelChannel` (#1253). - Fix pidbox not using default channels. - Revert "on worker restart - restore visible regardless to time (#905)". - Add vine to dependencies. - Pin urllib3<1.26 to fix failing unittests. - Add timeout to producer publish (#1269). - Remove python2 compatibility code (#1277). - redis: Support Sentinel with SSL. - Support for Azure Service Bus 7.0.0 (#1284). - Allow specifying session token (#1283). - kombu/asynchronous/http/curl: implement _set_timeout. - Disable namedtuple to object feature in simplejson (#1297). - Update to tox docker 2.0. - SQS back-off policy (#1301). - Fixed SQS unittests. - Fix: non kombu json message decoding in SQS transport (#1306). - Add Github Actions CI (#1309). - Update default pickle protocol version to 4 (#1314). - Update connection.py (#1311). - Drop support for the lzma backport. - Drop obsolete code importing pickle (#1315). - Update default login method for librabbitmq and pyamqp (#936). - SQS Broker - handle STS authentication with AWS (#1322). - Min py-amqp version is v5.0.6 (#1325). - Numerous docs & example fixes. - Use a thread-safe implementation of cached_property (#1316). .. _version-5.0.2: 5.0.2 ===== :release-date: 2020-09-06 6:30 P.M UTC+3:00 :release-by: Omer Katz - Bump required amqp version to 5.0.0. .. _version-5.0.1: 5.0.1 ===== :release-date: 2020-08-23 19:10 P.M UTC+3:00 :release-by: Omer Katz - Removed kombu.five from the reference documentation since it no longer exists - Adjusted the stable documentation's version in Sphinx's configuration since that was overlooked in the latest release .. _version-5.0.0: 5.0.0 ===== :release-date: 2020-08-05 16:00 P.M UTC+3:00 :release-by: Omer Katz - **BREAKING CHANGE**: Dropped support for Python 2 (#1232) - Add an SQS transport option for custom botocore config (#1219) .. _version-4.6.11: 4.6.11 ======= :release-date: 2020-06-24 1.15 P.M UTC+6:00 :release-by: Asif Saif Uddin - Revert incompatible changes in #1193 and additional improvements (#1211) - Default_channel should reconnect automatically (#1209) .. _version-4.6.10: 4.6.10 ====== :release-date: 2020-06-03 10.45 A.M UTC+6:00 :release-by: Asif Saif Uddin - Doc improvement. - set _connection in _ensure_connection (#1205) - Fix for the issue #1172 - reuse connection [bug fix] .. _version-4.6.9: 4.6.9 ===== :release-date: 2020-06-01 14.00 P.M UTC+6:00 :release-by: Asif Saif Uddin - Prevent failure if AWS creds are not explicitly defined on predefined. - Raise RecoverableConnectionError in maybe_declare with retry on and. - Fix for the issue #1172 . - possible fix for #1174 . 
- Fix: make SQLAlchemy Channel init thread-safe - Added integration testing infrastructure for RabbitMQ - Initial redis integration tests implementation - SQLAlchemy transport: Use Query.with_for_update() instead of deprecated - Fix Consumer Encoding - Added Integration tests for direct, topic and fanout exchange types - Added TTL integration tests - Added integration tests for priority queues - fix 100% cpu usage on linux while using sqs - Modified Mutex to use redis LuaLock implementation - Fix: eliminate remaining race conditions from SQLAlchemy Channel - Fix connection imaybe_declare (#1196) - Fix for issue #1198: Celery crashes in cases where there aren’t enough - Ensure connection when connecting to broker - update pyamqp to 2.6 with optional cythonization .. _version-4.6.8: 4.6.8 ===== :release-date: 2020-03-29 20:45 A.M UTC+6:00 :release-by: Asif Saif Uddin - Added support for health_check_interval option in broker_transport_options (#1145) - Added retry_on_timeout parameter to Redis Channel (#1150) - Added support for standard values for ssl_cert_reqs query parameter for Redis (#1139) - Added predefined_queues option to SQS transport (#1156) - Added ssl certificate verification against ca certificates when amqps is used for pyamqp transport (#1151) - Fix issue (#701) where kombu.transport.redis.Mutex is broken in python 3 (#1141) - Fix brop error in Redis Channel (#1144) .. _version-4.6.7: 4.6.7 ===== :release-date: 2019-12-07 20:45 A.M UTC+6:00 :release-by: Asif Saif Uddin - Use importlib.metadata from the standard library on Python 3.8+ (#1086). - Add peek lock settings to be changed using transport options (#1119). - Fix redis health checks (#1122). - Reset ready before execute callback (#1126). - Add missing parameter queue_args in kombu.connection.SimpleBuffer (#1128) .. _version-4.6.6: 4.6.6 ===== :release-date: 2019-11-11 00:15 A.M UTC+6:00 :release-by: Asif Saif Uddin - Revert _lookup_direct and related changes of redis. - Python 3.8 support - Fix 'NoneType' object has no attribute 'can_read' bug of redis transport - Issue #1019 Fix redis transport socket timeout - Add wait timeout settings to receive queue message (#1110) - Bump py-amqp to 2.5.2 .. _version-4.6.5: 4.6.5 ===== :release-date: 2019-09-30 19:30 P.M UTC+6:00 :release-by: Asif Saif Uddin - Revert _lookup api and correct redis implemetnation. - Major overhaul of redis test cases by adding more full featured fakeredis module. - Add more test cases to boost coverage of kombu redis transport. - Refactor the producer consumer test cases to be based on original mocks and be passing - Fix lingering line length issue in test. - Sanitise url when include_password is false - Pinned pycurl to 7.43.0.2 as it is the latest build with wheels provided - Bump py-amqp to 2.5.2 .. _version-4.6.4: 4.6.4 ===== :release-date: 2019-08-14 22:45 P.M UTC+6:00 :release-by: Asif Saif Uddin - Use importlib-metadata instead of pkg_resources for better performance - Allow users to switch URLs while omitting the resource identifier (#1032) - Don't stop receiving tasks on 503 SQS error. (#1064) - Fix maybe declare (#1066) - Revert "Revert "Use SIMEMBERS instead of SMEMBERS to check for queue (Redis Broker) - Fix MongoDB backend to work properly with TTL (#1076) - Make sure that max_retries=0 is treated differently than None (#1080) - Bump py-amqp to 2.5.1 .. _version-4.6.3: 4.6.3 ===== :release-date: 2019-06-15 12:45 A.M UTC+6:00 :release-by: Asif Saif Uddin - Revert FastUUID for kombu 4.6 .. 
_version-4.6.2: 4.6.2 ===== :release-date: 2019-06-15 12:45 A.M UTC+6:00 :release-by: Asif Saif Uddin - Fix sbugs and regressions .. _version-4.6.1: 4.6.1 ===== :release-date: 2019-06-06 10:30 A.M UTC+6:00 :release-by: Asif Saif Uddin - Fix some newly introduced bug in kombu 4.6 .. _version-4.6.0: 4.6.0 ===== :release-date: 2019-05-30 15:30 P.M UTC+6:00 :release-by: Asif Saif Uddin - Dropped python 3.4 - Bump py-amqp to 2.5.0 - Use SIMEMBERS instead of SMEMBERS to check for queue (redis broker) * Add `_lookup_direct` method to virtual channel. (#994) Add possibility to optimize lookup for queue in direct exchange set. * Add `_lookup_direct` method to redis virtual channel. (#994) Use `SISMEMBER` instead of `SMEMBERS` command to check if queue exists in a set. Time complexity is increased from O(N) to O(1) where N is the set cardinality. Contributed by **Stevan Milic** and **Asif Saif Uddin** - Include priority in properties only if it's not None. Since we attempt to serialize the priority property if it exists in the dictionary it must be an integer. Contributed by **Omer Katz** - Removed dangerous default mutable arguments from function definitions where appropriate. Contributed by **Todd Cook** - Codebase improvements and fixes by: - **Omer Katz** - **Asif Saif Uddin** .. _version-4.5.0: 4.5.0 ===== :release-date: 2019-03-3 18:30 P.M UTC+3:00 :release-by: Omer Katz - The Redis transport now supports a custom separator for keys. Previously when storing a key in Redis which represents a queue we used the hardcored value ``\x06\x16`` separator to store different attributes of the queue in the queue's name. The separator is now configurable using the sep transport option: .. code-block:: python with Connection('redis://', transport_options={ 'sep': ':', }): # ... pass Contributed by **Joris Beckers** - When the SQS server returns a timeout we ignore it and keep trying instead of raising an error. This will prevent Celery from raising an error and hanging. Contributed by **Erwin Rossen** - Properly declare async support for the Qpid transport. If you are using this transport we strongly urge you to upgrade. Contributed by **Rohan McGovern** - Revert `celery/kombu#906 `_ and introduce unique broadcast queue names as an optional keyword argument. If you want each broadcast queue to have a unique name specify `unique=True`: .. code-block:: pycon >>> from kombu.common import Broadcast >>> q = Broadcast(queue='foo', unique=True) >>> q.name 'foo.7ee1ac20-cda3-4966-aaf8-e7a3bb548688' >>> q = Broadcast(queue='foo') >>> q.name 'foo' - Codebase improvements and fixes by: - **Omer Katz** .. _version-4.4.0: 4.4.0 ===== :release-date: 2019-03-3 9:00 P.M UTC+2:00 :release-by: Omer Katz - Restore bz2 import checks in compression module. The checks were removed in `celery/kombu#938 `_ due to assumption that it only affected Jython. However, bz2 support can be missing in Pythons built without bz2 support. Contributed by **Patrick Woods** - Fix regression that occurred in 4.3.0 when parsing Redis Sentinel master URI containing password. Contributed by **Peter Lithammer** - Handle the case when only one Redis Sentinel node is provided. Contributed by **Peter Lithammer** - Support SSL URL parameters correctly for `rediss://`` URIs. Contributed by **Paul Bailey** - Revert `celery/kombu#954 `_. Instead bump the required redis-py dependency to 3.2.0 to include this fix `andymccurdy/redis-py@4e1e748 `_. 
Contributed by **Peter Lithammer** - Added support for broadcasting using a regular expression pattern or a glob pattern to multiple Pidboxes. Contributed by **Jason Held** .. _version-4.3.0: 4.3.0 ===== :release-date: 2019-01-14 7:00 P.M UTC+2:00 :release-by: Omer Katz - Added Python 3.7 support. Contributed by **Omer Katz**, **Mads Jensen** and **Asif Saif Uddin** - Avoid caching queues which are declared with a TTL. Queues that are declared with a TTL are now also be excluded from the in-memory cache in case they expire between publishes on the same channel. Contributed by **Matt Yule-Bennett** - Added an index to the Message table for the SQLAlchemy transport. The index allows to effectively sorting the table by the message's timestamp. .. note:: We do not provide migrations for this model yet. You will need to add the index manually if you are already using the SQLAlchemy transport. The syntax may vary between databases. Please refer to your database's documentation for instructions. Contributed by **Mikhail Shcherbinin** - Added a timeout that limits the amount of time we retry to reconnect to a transport. Contributed by **:github_user:`tothegump`** - :class:``celery.asynchronous.hub.Hub`` is now reentrant. This allows calling :func:`celery.bin.celery.main` to revive a worker in the same process after rescuing from shutdown (:class:``SystemExit``). Contributed by **Alan Justino da Silva** - Queues now accept string exchange names as arguments as documented. Tests were added to avoid further regressions. Contributed by **Antonio Gutierrez** - Specifying names for broadcast queues now work as expected. Previously, named broadcast queues did not create multiple queues per worker. They incorrectly declared the named queue which resulted in one queue per fanout exchange, thus missing the entire point of a fanout exchange. The behavior is now matched to unnamed broadcast queues. Contributed by **Kuan Hsuan-Tso** - When initializing the Redis transport in conjunction with gevent restore all unacknowledged messages to queue. Contributed by **Gal Cohen** - Allow :class:``kombu.simple.SimpleQueue`` to pass queue_arguments to Queue object. This allows :class:``kombu.simple.SimpleQueue`` to connect to RabbitMQ queues with custom arguments like 'x-queue-mode'='lazy'. Contributed by **C Blue Neeh** - Add support for 'rediss' scheme for secure Redis connections. The rediss scheme defaults to the least secure form, as there is no suitable default location for `ca_certs`. The recommendation would still be to follow the documentation and specify `broker_use_ssl` if coming from celery. Contributed by **Daniel Blair** - Added the Azure Storage Queues transport. The transport is implemented on top of Azure Storage Queues. This offers a simple but scalable and low-cost PaaS transport for Celery users in Azure. The transport is intended to be used in conjunction with the Azure Block Blob Storage backend. Contributed by **Clemens Wolff**, **:github_user:`@ankurokok`**, **Denis Kisselev**, **Evandro de Paula**, **Martin Peck** and **:github_user:`@michaelperel`** - Added the Azure Service Bus transport. The transport is implemented on top of Azure Service Bus and offers PaaS support for more demanding Celery workloads in Azure. The transport is intended to be used in conjunction with the Azure CosmosDB backend. 
Contributed by **Clemens Wolff**, **:github_user:`@ankurokok`**, **Denis Kisselev**, **Evandro de Paula**, **Martin Peck** and **:github_user:`@michaelperel`** - Drop remaining mentions of Jython support completely. Contributed by **Asif Saif Uddin** and **Mads Jensen** - When publishing messages to the Pidbox, retry if an error occurs. Contributed by **Asif Saif Uddin** - Fix infinite loop in :method:``kombu.asynchronous.hub.Hub.create_loop``. Previous attempt to fix the problem (PR kombu/760) did not consider an edge case. It is now fixed. Contributed by **Vsevolod Strukchinsky** - Worker shutdown no longer duplicates messages when using the SQS broker. Contributed by **Mintu Kumar Sah** - When using the SQS broker, prefer boto's default region before our hardcoded default. Contributed by **Victor Villas** - Fixed closing of shared redis sockets which previously caused Celery to hang. Contributed by **Alexey Popravka** - the `Pyro`_ transport (:mod:`kombu.transport.pyro`) now works with recent Pyro versions. Also added a Pyro Kombu Broker that this transport needs for its queues. Contributed by **Irmen de Jong** - Handle non-base64-encoded SQS messages. Fix contributed by **Tim Li**, **Asif Saif Uddin** and **Omer Katz**. - Move the handling of Sentinel failures to the redis library itself. Previously, Redis Sentinel worked only if the first node's sentinel service in the URI was up. A server outage would have caused downtime. Contributed by **Brian Price** - When using Celery and the pickle serializer with binary data as part of the payload, `UnicodeDecodeError` would be raised as the content was not utf-8. We now replace on errors. Contributed by **Jian Dai** - Allow setting :method:``boto3.sqs.create_queue`` Attributes via transport_options. Contributed by **Hunter Fernandes** - Fixed infinite loop when entity.channel is replaced by revive() on connection drop. Contributed by **Tzach Yarimi** - Added optional support for Brotli compression. Contributed by **Omer Katz** - When using the SQS broker, FIFO queues with names that ended with the 'f' letter were incorrectly parsed. This is now fixed. Contributed by **Alex Vishnya** and **Ilya Konstantinov** - Added optional support for LZMA compression. Contributed by **Omer Katz** - Added optional support for ZStandard compression. Contributed by **Omer Katz** - Require py-amqp 2.4.0 as the minimum version. Contributed by **Asif Saif Uddin** - The value of DISABLE_TRACEBACKS environment variable is now respected on debug, info and warning logger level. Contributed by **Ludovic Rivallain** - As documented in kombu/#741 and eventlet/eventlet#415 there is a mismatch between the monkey-patched eventlet queue and the interface Kombu is expecting. This causes Celery to crash when the `broker_pool_limit` configuration option is set eventlet/eventlet#415 suggests that the mutex can be a noop. This is now the case. Contributed by **Josh Morrow** - Codebase improvements and fixes by: - **Omer Katz** - **Mads Jensen** - **Asif Saif Uddin** - **Lars Rinn** - Documentation improvements by: - **Jon Dufresne** - **Fay Cheng** - **Asif Saif Uddin** - **Kyle Verhoog** - **Noah Hall** - **:github_user:`brabiega`** .. _version-4.2.2-post1: 4.2.2-post1 =========== :release-date: 2019-01-01 04:00 P.M IST :release-by: Omer Katz .. note:: The previous release contained code from master. It is now deleted from PyPi. Please use this release instead. - No changes since previous release. .. 
_version-4.2.2: 4.2.2 ===== :release-date: 2018-12-06 04:30 P.M IST :release-by: Omer Katz - Support both Redis client version 2.x and version 3.x. Contributed by **Ash Berlin-Taylor** and **Jeppe Fihl-Pearson** .. _version-4.2.1: 4.2.1 ===== :release-date: 2018-05-21 09:00 A.M IST :release-by: Omer Katz .. note:: The 4.2.0 release contained remains of the ``async`` module by accident. This is now fixed. - Handle librabbitmq fileno raising a ValueError when socket is not connected. Contributed by **Bryan Shelton** .. _version-4.2.0: 4.2.0 ===== :release-date: 2018-05-21 09:00 A.M IST :release-by: Omer Katz - Now passing ``max_retries``, ``interval_start``, ``interval_step``, ``interval_max`` parameters from broker ``transport_options`` to :meth:`~kombu.Connection.ensure_connection` when returning :meth:`~kombu.Connection.default_connection` (Issue #765). Contributed by **Anthony Lukach**. - Qpid: messages are now durable by default Contributed by **David Davis** - Kombu now requires version 2.10.4 or greater of the redis library, in line with Celery Contributed by **Colin Jeanne** - Fixed ImportError in some environments with outdated simplejson Contributed by **Aaron Morris** - MongoDB: fixed failure on MongoDB versions with an "-rc" tag Contributed by **dust8** - Ensure periodic polling frequency does not exceed timeout in virtual transport Contributed by **Arcadiy Ivanov** - Fixed string handling when using python-future module Contributed by **John Koehl** - Replaced "async" with "asynchronous" in preparation for Python 3.7 Contributed by **Thomas Achtemichuk** - Allow removing pool size limit when in use Contributed by **Alex Hill** - Codebase improvements and fixes by: - **j2gg0s** - **Jon Dufresne** - **Jonas Lergell** - **Mads Jensen** - **Nicolas Delaby** - **Omer Katz** - Documentation improvements by: - **Felix Yan** - **Harry Moreno** - **Mads Jensen** - **Omer Katz** - **Radha Krishna. S.** - **Wojciech Matyśkiewicz** .. _version-4.1.0: 4.1.0 ===== :release-date: 2017-07-17 04:45 P.M MST :release-by: Anthony Lukach - SQS: Added support for long-polling on all supported queries. Fixed bug causing error on parsing responses with no retrieved messages from SQS. Contributed by **Anthony Lukach**. - Async hub: Fixed potential infinite loop while performing todo tasks (Issue celery/celery#3712). - Qpid: Fixed bug where messages could have duplicate ``delivery_tag`` (Issue #563). Contributed by **bmbouter**. - MongoDB: Fixed problem with using ``readPreference`` option at pymongo 3.x. Contributed by **Mikhail Elovskikh**. - Re-added support for :pypi:``SQLAlchemy`` Contributed by **Amin Ghadersohi**. - SQS: Fixed bug where hostname would default to ``localhost`` if not specified in settings. Contributed by **Anthony Lukach**. - Redis: Added support for reading password from transport URL (Issue #677). Contributed by **George Psarakis**. - RabbitMQ: Ensured safer encoding of queue arguments. Contributed by **Robert Kopaczewski**. - Added fallback to :func:``uuid.uuid5`` in :func:``generate_oid`` if :func:``uuid.uuid3`` fails. Contributed by **Bill Nottingham**. - Fixed race condition and innacurrate timeouts for :class:``kombu.simple.SimpleBase`` (Issue #720). Contributed by **c-nichols**. - Zookeeper: Fixed last chroot character trimming Contributed by **Dima Kurguzov**. - RabbitMQ: Fixed bug causing an exception when attempting to close an already-closed connection (Issue #690). Contributed by **eavictor**. 
- Removed deprecated use of StopIteration in generators and invalid regex escape sequence. Contributed by **Jon Dufresne**. - Added Python 3.6 to CI testing. Contributed by **Jon Dufresne**. - SQS: Allowed endpoint URL to be specified in the boto3 connection. Contributed by **georgepsarakis**. - SQS: Added support for Python 3.4. Contributed by **Anthony Lukach**. - SQS: ``kombu[sqs]`` now depends on :pypi:`boto3` (no longer using :pypi:`boto)`. - Adds support for Python 3.4+ - Adds support for FIFO queues (Issue #678) and (Issue celery/celery#3690) - Avoids issues around a broken endpoints file (Issue celery/celery#3672) Contributed by **Mischa Spiegelmock** and **Jerry Seutter**. - Zookeeper: Added support for delaying task with Python 3. Contributed by **Dima Kurguzov**. - SQS: Fixed bug where :meth:`kombu.transport.SQS.drain_events` did not support callback argument (Issue #694). Contributed by **Michael Montgomery**. - Fixed bug around modifying dictionary size while iterating over it (Issue #675). Contributed by **Felix Yan**. - etcd: Added handling for :exc:`EtcdException` exception rather than :exc:`EtcdError`. Contributed by **Stephen Milner**. - Documentation improvements by: - **Mads Jensen** - **Matias Insaurralde** - **Omer Katz** - **Dmitry Dygalo** - **Christopher Hoskin** .. _version-4.0.2: 4.0.2 ===== :release-date: 2016-12-15 03:31 P.M PST :release-by: Ask Solem - Now depends on :mod:`amqp` 2.1.4 This new version takes advantage of TCP Keepalive settings on Linux, making it better at detecting closed connections, also in failover conditions. - Redis: Priority was reversed so, e.g. priority 0 became priority 9. .. _version-4.0.1: 4.0.1 ===== :release-date: 2016-12-07 06:00 P.M PST :release-by: Ask Solem - Now depends on :mod:`amqp` 2.1.3 This new version takes advantage of the new ``TCP_USER_TIMEOUT`` socket option on Linux. - Producer: Fixed performance degradation when default exchange specified (Issue #651). - QPid: Switch to using getattr in qpid.Transport.__del__ (Issue #658) Contributed by **Patrick Creech**. - QPid: Now uses monotonic time for timeouts. - MongoDB: Fixed compatibility with Python 3 (Issue #661). - Consumer: ``__exit__`` now skips cancelling consumer if connection-related error raised (Issue #670). - MongoDB: Removes use of natural sort (Issue #638). Contributed by **Anton Chaporgin**. - Fixed wrong keyword argument ``channel`` error (Issue #652). Contributed by **Toomore Chiang**. - Safe argument to ``urllib.quote`` must be bytes on Python 2.x (Issue #645). - Documentation improvements by: - **Carlos Edo** - **Cemre Mengu** .. _version-4.0: 4.0 === :release-date: 2016-10-28 16:45 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 2.0. The new py-amqp version have been refactored for better performance, using modern Python socket conventions, and API consistency. - No longer depends on :mod:`anyjson`. Kombu will now only choose between :pypi:`simplejson` and the built-in :mod:`json`. Using the latest version of simplejson is recommended: .. code-block:: console $ pip install -U simplejson - Removed transports that are no longer supported in this version: - Django ORM transport - SQLAlchemy ORM transport - Beanstalk transport - ZeroMQ transport - amqplib transport (use pyamqp). - API Changes * Signature of :class:`kombu.Message` now takes body as first argment. It used to be ``Message(channel, body=body, **kw)``, but now it's ``Message(body, channel=channel, **kw)``. 
This is unlikely to affect you, as the Kombu API does not have users instantiate messages manually. - New SQS transport Donated by NextDoor, with additional contributions from mdk. .. note:: ``kombu[sqs]`` now depends on :pypi:`pycurl`. - New Consul transport. Contributed by **Wido den Hollander**. - New etcd transport. Contributed by **Stephen Milner**. - New Qpid transport. It was introduced as an experimental transport in Kombu 3.0, but is now mature enough to be fully supported. Created and maintained by **Brian Bouterse**. - Redis: Priority 0 is now lowest, 9 is highest. (**backward incompatible**) This is to match how priorities work in AMQP. Fix contributed by **Alex Koshelev**. - Redis: Support for Sentinel. You can point the connection to a list of sentinel URLs like: .. code-block:: text sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/... where each sentinel is separated by a `;`. Multiple sentinels are handled by the :class:`kombu.Connection` constructor, and placed in the alternative list of servers to connect to in case of connection failure. Contributed by **Sergey Azovskov**, and **Lorenzo Mancini** - RabbitMQ Queue Extensions New arguments have been added to :class:`kombu.Queue` that let you directly and conveniently configure the RabbitMQ queue extensions (a combined sketch is shown further below). - ``Queue(expires=20.0)`` Set queue expiry time in float seconds. See :attr:`kombu.Queue.expires`. - ``Queue(message_ttl=30.0)`` Set queue message time-to-live in float seconds. See :attr:`kombu.Queue.message_ttl`. - ``Queue(max_length=1000)`` Set queue max length (number of messages) as int. See :attr:`kombu.Queue.max_length`. - ``Queue(max_length_bytes=1000)`` Set queue max length (message size total in bytes) as int. See :attr:`kombu.Queue.max_length_bytes`. - ``Queue(max_priority=10)`` Declare queue to be a priority queue that routes messages based on the ``priority`` field of the message. See :attr:`kombu.Queue.max_priority`. - RabbitMQ: ``Message.ack`` now supports the ``multiple`` argument. If multiple is set to True, then all messages received before the message being acked will also be acknowledged. - ``amqps://`` can now be specified to require SSL (Issue #610). - ``Consumer.cancel_by_queue`` is now constant time. - ``Connection.ensure*`` now raises :exc:`kombu.exceptions.OperationalError`. Things that can be retried are now reraised as :exc:`kombu.exceptions.OperationalError`. - Redis: Fixed SSL support. Contributed by **Robert Kolba**. - New ``Queue.consumer_arguments`` can be used for the ability to set consumer priority via ``x-priority``. See https://www.rabbitmq.com/consumer-priority.html Example: .. code-block:: python Queue( 'qname', exchange=Exchange('exchange'), routing_key='qname', consumer_arguments={'x-priority': 3}, ) - Queue/Exchange: ``no_declare`` option added (also enabled for internal amq. exchanges) (Issue #565). - JSON serializer now calls ``obj.__json__`` for unsupported types. This means you can now define a ``__json__`` method for custom types that can be reduced down to a built-in json type. Example: .. code-block:: python class Person: first_name = None last_name = None address = None def __json__(self): return { 'first_name': self.first_name, 'last_name': self.last_name, 'address': self.address, } - JSON serializer now handles datetimes, Django promise, UUID and Decimal. - Beanstalk: Priority 0 is now lowest, 9 is highest. (**backward incompatible**) This is to match how priorities work in AMQP. Fix contributed by **Alex Koshelev**.
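The RabbitMQ queue extension arguments listed above can be combined on a single queue declaration. A minimal sketch, with illustrative names and values:

.. code-block:: python

    from kombu import Exchange, Queue

    # Sketch only: a priority queue with queue expiry, per-queue message TTL
    # and length limits, using the arguments documented above.
    video_queue = Queue(
        'video',
        exchange=Exchange('media', type='direct'),
        routing_key='video',
        expires=20.0,              # queue expiry, float seconds
        message_ttl=30.0,          # per-queue message TTL, float seconds
        max_length=1000,           # maximum number of messages
        max_length_bytes=1000000,  # maximum total message size in bytes
        max_priority=10,           # makes this a priority queue
    )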
- Redis: now supports SSL using the ``ssl`` argument to :class:`~kombu.Connection`. - Redis: Fanout exchanges are no longer visible between vhosts, and fanout messages can be filtered by patterns. (**backward incompatible**) It was possible to enable this mode previously using the ``fanout_prefix``, and ``fanout_patterns`` transport options, but now these are enabled by default. If you want to mix and match producers/consumers running different versions you need to configure your kombu 3.x clients to also enable these options: .. code-block:: pycon >>> Connection(transport_options={ 'fanout_prefix': True, 'fanout_patterns': True, }) - Pidbox: Mailbox new arguments: TTL and expiry. Mailbox now supports new arguments for controlling message TTLs and queue expiry, both for the mailbox queue and for reply queues. - ``queue_expires`` (float/int seconds). - ``queue_ttl`` (float/int seconds). - ``reply_queue_expires`` (float/int seconds). - ``reply_queue_ttl`` (float/int seconds). All take seconds in int/float. Contributed by **Alan Justino**. - Exchange.delivery_mode now defaults to :const:`None`, and the default is instead set by ``Producer.publish``. - :class:`~kombu.Consumer` now supports a new ``prefetch_count`` argument, which if provided will force the consumer to set an initial prefetch count just before starting. - Virtual transports now store ``priority`` as a property, not in ``delivery_info``, to be compatible with AMQP. - ``reply_to`` argument to ``Producer.publish`` can now be a :class:`~kombu.Queue` instance. - Connection: There's now a new method ``Connection.supports_exchange_type(type)`` that can be used to check if the current transport supports a specific exchange type. - SQS: Consumers can now read json messages not sent by Kombu. Contributed by **Juan Carlos Ferrer**. - SQS: Will now log the access key used when authentication fails. Contributed by **Hank John**. - Added new :class:`kombu.mixins.ConsumerProducerMixin` for consumers that will also publish messages on a separate connection. - Messages: Now have a more descriptive ``repr``. Contributed by **Joshua Harlow**. - Async: HTTP client based on curl. - Async: Now uses `poll` instead of `select` where available. - MongoDB: Now supports priorities. Contributed by **Alex Koshelev**. - Virtual transports now support multiple queue bindings. Contributed by **Federico Ficarelli**. - Virtual transports now support the anon exchange. If, when publishing a message, the exchange argument is set to '' (empty string), the routing_key will be regarded as the destination queue. This will bypass the routing table completely, and just deliver the message to the queue name specified in the routing key. - Zookeeper: Transport now uses the built-in support in kazoo to handle failover when using a list of server names. Contributed by **Joshua Harlow**. - ConsumerMixin.run now passes keyword arguments to .consume. Deprecations and removals ------------------------- - The deprecated method ``Consumer.add_queue_from_dict`` has been removed. Use instead: .. code-block:: python consumer.add_queue(Queue.from_dict(queue_name, **options)) - The deprecated function ``kombu.serialization.encode`` has been removed. Use :func:`kombu.serialization.dumps` instead. - The deprecated function ``kombu.serialization.decode`` has been removed. Use :func:`kombu.serialization.loads` instead. - Removed module ``kombu.syn``. ``detect_environment`` has been moved to kombu.utils.compat ..
_version-3.0.37: 3.0.37 ====== :release-date: 2016-10-06 05:00 P.M PDT :release-by: Ask Solem - Connection: Return value of ``.info()`` was no longer JSON serializable, leading to "itertools.cycle object not JSON serializable" errors (Issue #635). .. _version-3.0.36: 3.0.36 ====== :release-date: 2016-09-30 03:06 P.M PDT :release-by: Ask Solem - Connection: Fixed bug when cloning connection with alternate urls. Fix contributed by Emmanuel Cazenave. - Redis: Fixed problem with unix socket connections. https://github.com/celery/celery/issues/2903 Fix contributed by Raphael Michel. - Redis: Fixed compatibility with older redis-py versions (Issue #576). - Broadcast now retains queue name when being copied/pickled (Issue #578). .. _version-3.0.35: 3.0.35 ====== :release-date: 2016-03-22 11:22 P.M PST :release-by: Ask Solem - msgpack: msgpack support now requires msgpack-python > 0.4.7. - Redis: TimeoutError was no longer handled as a recoverable error. - Redis: Adds the ability to set more Redis connection options using ``Connection(transport_options={...})``. - ``socket_connect_timeout`` - ``socket_keepalive`` (requires :mod:`redis-py` > 2.10) - ``socket_keepalive_options`` (requires :mod:`redis-py` > 2.10) - msgpack: Fixes support for binary/unicode data .. _version-3.0.34: 3.0.34 ====== :release-date: 2016-03-03 05:30 P.M PST :release-by: Ask Solem - Qpid: Adds async error handling. Contributed by Brian Bouterse. - Qpid: Delivery tag is now a UUID4 (Issue #563). Fix contributed by Brian Bouterse. - Redis: Connection.as_uri() returned malformed URLs when the ``redis+socket`` scheme was used (Issue celery/celery#2995). - msgpack: Use binary encoding instead of utf-8 (Issue #570). .. _version-3.0.33: 3.0.33 ====== :release-date: 2016-01-08 06:36 P.M PST :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.9. - Redis: Fixed problem with auxiliary connections causing the main consumer connection to be closed (Issue #550). - Qpid: No longer uses threads to operate, to ensure compatibility with all environments (Issue #531). .. _version-3.0.32: 3.0.32 ====== :release-date: 2015-12-16 02:29 P.M PST :release-by: Ask Solem - Redis: Fixed bug introduced in 3.0.31 where the redis transport always connects to localhost, regardless of host setting. .. _version-3.0.31: 3.0.31 ====== :release-date: 2015-12-16 12:00 P.M PST :release-by: Ask Solem - Redis: Fixed bug introduced in 3.0.30 where socket was prematurely disconnected. - Hub: Removed debug logging message: "Deregistered fd..." (Issue #549). .. _version-3.0.30: 3.0.30 ====== :release-date: 2015-12-07 12:28 A.M PST :release-by: Ask Solem - Fixes compatibility with uuid in Python 2.7.11 and 3.5.1. Fix contributed by Kai Groner. - Redis transport: Attempt at fixing problem with hanging consumer after disconnected from server. - Event loop: Attempt at fixing issue with 100% CPU when using the Redis transport. - Database transport: Fixed Oracle compatibility. An "ORA-00907: missing right parenthesis" error could manifest when using an Oracle database with the database transport. Fix contributed by Deepak N. - Documentation fixes. Contributed by Tommaso Barbugli. .. _version-3.0.29: 3.0.29 ====== :release-date: 2015-10-26 11:10 A.M PDT :release-by: Ask Solem - Fixed serialization issue for ``bindings.as_dict()`` (Issue #453). Fix contributed by Sergey Tikhonov. - Json serializer wrongly treated bytes as ``ascii``, not ``utf-8`` (Issue #532). - MongoDB: Now supports pymongo 3.x. Contributed by Len Buckens. - SQS: Tests passing on Python 3.
Fix contributed by Felix Yan .. _version-3.0.28: 3.0.28 ====== :release-date: 2015-10-12 12:00 PM PDT :release-by: Ask Solem .. admonition:: Django transport migrations. If you're using Django 1.8 and have already created the kombu_transport_django tables, you have to run a fake initial migration: .. code-block:: console $ python manage.py migrate kombu_transport_django --fake-initial - No longer compatible with South by default. To keep using kombu.transport.django with South migrations you now need to configure a new location for the kombu migrations: .. code-block:: python SOUTH_MIGRATION_MODULES = { 'kombu_transport_django': 'kombu.transport.django.south_migrations', } - Keep old South migrations in ``kombu.transport.django.south_migrations``. - Now works with Redis < 2.10 again. .. _version-3.0.27: 3.0.27 ====== :release-date: 2015-10-09 3:10 PM PDT :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.7. - Fixed libSystem import error on some macOS 10.11 (El Capitan) installations. Fix contributed by Eric Wang. - Now compatible with Django 1.9. - Django: Adds migrations for the database transport. - Redis: Now depends on py-redis 2.10.0 or later (Issue #468). - QPid: Can now connect as localhost (Issue #519). Fix contributed by Brian Bouterse. - QPid: Adds support for ``login_method`` (Issue #502, Issue #499). Contributed by Brian Bouterse. - QPid: Now reads SASL mechanism from broker string (Issue #498). Fix contributed by Brian Bouterse. - QPid: Monitor thread now properly terminated on session close (Issue #485). Fix contributed by Brian Bouterse. - QPid: Fixed file descriptor leak (Issue #476). Fix contributed by Jeff Ortel - Docs: Fixed wrong order for entrypoint arguments (Issue #473). - ConsumerMixin: Connection error logs now include traceback (Issue #480). - BaseTransport now raises RecoverableConnectionError when disconnected (Issue #507). - Consumer: Adds ``tag_prefix`` option to modify how consumer tags are generated (Issue #509). .. _version-3.0.26: 3.0.26 ====== :release-date: 2015-04-22 06:00 P.M UTC :release-by: Ask Solem - Fixed compatibility with py-redis versions before 2.10.3 (Issue #470). .. _version-3.0.25: 3.0.25 ====== :release-date: 2015-04-21 02:00 P.M UTC :release-by: Ask Solem - pyamqp/librabbitmq now uses 5671 as default port when SSL is enabled (Issue #459). - Redis: Now supports passwords in ``redis+socket://:pass@host:port`` URLs (Issue #460). - ``Producer.publish`` now defines the ``expiration`` property in support of the `RabbitMQ per-message TTL extension`_. Contributed by Anastasis Andronidis. - Connection transport attribute now set correctly for all transports. Contributed by Alex Koshelev. - qpid: Fixed bug where the connectionw as not being closed properly. Contributed by Brian Bouterse. - :class:`~kombu.entity.bindings` is now JSON serializable (Issue #453). Contributed by Sergey Tikhonov. - Fixed typo in error when yaml is not installed (said ``msgpack``). Contributed by Joshua Harlow. - Redis: Now properly handles :exc:`redis.exceptions.TimeoutError` raised by :mod:`redis`. Contributed by markow. - qpid: Adds additional string to check for when connecting to qpid. When we connect to qpid, we need to ensure that we skip to the next SASL mechanism if the current mechanism fails. Otherwise, we will keep retrying the connection with a non-working mech. Contributed by Chris Duryee. - qpid: Handle ``NotFound`` exceptions. Contributed by Brian Bouterse. - :class:`Queue.__repr__` now makes sure return value is not unicode (Issue #440). 
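To illustrate the ``expiration`` argument to ``Producer.publish`` added earlier in these notes (per-message TTL), here is a minimal sketch; the broker URL, exchange and value are illustrative, and the value is assumed to be in seconds:

.. code-block:: python

    from kombu import Connection, Exchange, Producer

    # Sketch only: publish a message the broker may discard if it is not
    # consumed within 10 seconds (RabbitMQ per-message TTL extension).
    with Connection('amqp://guest:guest@localhost//') as conn:
        producer = Producer(conn.channel(),
                            exchange=Exchange('news', type='topic'))
        producer.publish(
            {'hello': 'world'},
            routing_key='news.fast',
            serializer='json',
            expiration=10,
        )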
- qpid: ``Queue.purge`` incorrectly raised :exc:`AttributeError` if the queue does not exist (Issue #439). Contributed by Brian Bouterse. - Linux: Now ignores permission errors on epoll unregister. .. _`RabbitMQ per-message TTL extension`: https://www.rabbitmq.com/ttl.html .. _version-3.0.24: 3.0.24 ====== :release-date: 2014-11-17 11:00 P.M UTC :release-by: Ask Solem - The `Qpid `_ broker is supported for Python 2.x environments. The Qpid transport includes full SSL support within Kombu. See the :mod:`kombu.transport.qpid` docs for more info. Contributed by Brian Bouterse and Chris Duryee through support from Red Hat. - Dependencies: extra[librabbitmq] now requires librabbitmq 1.6.0. - Docstrings for :class:`~kombu.utils.limit.TokenBucket` did not match implementation. Fix contributed by Jesse Dhillon. - :func:`~kombu.common.oid_from` accidentally called ``uuid.getnode()`` but did not use the return value. Fix contributed by Alexander Todorov. - Redis: Now ignores errors when closing the underlying connection. - Redis: Restoring messages will now use a single connection. - ``kombu.five.monotonic``: Can now be imported even if ctypes is not available for some reason (e.g. App Engine). - Documentation: Improved example to use the ``declare`` argument to ``Producer`` (Issue #423). - Django: Fixed ``app_label`` for older Django versions (``< 1.7``). (Issue #414). .. _version-3.0.23: 3.0.23 ====== :release-date: 2014-09-14 10:45 P.M UTC :release-by: Ask Solem - Django: Fixed bug in the Django 1.7 compatibility improvements related to autocommit handling. Contributed by Radek Czajka. - Django: The Django transport models would not be created on syncdb after app label rename (Issue #406). .. _version-3.0.22: 3.0.22 ====== :release-date: 2014-09-04 03:00 P.M UTC :release-by: Ask Solem - kombu.async: Min. delay between waiting for timer was always increased to one second. - Fixed bug in itermessages where message is received after the with statement exits the block. Fixed by Rumyana Neykova - Connection.autoretry: Now works with functions missing wrapped attributes (``__module__``, ``__name__``, ``__doc__``). Fixes #392. Contributed by johtso. - Django: Now sets custom app label for ``kombu.transport.django`` to work with recent changes in Django 1.7. - SimpleQueue removed messages from the wrong end of buffer (Issue #380). - Tests: Now using ``unittest.mock`` if available (Issue #381). .. _version-3.0.21: 3.0.21 ====== :release-date: 2014-07-07 02:00 P.M UTC :release-by: Ask Solem - Fixed remaining bug in ``maybe_declare`` for ``auto_delete`` exchanges. Fix contributed by Roger Hu. - MongoDB: Creating a channel now properly evaluates a connection (Issue #363). Fix contributed by Len Buckens. .. _version-3.0.20: 3.0.20 ====== :release-date: 2014-06-24 02:30 P.M UTC :release-by: Ask Solem - Reverts change in 3.0.17 where ``maybe_declare`` caches the declaration of auto_delete queues and exchanges. Fix contributed by Roger Hu. - Redis: Fixed race condition when using gevent and the channel is closed. Fix contributed by Andrew Rodionoff. .. _version-3.0.19: 3.0.19 ====== :release-date: 2014-06-09 03:10 P.M UTC :release-by: Ask Solem - The wheel distribution did not support Python 2.6 by failing to list the extra dependencies required. - Durable and auto_delete queues/exchanges can be cached using ``maybe_declare``. .. _version-3.0.18: 3.0.18 ====== :release-date: 2014-06-02 06:00 P.M UTC :release-by: Ask Solem - A typo introduced in 3.0.17 caused kombu.async.hub to crash (Issue #360). ..
_version-3.0.17: 3.0.17 ====== :release-date: 2014-06-02 05:00 P.M UTC :release-by: Ask Solem - ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.2. - Async: Event loop now selectively removes file descriptors for the mode it failed in, and keeps others (e.g read vs write). Fix contributed by Roger Hu. - CouchDB: Now works without userid set. Fix contributed by Latitia M. Haskins. - SQLAlchemy: Now supports recovery from connection errors. Contributed by Felix Schwarz. - Redis: Restore at shutdown now works when ack emulation is disabled. - :func:`kombu.common.eventloop` accidentally swallowed socket errors. - Adds :func:`kombu.utils.url.sanitize_url` .. _version-3.0.16: 3.0.16 ====== :release-date: 2014-05-06 01:00 P.M UTC :release-by: Ask Solem - ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.1. - Redis: Fixes ``TypeError`` problem in ``unregister`` (Issue #342). Fix contributed by Tobias Schottdorf. - Tests: Some unit tests accidentally required the `redis-py` library. Fix contributed by Randy Barlow. - librabbitmq: Would crash when using an older version of :mod:`librabbitmq`, now emits warning instead. .. _version-3.0.15: 3.0.15 ====== :release-date: 2014-04-15 09:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.5. - RabbitMQ 3.3 changes QoS semantics (Issue #339). See the RabbitMQ release notes here: http://www.rabbitmq.com/blog/2014/04/02/breaking-things-with-rabbitmq-3-3/ A new connection property has been added that can be used to detect whether the remote server is using this new QoS behavior: .. code-block:: pycon >>> Connection('amqp://').qos_behavior_matches_spec False so if your application depends on the old semantics you can use this to set the ``apply_global`` flag appropriately: .. code-block:: python def update_prefetch_count(channel, new_value): channel.basic_qos( 0, new_value, not channel.connection.client.qos_behavior_matches_spec, ) - Users of :mod:`librabbitmq` is encouraged to upgrade to librabbitmq 1.5.0. The ``kombu[librabbitmq]`` extra has been updated to depend on this version. - Pools: Now takes transport options into account when comparing connections (Issue #333). - MongoDB: Fixes Python 3 compatibility. - Async: select: Ignore socket errors when attempting to unregister handles from the loop. - Pidbox: Can now be configured to use a serializer other than json, but specifying a serializer argument to :class:`~kombu.pidbox.Mailbox`. Contributed by Dmitry Malinovsky. - Message decompression now works with Python 3. Fix contributed by Adam Gaca. .. _version-3.0.14: 3.0.14 ====== :release-date: 2014-03-19 07:00 P.M UTC :release-by: Ask Solem - **MongoDB**: Now endures a connection failover (Issue #123). Fix contributed by Alex Koshelev. - **MongoDB**: Fixed ``KeyError`` when a replica set member is removed. Also fixes celery#971 and celery/#898. Fix contributed by Alex Koshelev. - **MongoDB**: Fixed MongoDB broadcast cursor re-initialization bug. Fix contributed by Alex Koshelev. - **Async**: Fixed bug in lax semaphore implementation where in some usage patterns the limit was not honored correctly. Fix contributed by Ionel Cristian Mărieș. - **Redis**: Fixed problem with fanout when using Python 3 (Issue #324). - **Redis**: Fixed ``AttributeError`` from attempting to close a non-existing connection (Issue #320). .. _version-3.0.13: 3.0.13 ====== :release-date: 2014-03-03 04:00 P.M UTC :release-by: Ask Solem - Redis: Fixed serious race condition that could lead to data loss. 
The delivery tags were accidentally set to be an incremental number local to the channel, but the delivery tags need to be globally unique so that a message can not overwrite an older message in the backup store. This change is not backwards incompatible and you are encouraged to update all your system using a previous version as soon as possible. - Now depends on :mod:`amqp` 1.4.4. - Pidbox: Now makes sure message encoding errors are handled by default, so that a custom error handler does not need to be specified. - Redis: The fanout exchange can now use AMQP patterns to route and filter messages. This change is backwards incompatible and must be enabled with the ``fanout_patterns`` transport option: .. code-block:: pycon >>> conn = kombu.Connection('redis://', transport_options={ ... 'fanout_patterns': True, ... }) When enabled the exchange will work like an amqp topic exchange if the binding key is a pattern. This is planned to be default behavior in the future. - Redis: Fixed ``cycle`` no such attribute error. .. _version-3.0.12: 3.0.12 ====== :release-date: 2014-02-09 03:50 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.3. - Fixes Python 3.4 logging incompatibility (Issue #311). - Redis: Now properly handles unknown pub/sub messages. Fix contributed by Sam Stavinoha. - amqplib: Fixed bug where more bytes were requested from the socket than necessary. Fix contributed by Ionel Cristian Mărieș. .. _version-3.0.11: 3.0.11 ====== :release-date: 2014-02-03 05:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.2. - Now always trusts messages of type `application/data` and `application/text` or which have an unspecified content type (Issue #306). - Compression errors are now handled as decode errors and will trigger the ``Consumer.on_decode_error`` callback if specified. - New ``kombu.Connection.get_heartbeat_interval()`` method that can be used to access the negotiated heartbeat value. - `kombu.common.oid_for` no longer uses the MAC address of the host, but instead uses a process-wide UUID4 as a node id. This avoids a call to `uuid.getnode()` at module scope. - Hub.add: Now normalizes registered fileno. Contributed by Ionel Cristian Mărieș. - SQS: Fixed bug where the prefetch count limit was not respected. .. _version-3.0.10: 3.0.10 ====== :release-date: 2014-01-17 05:40 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.1. - ``maybe_declare`` now raises a "recoverable connection error" if the channel is disconnected instead of a :exc:`ChannelError` so that the operation can be retried. - Redis: ``Consumer.cancel()`` is now thread safe. This fixes an issue when using gevent/eventlet and a message is handled after the consumer is canceled resulting in a "message for queue without consumers" error. - Retry operations would not always respect the interval_start value when calculating the time to sleep for (Issue #303). Fix contributed by Antoine Legrand. - Timer: Fixed "unhashable type" error on Python 3. - Hub: Do not attempt to unregister operations on an already closed poller instance. .. _version-3.0.9: 3.0.9 ===== :release-date: 2014-01-13 05:30 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.4.0. - Redis: Basic cancel for fanout based queues now sends a corresponding ``UNSUBSCRIBE`` command to the server. This fixes an issue with pidbox where reply messages could be received after the consumer was canceled, giving the ``"message to queue without consumers"`` error. 
- MongoDB: Improved connection string and options handling (Issue #266 + Issue #120). Contributed by Alex Koshelev. - SQS: Limit the number of messages when receiving in batch to 10. This is a hard limit enforced by Amazon so the sqs transport must not exceed this value. Fix contributed by Eric Reynolds. - ConsumerMixin: ``consume`` now checks heartbeat every time the socket times out. Contributed by Dustin J. Mitchell. - Retry Policy: A max retries of 0 did not retry forever. Fix contributed by Antoine Legrand. - Simple: If passing a Queue object the simple utils will now take default routing key from that queue. Contributed by Fernando Jorge Mota. - ``repr(producer)`` no longer evaluates the underlying channel. - Redis: The map of Redis error classes is now exposed at the module level using the :func:`kombu.transport.redis.get_redis_error_classes` function. - Async: ``Hub.close`` now sets ``.poller`` to None. .. _version-3.0.8: 3.0.8 ===== :release-date: 2013-12-16 05:00 P.M UTC :release-by: Ask Solem - Serializer: loads and dumps now wrap exceptions raised into :exc:`~kombu.exceptions.DecodeError` and :exc:`kombu.exceptions.EncodeError` respectively. Contributed by Ionel Cristian Maries - Redis: Would attempt to read from the wrong connection if a select/epoll/kqueue exception event happened. Fix contributed by Michael Nelson. - Redis: Disabling ack emulation now works properly. Fix contributed by Michael Nelson. - Redis: :exc:`IOError` and :exc:`OSError` are now treated as recoverable connection errors. - SQS: Improved performance by reading messages in bulk. Contributed by Matt Wise. - Connection Pool: Attempting to acquire from a closed pool will now raise :class:`RuntimeError`. .. _version-3.0.7: 3.0.7 ===== :release-date: 2013-12-02 04:00 P.M UTC :release-by: Ask Solem - Fixes Python 2.6 compatibility. - Redis: Fixes 'bad file descriptor' issue. .. _version-3.0.6: 3.0.6 ===== :release-date: 2013-11-21 04:50 P.M UTC :release-by: Ask Solem - Timer: No longer attempts to hash keyword arguments (Issue #275). - Async: Did not account for the long type for file descriptors. Fix contributed by Fabrice Rabaute. - PyPy: kqueue support was broken. - Redis: Bad pub/sub payloads no longer crash the consumer. - Redis: Unix socket URLs can now specify a virtual host by including it as a query parameter. Example URL specifying a virtual host using database number 3: .. code-block:: text redis+socket:///tmp/redis.sock?virtual_host=3 - ``kombu.VERSION`` is now a named tuple. .. _version-3.0.5: 3.0.5 ===== :release-date: 2013-11-15 11:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.3.3. - Redis: Fixed Python 3 compatibility problem (Issue #270). - MongoDB: Fixed problem with URL parsing when authentication used. Fix contributed by dongweiming. - pyamqp: Fixed small issue when publishing the message and the property dictionary was set to None. Fix contributed by Victor Garcia. - Fixed problem in ``repr(LaxBoundedSemaphore)``. Fix contributed by Antoine Legrand. - Tests now passing on Python 3.3. .. _version-3.0.4: 3.0.4 ===== :release-date: 2013-11-08 01:00 P.M UTC :release-by: Ask Solem - common.QoS: ``decrement_eventually`` now makes sure the value does not go below 1 if a prefetch count is enabled. .. _version-3.0.3: 3.0.3 ===== :release-date: 2013-11-04 03:00 P.M UTC :release-by: Ask Solem - SQS: Properly reverted patch that caused delays between messages.
Contributed by James Saryerwinnie. - select: Clear all registered fds on poller.close. - Eventloop: unregister if EBADF raised. .. _version-3.0.2: 3.0.2 ===== :release-date: 2013-10-29 02:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` version 1.3.2. - select: Fixed problem where unregister did not properly remove the fd. .. _version-3.0.1: 3.0.1 ===== :release-date: 2013-10-24 04:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` version 1.3.1. - Redis: New option ``fanout_keyprefix``. This transport option is recommended for all users as it ensures that broadcast (fanout) messages sent are only seen by the current virtual host: .. code-block:: python Connection('redis://', transport_options={'fanout_keyprefix': True}) However, enabling this means that you cannot send or receive messages from older Kombu versions so make sure all of your participants are upgraded and have the transport option enabled. This will be the default behavior in Kombu 4.0. - Distribution: Removed file ``requirements/py25.txt``. - MongoDB: Now disables ``auto_start_request``. - MongoDB: Enables ``use_greenlets`` if eventlet/gevent used. - Pidbox: Fixes problem where expires header was None, which is a value not supported by the amq protocol. - ConsumerMixin: New ``consumer_context`` method for starting the consumer without draining events. .. _version-3.0.0: 3.0.0 ===== :release-date: 2013-10-14 04:00 P.M BST :release-by: Ask Solem - Now depends on :mod:`amqp` version 1.3. - No longer supports Python 2.5. The minimum Python version supported is now Python 2.6.0 for Python 2, and Python 3.3 for Python 3. - Dual codebase supporting both Python 2 and 3. No longer using ``2to3``, making it easier to maintain support for both versions. - pickle, yaml and msgpack deserialization is now disabled by default. This means that Kombu will by default refuse to handle any content type other than json. Pickle is known to be a security concern as it will happily load any object that is embedded in a pickle payload, and payloads can be crafted to do almost anything you want. The default serializer in Kombu is json but it also supports a number of other serialization formats that it will evaluate if received: including pickle. It was always assumed that users were educated about the security implications of pickle, but in hindsight we don't think users should be expected to secure their services if we have the ability to be secure by default. By disabling any content type that the user did not explicitly want enabled we ensure that the user must be conscious when they add pickle as a serialization format to support. The other built-in serializers (yaml and msgpack) are also disabled even though they aren't considered insecure [#f1]_ at this point. Instead they're disabled so that if a security flaw is found in one of these libraries in the future, you will only be affected if you have explicitly enabled them. To have your consumer accept formats other than json you have to explicitly add the wanted formats to a white-list of accepted content types: .. code-block:: pycon >>> c = Consumer(conn, accept=['json', 'pickle', 'msgpack']) or when using synchronous access: ..
code-block:: pycon >>> msg = queue.get(accept=['json', 'pickle', 'msgpack']) The ``accept`` argument was first supported for consumers in version 2.5.10, and first supported by ``Queue.get`` in version 2.5.15, so to stay compatible with previous versions you can enable the previous behavior: >>> from kombu import enable_insecure_serializers >>> enable_insecure_serializers() But note that this has global effect, so be very careful should you use it. .. rubric:: Footnotes .. [#f1] The PyYAML library has a :func:`yaml.load` function with some of the same security implications as pickle, but Kombu uses the :func:`yaml.safe_load` function which is not known to be affected. - kombu.async: Experimental event loop implementation. This code was previously in Celery but was moved here to make it easier for async transport implementations. The API is meant to match the Tulip API which will be included in Python 3.4 as the ``asyncio`` module. It's not a complete implementation obviously, but the goal is that it will be easy to change to it once that is possible. - Utility function ``kombu.common.ipublish`` has been removed. Use ``Producer(..., retry=True)`` instead. - Utility function ``kombu.common.isend_reply`` has been removed. Use ``send_reply(..., retry=True)`` instead. - ``kombu.common.entry_to_queue`` and ``kombu.messaging.entry_to_queue`` have been removed. Use ``Queue.from_dict(name, **options)`` instead. - Redis: Messages are now restored at the end of the list. Contributed by Mark Lavin. - ``StdConnectionError`` and ``StdChannelError`` are removed and :exc:`amqp.ConnectionError` and :exc:`amqp.ChannelError` are used instead. - Message object implementation has moved to :class:`kombu.message.Message`. - Serialization: Renamed functions encode/decode to :func:`~kombu.serialization.dumps` and :func:`~kombu.serialization.loads`. For backward compatibility the old names are still available as aliases. - The ``kombu.log.anon_logger`` function has been removed. Use :func:`~kombu.log.get_logger` instead. - ``queue_declare`` now returns a namedtuple with ``queue``, ``message_count``, and ``consumer_count`` fields. - LamportClock: Can now set the lock class. - :mod:`kombu.utils.clock`: Utilities for ordering events added. - :class:`~kombu.simple.SimpleQueue` now allows you to override the exchange type used. Contributed by Vince Gonzales. - Zookeeper transport updated to support new changes in the :mod:`kazoo` library. Contributed by Mahendra M. - pyamqp/librabbitmq: Transport options are now forwarded as keyword arguments to the underlying connection (Issue #214). - Transports may now distinguish between recoverable and irrecoverable connection and channel errors. - ``kombu.utils.Finalize`` has been removed: Use :mod:`multiprocessing.util.Finalize` instead. - Memory transport now supports the fanout exchange type. Contributed by Davanum Srinivas. - Experimental new `Pyro`_ transport (:mod:`kombu.transport.pyro`). Contributed by Tommie McAfee. .. _`Pyro`: http://pythonhosted.org/Pyro - Experimental new `SoftLayer MQ`_ transport (:mod:`kombu.transport.SLMQ`). Contributed by Kevin McDonald. .. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue - Eventio: Kqueue breaks in subtle ways so select is now used instead. - SQLAlchemy transport: Can now specify table names using the ``queue_tablename`` and ``message_tablename`` transport options. Contributed by Ryan Petrello.
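A small sketch of the ``queue_tablename``/``message_tablename`` transport options named above; the URL and table names are illustrative:

.. code-block:: python

    from kombu import Connection

    # Sketch only: point the SQLAlchemy transport at custom table names.
    conn = Connection('sqlalchemy+sqlite:///kombu.db', transport_options={
        'queue_tablename': 'my_queues',
        'message_tablename': 'my_messages',
    })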
Redis transport: Now supports using local UNIX sockets to communicate with the Redis server (Issue #1283) To connect using a UNIX socket you have to use the ``redis+socket`` URL-prefix: ``redis+socket:///tmp/redis.sock``. This functionality was merged from the `celery-redis-unixsocket`_ project. Contributed by Maxime Rouyrre. ZeroMQ transport: drain_events now supports timeout. Contributed by Jesper Thomschütz. .. _`celery-redis-unixsocket`: https://github.com/piquadrat/celery-redis-unixsocket .. _version-2.5.16: 2.5.16 ====== :release-date: 2013-10-04 03:30 P.M BST :release-by: Ask Solem - Python 3: Fixed problem with dependencies not being installed. .. _version-2.5.15: 2.5.15 ====== :release-date: 2013-10-04 03:30 P.M BST :release-by: Ask Solem - Declaration cache: Now only keeps hash of declaration so that it does not keep a reference to the channel. - Declaration cache: Now respects ``entity.can_cache_declaration`` attribute. - Fixes Python 2.5 compatibility. - Fixes tests after python-msgpack changes. - ``Queue.get``: Now supports ``accept`` argument. .. _version-2.5.14: 2.5.14 ====== :release-date: 2013-08-23 05:00 P.M BST :release-by: Ask Solem - safe_str did not work properly resulting in :exc:`UnicodeDecodeError` (Issue #248). .. _version-2.5.13: 2.5.13 ====== :release-date: 2013-08-16 04:00 P.M BST :release-by: Ask Solem - Now depends on :mod:`amqp` 1.0.13 - Fixed typo in Django functional tests. - safe_str now returns Unicode in Python 2.x Fix contributed by Germán M. Bravo. - amqp: Transport options are now merged with arguments supplied to the connection. - Tests no longer depends on distribute, which was deprecated and merged back into setuptools. Fix contributed by Sascha Peilicke. - ConsumerMixin now also restarts on channel related errors. Fix contributed by Corentin Ardeois. .. _version-2.5.12: 2.5.12 ====== :release-date: 2013-06-28 03:30 P.M BST :release-by: Ask Solem - Redis: Ignore errors about keys missing in the round-robin cycle. - Fixed test suite errors on Python 3. - Fixed msgpack test failures. .. _version-2.5.11: 2.5.11 ====== :release-date: 2013-06-25 02:30 P.M BST :release-by: Ask Solem - Now depends on amqp 1.0.12 (Py3 compatibility issues). - MongoDB: Removed cause of a "database name in URI is being ignored" warning. Fix by Flavio Percoco Premoli - Adds ``passive`` option to :class:`~kombu.Exchange`. Setting this flag means that the exchange will not be declared by kombu, but that it must exist already (or an exception will be raised). Contributed by Rafal Malinowski - Connection.info() now gives the current hostname and not the list of available hostnames. Fix contributed by John Shuping. - pyamqp: Transport options are now forwarded as kwargs to ``amqp.Connection``. - librabbitmq: Transport options are now forwarded as kwargs to ``librabbitmq.Connection``. - librabbitmq: Now raises :exc:`NotImplementedError` if SSL is enabled. The librabbitmq library does not support ssl, but you can use stunnel or change to the ``pyamqp://`` transport instead. Fix contributed by Dan LaMotte. - librabbitmq: Fixed a cyclic reference at connection close. - eventio: select implementation now removes bad file descriptors. - eventio: Fixed Py3 compatibility problems. - Functional tests added for py-amqp and librabbitmq transports. - Resource.force_close_all no longer uses a mutex. - Pidbox: Now ignores `IconsistencyError` when sending replies, as this error simply means that the client may no longer be alive. 
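A short sketch of the ``passive`` exchange option described in the 2.5.11 notes above; the exchange name and broker URL are illustrative:

.. code-block:: python

    from kombu import Connection, Exchange

    # Sketch only: declare against an exchange that must already exist;
    # with passive=True kombu checks for it instead of creating it.
    upstream = Exchange('upstream.events', type='topic', passive=True)
    with Connection('amqp://guest:guest@localhost//') as conn:
        bound = upstream(conn)   # bind to the connection's default channel
        bound.declare()          # passive declare: raises if missing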
- Adds new :meth:`Connection.collect <~kombu.Connection.collect>` method, that can be used to clean up after connections without I/O. - ``queue_bind`` is no longer called for queues bound to the "default exchange" (Issue #209). Contributed by Jonathan Halcrow. - The max_retries setting for retries was not respected correctly (off by one). .. _version-2.5.10: 2.5.10 ====== :release-date: 2013-04-11 06:10 P.M BST :release-by: Ask Solem Note about upcoming changes for Kombu 3.0 ----------------------------------------- Kombu 3 consumers will no longer accept pickle/yaml or msgpack by default, and you will have to explicitly enable untrusted deserializers either globally using :func:`kombu.enable_insecure_serializers`, or using the ``accept`` argument to :class:`~kombu.Consumer`. Changes ------- - New utility function to disable/enable untrusted serializers. - :func:`kombu.disable_insecure_serializers` - :func:`kombu.enable_insecure_serializers`. - Consumer: ``accept`` can now be used to specify a whitelist of content types to accept. If the accept whitelist is set and a message is received with a content type that is not in the whitelist then a :exc:`~kombu.exceptions.ContentDisallowed` exception is raised. Note that this error can be handled by the already existing `on_decode_error` callback Examples: .. code-block:: python Consumer(accept=['application/json']) Consumer(accept=['pickle', 'json']) - Now depends on amqp 1.0.11 - pidbox: Mailbox now supports the ``accept`` argument. - Redis: More friendly error for when keys are missing. - Connection URLs: The parser did not work well when there were multiple '+' tokens. .. _version-2.5.9: 2.5.9 ===== :release-date: 2013-04-08 05:07 P.M BST :release-by: Ask Solem - Pidbox: Now warns if there are multiple nodes consuming from the same pidbox. - Adds :attr:`Queue.on_declared ` A callback to be called when the queue is declared, with signature ``(name, messages, consumers)``. - Now uses fuzzy matching to suggest alternatives to typos in transport names. - SQS: Adds new transport option ``queue_prefix``. Contributed by j0hnsmith. - pyamqp: No longer overrides verify_connection. - SQS: Now specifies the ``driver_type`` and ``driver_name`` attributes. Fix contributed by Mher Movsisyan. - Fixed bug with ``kombu.utils.retry_over_time`` when no errback specified. .. _version-2.5.8: 2.5.8 ===== :release-date: 2013-03-21 04:00 P.M UTC :release-by: Ask Solem - Now depends on :mod:`amqp` 1.0.10 which fixes a Python 3 compatibility error. - Redis: Fixed a possible race condition (Issue #171). - Redis: Ack emulation/visibility_timeout can now be disabled using a transport option. Ack emulation adds quite a lot of overhead to ensure data is safe even in the event of an unclean shutdown. If data loss do not worry you there is now an `ack_emulation` transport option you can use to disable it: .. code-block:: python Connection('redis://', transport_options={'ack_emulation': False}) - SQS: Fixed :mod:`boto` v2.7 compatibility (Issue #207). - Exchange: Should not try to re-declare default exchange (``""``) (Issue #209). - SQS: Long polling is now disabled by default as it was not implemented correctly, resulting in long delays between receiving messages (Issue #202). - Fixed Python 2.6 incompatibility depending on ``exc.errno`` being available. Fix contributed by Ephemera. .. 
_version-2.5.7: 2.5.7 ===== :release-date: 2013-03-08 01:00 P.M UTC :release-by: Ask Solem - Now depends on amqp 1.0.9. - Redis: A regression in 2.5.6 caused the redis transport to ignore options set in ``transport_options``. - Redis: New ``socket_timeout`` transport option. - Redis: ``InconsistencyError`` is now regarded as a recoverable error. - Resource pools: Will no longer attempt to release a resource that was never acquired. - MongoDB: Now supports the ``ssl`` option. Contributed by Sebastian Pawlus. .. _version-2.5.6: 2.5.6 ===== :release-date: 2013-02-08 01:00 P.M UTC :release-by: Ask Solem - Now depends on amqp 1.0.8 which works around a bug found on some Python 2.5 installations where 2**32 overflows to 0. .. _version-2.5.5: 2.5.5 ===== :release-date: 2013-02-07 05:00 P.M UTC :release-by: Ask Solem - SQS: Now supports long polling (Issue #176). The polling interval default has been changed to 0 and a new transport option (``wait_time_seconds``) has been added. This parameter specifies how long to wait for a message from SQS, and defaults to 20 seconds, which is the maximum value currently allowed by Amazon SQS. Contributed by James Saryerwinnie. - SQS: Now removes unpickleable fields before restoring messages. - Consumer.__exit__ now ignores exceptions occurring while canceling the consumer. - Virtual: Routing keys can now consist of characters also used in regular expressions (e.g. parens) (Issue #194). - Virtual: Fixed compression header when restoring messages. Fix contributed by Alex Koshelev. - Virtual: ack/reject/requeue now works while using ``basic_get``. - Virtual: Message.reject is now supported by virtual transports (requeue depends on individual transport support). - Fixed typo in hack used for static analyzers. Fix contributed by Basil Mironenko. .. _version-2.5.4: 2.5.4 ===== :release-date: 2012-12-10 12:35 P.M UTC :release-by: Ask Solem - Fixed problem with connection clone and multiple URLs (Issue #182). Fix contributed by Dane Guempel. - zeromq: Now compatible with libzmq 3.2.x. Fix contributed by Andrey Antukh. - Fixed Python 3 installation problem (Issue #187). .. _version-2.5.3: 2.5.3 ===== :release-date: 2012-11-29 12:35 P.M UTC :release-by: Ask Solem - Pidbox: Fixed compatibility with Python 2.6. .. _version-2.5.2: 2.5.2 ===== :release-date: 2012-11-29 12:35 P.M UTC :release-by: Ask Solem - [Redis] Fixed connection leak and added a new 'max_connections' transport option. .. _version-2.5.1: 2.5.1 ===== :release-date: 2012-11-28 12:45 P.M UTC :release-by: Ask Solem - Fixed bug where return value of Queue.as_dict could not be serialized with JSON (Issue #177). .. _version-2.5.0: 2.5.0 ===== :release-date: 2012-11-27 04:00 P.M UTC :release-by: Ask Solem - `py-amqp`_ is now the new default transport, replacing ``amqplib``. The new `py-amqp`_ library is a fork of amqplib started with the following goals: - Uses AMQP 0.9.1 instead of 0.8 - Support for heartbeats (Issue #79 + Issue #131) - Automatically revives channels on channel errors. - Support for all RabbitMQ extensions - Consumer Cancel Notifications (Issue #131) - Publisher Confirms (Issue #131). - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``. - API compatible with :mod:`librabbitmq` so that it can be used as a pure-python replacement in environments where rabbitmq-c cannot be compiled. librabbitmq will be updated to support all the same features as py-amqp.
- Support for using multiple connection URL's for failover. The first argument to :class:`~kombu.Connection` can now be a list of connection URLs: .. code-block:: python Connection(['amqp://foo', 'amqp://bar']) or it can be a single string argument with several URLs separated by semicolon: .. code-block:: python Connection('amqp://foo;amqp://bar') There is also a new keyword argument ``failover_strategy`` that defines how :meth:`~kombu.Connection.ensure_connection`/ :meth:`~kombu.Connection.ensure`/:meth:`kombu.Connection.autoretry` will reconnect in the event of connection failures. The default reconnection strategy is ``round-robin``, which will simply cycle through the list forever, and there's also a ``shuffle`` strategy that will select random hosts from the list. Custom strategies can also be used, in that case the argument must be a generator yielding the URL to connect to. Example: .. code-block:: python Connection('amqp://foo;amqp://bar') - Now supports PyDev, PyCharm, pylint and other static code analysis tools. - :class:`~kombu.Queue` now supports multiple bindings. You can now have multiple bindings in the same queue by having the second argument be a list: .. code-block:: python from kombu import binding, Queue Queue('name', [ binding(Exchange('E1'), routing_key='foo'), binding(Exchange('E1'), routing_key='bar'), binding(Exchange('E2'), routing_key='baz'), ]) To enable this, helper methods have been added: - :meth:`~kombu.Queue.bind_to` - :meth:`~kombu.Queue.unbind_from` Contributed by Rumyana Neykova. - Custom serializers can now be registered using Setuptools entry-points. See :ref:`serialization-entrypoints`. - New :class:`kombu.common.QoS` class used as a thread-safe way to manage changes to a consumer or channels prefetch_count. This was previously an internal class used in Celery now moved to the :mod:`kombu.common` module. - Consumer now supports a ``on_message`` callback that can be used to process raw messages (not decoded). Other callbacks specified using the ``callbacks`` argument, and the ``receive`` method will be not be called when a on message callback is present. - New utility :func:`kombu.common.ignore_errors` ignores connection and channel errors. Must only be used for cleanup actions at shutdown or on connection loss. - Support for exchange-to-exchange bindings. The :class:`~kombu.Exchange` entity gained ``bind_to`` and ``unbind_from`` methods: .. code-block:: python e1 = Exchange('A')(connection) e2 = Exchange('B')(connection) e2.bind_to(e1, routing_key='rkey', arguments=None) e2.unbind_from(e1, routing_key='rkey', arguments=None) This is currently only supported by the ``pyamqp`` transport. Contributed by Rumyana Neykova. .. _version-2.4.10: 2.4.10 ====== :release-date: 2012-11-22 06:00 P.M UTC :release-by: Ask Solem - The previous versions connection pool changes broke Redis support so that it would always connect to localhost (default setting) no matter what connection parameters were provided (Issue #176). .. _version-2.4.9: 2.4.9 ===== :release-date: 2012-11-21 03:00 P.M UTC :release-by: Ask Solem - Redis: Fixed race condition that could occur while trying to restore messages (Issue #171). Fix contributed by Ollie Walsh. - Redis: Each channel is now using a specific connection pool instance, which is disconnected on connection failure. - ProducerPool: Fixed possible dead-lock in the acquire method. - ProducerPool: ``force_close_all`` no longer tries to call the non-existent ``Producer._close``. 
- librabbitmq: Now implements ``transport.verify_connection`` so that connection pools will not give back connections that are no longer working. - New and better ``repr()`` for Queue and Exchange objects. - Python 3: Fixed problem with running the unit test suite. - Python 3: Fixed problem with JSON codec. .. _version-2.4.8: 2.4.8 ===== :release-date: 2012-11-02 05:00 P.M UTC :release-by: Ask Solem - Redis: Improved fair queue cycle implementation (Issue #166). Contributed by Kevin McCarthy. - Redis: Unacked message restore limit is now unlimited by default. Also, the limit can now be configured using the ``unacked_restore_limit`` transport option: .. code-block:: python Connection('redis://', transport_options={ 'unacked_restore_limit': 100, }) A limit of 100 means that the consumer will restore at most 100 messages at each pass. - Redis: Now uses a mutex to ensure only one consumer restores messages at a time. The mutex expires after 5 minutes by default, but can be configured using the ``unacked_mutex_expire`` transport option. - LamportClock.adjust now returns the new clock value. - Heartbeats can now be specified in URLs. Fix contributed by Mher Movsisyan. - Kombu can now be used with PyDev, PyCharm and other static analysis tools. - Fixes problem with msgpack on Python 3 (Issue #162). Fix contributed by Jasper Bryant-Greene. - amqplib: Fixed bug with timeouts when SSL is used in non-blocking mode. Fix contributed by Mher Movsisyan. .. _version-2.4.7: 2.4.7 ===== :release-date: 2012-09-18 03:00 P.M BST :release-by: Ask Solem - Virtual: Unknown exchanges now default to 'direct' when sending a message. - MongoDB: Fixed memory leak when merging keys stored in the db (Issue #159). Fix contributed by Michael Korbakov. - MongoDB: Better index for MongoDB transport (Issue #158). This improvement will create a new compound index for queue and _id in order to be able to use both indexed fields for getting a new message (using queue field) and sorting by _id. It'll be necessary to manually delete the old index from the collection. Improvement contributed by rmihael. .. _version-2.4.6: 2.4.6 ===== :release-date: 2012-09-12 03:00 P.M BST :release-by: Ask Solem - Adds additional compatibility dependencies: - Python <= 2.6: - importlib - ordereddict - Python <= 2.5 - simplejson .. _version-2.4.5: 2.4.5 ===== :release-date: 2012-08-30 03:36 P.M BST :release-by: Ask Solem - Last version broke installation on PyPy and Jython due to test requirements clean-up. .. _version-2.4.4: 2.4.4 ===== :release-date: 2012-08-29 04:00 P.M BST :release-by: Ask Solem - amqplib: Fixed a bug with asynchronously reading large messages. - pyamqp: Now requires amqp 0.9.3. - Cleaned up test requirements. .. _version-2.4.3: 2.4.3 ===== :release-date: 2012-08-25 10:30 P.M BST :release-by: Ask Solem - Fixed problem with amqp transport alias (Issue #154). .. _version-2.4.2: 2.4.2 ===== :release-date: 2012-08-24 05:00 P.M BST :release-by: Ask Solem - Having an empty transport name broke in 2.4.1. .. _version-2.4.1: 2.4.1 ===== :release-date: 2012-08-24 04:00 P.M BST :release-by: Ask Solem - Redis: Fixed race condition that could cause the consumer to crash (Issue #151), often leading to the error message ``"could not convert string to float"``. - Connection retry could cause an infinite loop (Issue #145). - The ``amqp`` alias is now resolved at runtime, so that eventlet detection works even if patching was done later. ..
_version-2.4.0: 2.4.0 ===== :release-date: 2012-08-17 08:00 P.M BST :release-by: Ask Solem - New experimental :mod:`ZeroMQ >> conn = Connection('pyamqp://guest:guest@localhost//') The ``pyamqp://`` transport will be the default fallback transport in Kombu version 3.0, when :mod:`librabbitmq` is not installed, and librabbitmq will also be updated to support the same features. - Connection now supports heartbeat argument. If enabled you must make sure to manually maintain heartbeats by calling the ``Connection.heartbeat_check`` at twice the rate of the specified heartbeat interval. E.g. if you have ``Connection(heartbeat=10)``, then you must call ``Connection.heartbeat_check()`` every 5 seconds. if the server has not sent heartbeats at a suitable rate then the heartbeat check method must raise an error that is listed in ``Connection.connection_errors``. The attribute ``Connection.supports_heartbeats`` has been added for the ability to inspect if a transport supports heartbeats or not. Calling ``heartbeat_check`` on a transport that does not support heartbeats results in a noop operation. - SQS: Fixed bug with invalid characters in queue names. Fix contributed by Zach Smith. - utils.reprcall: Fixed typo where kwargs argument was an empty tuple by default, and not an empty dict. .. _version-2.2.6: 2.2.6 ===== :release-date: 2012-07-10 05:00 P.M BST :release-by: Ask Solem - Adds ``kombu.messaging.entry_to_queue`` for compat with previous versions. .. _version-2.2.5: 2.2.5 ===== :release-date: 2012-07-10 05:00 P.M BST :release-by: Ask Solem - Pidbox: Now sets queue expire at 10 seconds for reply queues. - EventIO: Now ignores ``ValueError`` raised by epoll unregister. - MongoDB: Fixes Issue #142 Fix by Flavio Percoco Premoli .. _version-2.2.4: 2.2.4 ===== :release-date: 2012-07-05 04:00 P.M BST :release-by: Ask Solem - Support for msgpack-python 0.2.0 (Issue #143) The latest msgpack version no longer supports Python 2.5, so if you're still using that you need to depend on an earlier msgpack-python version. Fix contributed by Sebastian Insua - :func:`~kombu.common.maybe_declare` no longer caches entities with the ``auto_delete`` flag set. - New experimental filesystem transport. Contributed by Bobby Beever. - Virtual Transports: Now support anonymous queues and exchanges. .. _version-2.2.3: 2.2.3 ===== :release-date: 2012-06-24 05:00 P.M BST :release-by: Ask Solem - ``BrokerConnection`` now renamed to ``Connection``. The name ``Connection`` has been an alias for a very long time, but now the rename is official in the documentation as well. The Connection alias has been available since version 1.1.3, and ``BrokerConnection`` will still work and is not deprecated. - ``Connection.clone()`` now works for the sqlalchemy transport. - :func:`kombu.common.eventloop`, :func:`kombu.utils.uuid`, and :func:`kombu.utils.url.parse_url` can now be imported from the :mod:`kombu` module directly. - Pidbox transport callback ``after_reply_message_received`` now happens in a finally block. - Trying to use the ``librabbitmq://`` transport will now show the right name in the :exc:`ImportError` if :mod:`librabbitmq` is not installed. The librabbitmq falls back to the older ``pylibrabbitmq`` name for compatibility reasons and would therefore show ``No module named pylibrabbitmq`` instead of librabbitmq. .. 
_version-2.2.2: 2.2.2 ===== :release-date: 2012-06-22 02:30 P.M BST :release-by: Ask Solem - Now depends on :mod:`anyjson` 0.3.3 - Json serializer: Now passes :class:`buffer` objects directly, since this is supported in the latest :mod:`anyjson` version. - Fixes blocking epoll call if timeout was set to 0. Fix contributed by John Watson. - setup.py now takes requirements from the :file:`requirements/` directory. - The distribution directory :file:`contrib/` is now renamed to :file:`extra/` .. _version-2.2.1: 2.2.1 ===== :release-date: 2012-06-21 01:00 P.M BST :release-by: Ask Solem - SQS: Default visibility timeout is now 30 minutes. Since we have ack emulation the visibility timeout is only in effect if the consumer is abrubtly terminated. - retry argument to ``Producer.publish`` now works properly, when the declare argument is specified. - Json serializer: didn't handle buffer objects (Issue #135). Fix contributed by Jens Hoffrichter. - Virtual: Now supports passive argument to ``exchange_declare``. - Exchange & Queue can now be bound to connections (which will use the default channel): .. code-block:: pycon >>> exchange = Exchange('name') >>> bound_exchange = exchange(connection) >>> bound_exchange.declare() - ``SimpleQueue`` & ``SimpleBuffer`` can now be bound to connections (which will use the default channel). - ``Connection.manager.get_bindings`` now works for librabbitmq and pika. - Adds new transport info attributes: - ``Transport.driver_type`` Type of underlying driver, e.g. "amqp", "redis", "sql". - ``Transport.driver_name`` Name of library used e.g. "amqplib", "redis", "pymongo". - ``Transport.driver_version()`` Version of underlying library. .. _version-2.2.0: 2.2.0 ===== :release-date: 2012-06-07 03:10 P.M BST :release-by: Ask Solem .. _v220-important: Important Notes --------------- - The canonical source code repository has been moved to http://github.com/celery/kombu - Pidbox: Exchanges used by pidbox are no longer auto_delete. Auto delete has been described as a misfeature, and therefore we have disabled it. For RabbitMQ users old exchanges used by pidbox must be removed, these are named ``mailbox_name.pidbox``, and ``reply.mailbox_name.pidbox``. The following command can be used to clean up these exchanges: .. code-block:: text $ VHOST=/ URL=amqp:// python -c'import sys,kombu;[kombu.Connection( sys.argv[-1]).channel().exchange_delete(x) for x in sys.argv[1:-1]]' \ $(sudo rabbitmqctl -q list_exchanges -p "$VHOST" \ | grep \.pidbox | awk '{print $1}') "$URL" The :envvar:`VHOST` variable must be set to the target RabbitMQ virtual host, and the :envvar:`URL` must be the AMQP URL to the server. - The ``amqp`` transport alias will now use :mod:`librabbitmq` if installed. `py-librabbitmq`_ is a fast AMQP client for Python using the librabbitmq C library. It can be installed by: .. code-block:: console $ pip install librabbitmq It will not be used if the process is monkey patched by eventlet/gevent. .. _`py-librabbitmq`: https://github.com/celery/librabbitmq .. _v220-news: News ---- - Redis: Ack emulation improvements. Reducing the possibility of data loss. Acks are now implemented by storing a copy of the message when the message is consumed. The copy is not removed until the consumer acknowledges or rejects it. This means that unacknowledged messages will be redelivered either when the connection is closed, or when the visibility timeout is exceeded. 
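  From the application's point of view nothing changes: messages are
  acknowledged or rejected exactly as with any other transport. A minimal
  sketch (the callback and helper names here are only illustrative):

  .. code-block:: python

      def on_message(body, message):
          try:
              handle(body)        # hypothetical application logic
          except Exception:
              message.requeue()   # stored copy is restored for redelivery
          else:
              message.ack()       # stored copy is removed

  Until ``ack()`` or ``reject()``/``requeue()`` is called, the Redis
  transport keeps the stored copy so the message can be redelivered.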
- Visibility timeout This is a timeout for acks, so that if the consumer does not ack the message within this time limit, the message is redelivered to another consumer. The timeout is set to one hour by default, but can be changed by configuring a transport option: >>> Connection('redis://', transport_options={ ... 'visibility_timeout': 1800, # 30 minutes ... }) **NOTE**: Messages that have not been acked will be redelivered if the visibility timeout is exceeded, for Celery users this means that ETA/countdown tasks that are scheduled to execute with a time that exceeds the visibility timeout will be executed twice (or more). If you plan on using long ETA/countdowns you should tweak the visibility timeout accordingly: .. code-block:: python BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 18000} # 5 hours Setting a long timeout means that it will take a long time for messages to be redelivered in the event of a power failure, but if so happens you could temporarily set the visibility timeout lower to flush out messages when you start up the systems again. - Experimental `Apache ZooKeeper`_ transport More information is in the module reference: :mod:`kombu.transport.zookeeper`. Contributed by Mahendra M. .. _`Apache ZooKeeper`: http://zookeeper.apache.org/ - Redis: Priority support. The message's ``priority`` field is now respected by the Redis transport by having multiple lists for each named queue. The queues are then consumed by in order of priority. The priority field is a number in the range of 0 - 9, where 0 is the default and highest priority. The priority range is collapsed into four steps by default, since it is unlikely that nine steps will yield more benefit than using four steps. The number of steps can be configured by setting the ``priority_steps`` transport option, which must be a list of numbers in **sorted order**: .. code-block:: pycon >>> x = Connection('redis://', transport_options={ ... 'priority_steps': [0, 2, 4, 6, 8, 9], ... }) Priorities implemented in this way is not as reliable as priorities on the server side, which is why nickname the feature "quasi-priorities"; **Using routing is still the suggested way of ensuring quality of service**, as client implemented priorities fall short in a number of ways, e.g. if the worker is busy with long running tasks, has prefetched many messages, or the queues are congested. Still, it is possible that using priorities in combination with routing can be more beneficial than using routing or priorities alone. Experimentation and monitoring should be used to prove this. Contributed by Germán M. Bravo. - Redis: Now cycles queues so that consuming is fair. This ensures that a very busy queue won't block messages from other queues, and ensures that all queues have an equal chance of being consumed from. This used to be the case before, but the behavior was accidentally changed while switching to using blocking pop. - Redis: Auto delete queues that are bound to fanout exchanges is now deleted at channel.close. - amqplib: Refactored the drain_events implementation. - Pidbox: Now uses ``connection.default_channel``. - Pickle serialization: Can now decode buffer objects. - Exchange/Queue declarations can now be cached even if the entity is non-durable. This is possible because the list of cached declarations are now kept with the connection, so that the entities will be redeclared if the connection is lost. - Kombu source code now only uses one-level of explicit relative imports. .. 
_v220-fixes: Fixes ----- - eventio: Now ignores ENOENT raised by ``epoll.register``, and EEXIST from ``epoll.unregister``. - eventio: kqueue now ignores :exc:`KeyError` on unregister. - Redis: ``Message.reject`` now supports the ``requeue`` argument. - Redis: Remove superfluous pipeline call. Fix contributed by Thomas Johansson. - Redis: Now sets redelivered header for redelivered messages. - Now always makes sure references to :func:`sys.exc_info` is removed. - Virtual: The compression header is now removed before restoring messages. - More tests for the SQLAlchemy backend. Contributed by Franck Cuny. - Url parsing did not handle MongoDB URLs properly. Fix contributed by Flavio Percoco Premoli. - Beanstalk: Ignore default tube when reserving. Fix contributed by Zhao Xiaohong. Nonblocking consume support --------------------------- librabbitmq, amqplib and redis transports can now be used non-blocking. The interface is very manual, and only consuming messages is non-blocking so far. The API should not be regarded as stable or final in any way. It is used by Celery which has very limited needs at this point. Hopefully we can introduce a proper callback-based API later. - ``Transport.eventmap`` Is a map of ``fd -> callback(fileno, event)`` to register in an eventloop. - ``Transport.on_poll_start()`` Is called before every call to poll. The poller must support ``register(fd, callback)`` and ``unregister(fd)`` methods. - ``Transport.on_poll_start(poller)`` Called when the hub is initialized. The poller argument must support the same interface as :class:`kombu.utils.eventio.poll`. - ``Connection.ensure_connection`` now takes a callback argument which is called for every loop while the connection is down. - Adds ``connection.drain_nowait`` This is a non-blocking alternative to drain_events, but only supported by amqplib/librabbitmq. - drain_events now sets ``connection.more_to_read`` if there is more data to read. This is to support eventloops where other things must be handled between draining events. .. _version-2.1.8: 2.1.8 ===== :release-date: 2012-05-06 03:06 P.M BST :release-by: Ask Solem * Bound Exchange/Queue's are now pickleable. * Consumer/Producer can now be instantiated without a channel, and only later bound using ``.revive(channel)``. * ProducerPool now takes ``Producer`` argument. * :func:`~kombu.utils.fxrange` now counts forever if the stop argument is set to None. (fxrange is like xrange but for decimals). * Auto delete support for virtual transports were incomplete and could lead to problems so it was removed. * Cached declarations (:func:`~kombu.common.maybe_declare`) are now bound to the underlying connection, so that entities are redeclared if the connection is lost. This also means that previously uncacheable entities (e.g. non-durable) can now be cached. * compat ConsumerSet: can now specify channel. .. _version-2.1.7: 2.1.7 ===== :release-date: 2012-04-27 06:00 P.M BST :release-by: Ask Solem * compat consumerset now accepts optional channel argument. .. _version-2.1.6: 2.1.6 ===== :release-date: 2012-04-23 01:30 P.M BST :release-by: Ask Solem * SQLAlchemy transport was not working correctly after URL parser change. * maybe_declare now stores cached declarations per underlying connection instead of globally, in the rare case that data disappears from the broker after connection loss. * Django: Added South migrations. Contributed by Joseph Crosland. .. 
_version-2.1.5: 2.1.5 ===== :release-date: 2012-04-13 03:30 P.M BST :release-by: Ask Solem * The url parser removed more than the first leading slash (Issue #121). * SQLAlchemy: Can now specify url using + separator Example: .. code-block:: python Connection('sqla+mysql://localhost/db') * Better support for anonymous queues (Issue #116). Contributed by Michael Barrett. * ``Connection.as_uri`` now quotes url parts (Issue #117). * Beanstalk: Can now set message TTR as a message property. Contributed by Andrii Kostenko .. _version-2.1.4: 2.1.4 ===== :release-date: 2012-04-03 04:00 P.M GMT :release-by: Ask Solem * MongoDB: URL parsing are now delegated to the pymongo library (Fixes Issue #103 and Issue #87). Fix contributed by Flavio Percoco Premoli and James Sullivan * SQS: A bug caused SimpleDB to be used even if sdb persistence was not enabled (Issue #108). Fix contributed by Anand Kumria. * Django: Transaction was committed in the wrong place, causing data cleanup to fail (Issue #115). Fix contributed by Daisuke Fujiwara. * MongoDB: Now supports replica set URLs. Contributed by Flavio Percoco Premoli. * Redis: Now raises a channel error if a queue key that is currently being consumed from disappears. Fix contributed by Stephan Jaekel. * All transport 'channel_errors' lists now includes ``kombu.exception.StdChannelError``. * All kombu exceptions now inherit from a common :exc:`~kombu.exceptions.KombuError`. .. _version-2.1.3: 2.1.3 ===== :release-date: 2012-03-20 03:00 P.M GMT :release-by: Ask Solem * Fixes Jython compatibility issues. * Fixes Python 2.5 compatibility issues. .. _version-2.1.2: 2.1.2 ===== :release-date: 2012-03-01 01:00 P.M GMT :release-by: Ask Solem * amqplib: Last version broke SSL support. .. _version-2.1.1: 2.1.1 ===== :release-date: 2012-02-24 02:00 P.M GMT :release-by: Ask Solem * Connection URLs now supports encoded characters. * Fixed a case where connection pool could not recover from connection loss. Fix contributed by Florian Munz. * We now patch amqplib's ``__del__`` method to skip trying to close the socket if it is not connected, as this resulted in an annoying warning. * Compression can now be used with binary message payloads. Fix contributed by Steeve Morin. .. _version-2.1.0: 2.1.0 ===== :release-date: 2012-02-04 10:38 P.M GMT :release-by: Ask Solem * MongoDB: Now supports fanout (broadcast) (Issue #98). Contributed by Scott Lyons. * amqplib: Now detects broken connections by using ``MSG_PEEK``. * pylibrabbitmq: Now supports ``basic_get`` (Issue #97). * gevent: Now always uses the ``select`` polling backend. * pika transport: Now works with pika 0.9.5 and 0.9.6dev. The old pika transport (supporting 0.5.x) is now available as alias ``oldpika``. (Note terribly latency has been experienced with the new pika versions, so this is still an experimental transport). * Virtual transports: can now set polling interval via the transport options (Issue #96). Example: .. code-block:: pycon >>> Connection('sqs://', transport_options={ ... 'polling_interval': 5.0}) The default interval is transport specific, but usually 1.0s (or 5.0s for the Django database transport, which can also be set using the ``KOMBU_POLLING_INTERVAL`` setting). * Adds convenience function: :func:`kombu.common.eventloop`. .. _version-2.0.0: 2.0.0 ===== :release-date: 2012-01-15 06:34 P.M GMT :release-by: Ask Solem .. _v200-important: Important Notes --------------- .. _v200-python-compatibility: Python Compatibility ~~~~~~~~~~~~~~~~~~~~ * No longer supports Python 2.4. 
Users of Python 2.4 can still use the 1.x series. The 1.x series has entered bugfix-only maintenance mode, and will stay that way as long as there is demand, and a willingness to maintain it. .. _v200-new-transports: New Transports ~~~~~~~~~~~~~~ * ``django-kombu`` is now part of Kombu core. The Django message transport uses the Django ORM to store messages. It uses polling, with a default polling interval of 5 seconds. The polling interval can be increased or decreased by configuring the ``KOMBU_POLLING_INTERVAL`` Django setting, which is the polling interval in seconds as an int or a float. Note that shorter polling intervals can cause extreme strain on the database: if responsiveness is needed you shall consider switching to a non-polling transport. To use it you must use transport alias ``"django"``, or as a URL: .. code-block:: text django:// and then add ``kombu.transport.django`` to ``INSTALLED_APPS``, and run ``manage.py syncdb`` to create the necessary database tables. **Upgrading** If you have previously used ``django-kombu``, then the entry in ``INSTALLED_APPS`` must be changed from ``djkombu`` to ``kombu.transport.django``: .. code-block:: python INSTALLED_APPS = ( # …, 'kombu.transport.django', ) If you have previously used django-kombu, then there is no need to recreate the tables, as the old tables will be fully compatible with the new version. * ``kombu-sqlalchemy`` is now part of Kombu core. This change requires no code changes given that the ``sqlalchemy`` transport alias is used. .. _v200-news: News ---- * :class:`kombu.mixins.ConsumerMixin` is a mixin class that lets you easily write consumer programs and threads. See :ref:`examples` and :ref:`guide-consumers`. * SQS Transport: Added support for SQS queue prefixes (Issue #84). The queue prefix can be set using the transport option ``queue_name_prefix``: .. code-block:: python BrokerTransport('SQS://', transport_options={ 'queue_name_prefix': 'myapp'}) Contributed by Nitzan Miron. * ``Producer.publish`` now supports automatic retry. Retry is enabled by the ``reply`` argument, and retry options set by the ``retry_policy`` argument: .. code-block:: python exchange = Exchange('foo') producer.publish(message, exchange=exchange, retry=True, declare=[exchange], retry_policy={ 'interval_start': 1.0}) See :meth:`~kombu.Connection.ensure` for a list of supported retry policy options. * ``Producer.publish`` now supports a ``declare`` keyword argument. This is a list of entities (:class:`Exchange`, or :class:`Queue`) that should be declared before the message is published. .. _v200-fixes: Fixes ----- * Redis transport: Timeout was multiplied by 1000 seconds when using ``select`` for event I/O (Issue #86). .. _version-1.5.1: 1.5.1 ===== :release-date: 2011-11-30 01:00 P.M GMT :release-by: Ask Solem * Fixes issue with ``kombu.compat`` introduced in 1.5.0 (Issue #83). * Adds the ability to disable content_types in the serializer registry. Any message with a content type that is disabled will be refused. One example would be to disable the Pickle serializer: >>> from kombu.serialization import registry # by name >>> registry.disable('pickle') # or by mime-type. >>> registry.disable('application/x-python-serialize') .. _version-1.5.0: 1.5.0 ===== :release-date: 2011-11-27 06:00 P.M GMT :release-by: Ask Solem * kombu.pools: Fixed a bug resulting in resources not being properly released. This was caused by the use of ``__hash__`` to distinguish them. * Virtual transports: Dead-letter queue is now disabled by default. 
The dead-letter queue was enabled by default to help application authors, but now that Kombu is stable it should be removed. There are after all many cases where messages should just be dropped when there are no queues to buffer them, and keeping them without supporting automatic cleanup is rather considered a resource leak than a feature. If wanted the dead-letter queue can still be enabled, by using the ``deadletter_queue`` transport option: .. code-block:: pycon >>> x = Connection('redis://', ... transport_options={'deadletter_queue': 'ae.undeliver'}) In addition, an :class:`UndeliverableWarning` is now emitted when the dead-letter queue is enabled and a message ends up there. Contributed by Ionel Maries Cristian. * MongoDB transport now supports Replicasets (Issue #81). Contributed by Ivan Metzlar. * The ``Connection.ensure`` methods now accepts a ``max_retries`` value of 0. A value of 0 now means *do not retry*, which is distinct from :const:`None` which means *retry indefinitely*. Contributed by Dan McGee. * SQS Transport: Now has a lowercase ``sqs`` alias, so that it can be used with broker URLs (Issue #82). Fix contributed by Hong Minhee * SQS Transport: Fixes KeyError on message acknowledgments (Issue #73). The SQS transport now uses UUID's for delivery tags, rather than a counter. Fix contributed by Brian Bernstein. * SQS Transport: Unicode related fixes (Issue #82). Fix contributed by Hong Minhee. * Redis version check could crash because of improper handling of types (Issue #63). * Fixed error with `Resource.force_close_all` when resources were not yet properly initialized (Issue #78). .. _version-1.4.3: 1.4.3 ===== :release-date: 2011-10-27 10:00 P.M BST :release-by: Ask Solem * Fixes bug in ProducerPool where too many resources would be acquired. .. _version-1.4.2: 1.4.2 ===== :release-date: 2011-10-26 05:00 P.M BST :release-by: Ask Solem * Eventio: Polling should ignore `errno.EINTR` * SQS: str.encode did only start accepting kwargs after Py2.7. * simple_task_queue example didn't run correctly (Issue #72). Fix contributed by Stefan Eletzhofer. * Empty messages would not raise an exception not able to be handled by `on_decode_error` (Issue #72) Fix contributed by Christophe Chauvet. * CouchDB: Properly authenticate if user/password set (Issue #70) Fix contributed by Rafael Duran Castaneda * Connection.Consumer had the wrong signature. Fix contributed by Pavel Skvazh .. _version-1.4.1: 1.4.1 ===== :release-date: 2011-09-26 04:00 P.M BST :release-by: Ask Solem * 1.4.0 broke the producer pool, resulting in new connections being established for every acquire. .. _version-1.4.0: 1.4.0 ===== :release-date: 2011-09-22 05:00 P.M BST :release-by: Ask Solem * Adds module :mod:`kombu.mixins`. This module contains a :class:`~kombu.mixins.ConsumerMixin` class that can be used to easily implement a message consumer thread that consumes messages from one or more :class:`kombu.Consumer` instances. * New example: :ref:`task-queue-example` Using the ``ConsumerMixin``, default channels and the global connection pool to demonstrate new Kombu features. * MongoDB transport did not work with MongoDB >= 2.0 (Issue #66) Fix contributed by James Turk. * Redis-py version check did not account for beta identifiers in version string. Fix contributed by David Ziegler. * Producer and Consumer now accepts a connection instance as the first argument. The connections default channel will then be used. In addition shortcut methods has been added to Connection: .. 
code-block:: pycon >>> connection.Producer(exchange) >>> connection.Consumer(queues=..., callbacks=...) * Connection has aquired a ``connected`` attribute that can be used to check if the connection instance has established a connection. * ``ConnectionPool.acquire_channel`` now returns the connections default channel rather than establising a new channel that must be manually handled. * Added ``kombu.common.maybe_declare`` ``maybe_declare(entity)`` declares an entity if it has not previously been declared in the same process. * :func:`kombu.compat.entry_to_queue` has been moved to :mod:`kombu.common` * New module :mod:`kombu.clocks` now contains an implementation of Lamports logical clock. .. _version-1.3.5: 1.3.5 ===== :release-date: 2011-09-16 06:00 P.M BST :release-by: Ask Solem * Python 3: AMQP_PROTOCOL_HEADER must be bytes, not str. .. _version-1.3.4: 1.3.4 ===== :release-date: 2011-09-16 06:00 P.M BST :release-by: Ask Solem * Fixes syntax error in pools.reset .. _version-1.3.3: 1.3.3 ===== :release-date: 2011-09-15 02:00 P.M BST :release-by: Ask Solem * pools.reset did not support after forker arguments. .. _version-1.3.2: 1.3.2 ===== :release-date: 2011-09-10 01:00 P.M BST :release-by: Mher Movsisyan * Broke Python 2.5 compatibility by importing ``parse_qsl`` from ``urlparse`` * Connection.default_channel is now closed when connection is revived after connection failures. * Pika: Channel now supports the ``connection.client`` attribute as required by the simple interface. * pools.set_limit now raises an exception if the limit is lower than the previous limit. * pools.set_limit no longer resets the pools. .. _version-1.3.1: 1.3.1 ===== :release-date: 2011-10-07 03:00 P.M BST :release-by: Ask Solem * Last release broke after fork for pool reinitialization. * Producer/Consumer now has a ``connection`` attribute, giving access to the :class:`Connection` of the instance. * Pika: Channels now have access to the underlying :class:`Connection` instance using ``channel.connection.client``. This was previously required by the ``Simple`` classes and is now also required by :class:`Consumer` and :class:`Producer`. * Connection.default_channel is now closed at object revival. * Adds kombu.clocks.LamportClock. * compat.entry_to_queue has been moved to new module :mod:`kombu.common`. .. _version-1.3.0: 1.3.0 ===== :release-date: 2011-10-05 01:00 P.M BST :release-by: Ask Solem * Broker connection info can be now be specified using URLs The broker hostname can now be given as a URL instead, of the format: .. code-block:: text transport://user:password@hostname:port/virtual_host for example the default broker is expressed as: .. code-block:: pycon >>> Connection('amqp://guest:guest@localhost:5672//') Transport defaults to amqp, and is not required. user, password, port and virtual_host is also not mandatory and will default to the corresponding transports default. .. note:: Note that the path component (virtual_host) always starts with a forward-slash. This is necessary to distinguish between the virtual host '' (empty) and '/', which are both acceptable virtual host names. A virtual host of '/' becomes:: .. code-block:: text amqp://guest:guest@localhost:5672// and a virtual host of '' (empty) becomes: .. code-block:: text amqp://guest:guest@localhost:5672/ So the leading slash in the path component is **always required**. * Now comes with default global connection and producer pools. The acquire a connection using the connection parameters from a :class:`Connection`: .. 
code-block:: pycon >>> from kombu import Connection, connections >>> connection = Connection('amqp://guest:guest@localhost//') >>> with connections[connection].acquire(block=True): ... # do something with connection To acquire a producer using the connection parameters from a :class:`Connection`: .. code-block:: pycon >>> from kombu import Connection, producers >>> connection = Connection('amqp://guest:guest@localhost//') >>> with producers[connection].acquire(block=True): ... producer.publish({'hello': 'world'}, exchange='hello') Acquiring a producer will in turn also acquire a connection from the associated pool in ``connections``, so you the number of producers is bound the same limit as number of connections. The default limit of 100 connections per connection instance can be changed by doing: .. code-block:: pycon >>> from kombu import pools >>> pools.set_limit(10) The pool can also be forcefully closed by doing: .. code-block:: pycon >>> from kombu import pools >>> pool.reset() * SQS Transport: Persistence using SimpleDB is now disabled by default, after reports of unstable SimpleDB connections leading to errors. * :class:`Producer` can now be used as a context manager. * ``Producer.__exit__`` now properly calls ``release`` instead of close. The previous behavior would lead to a memory leak when using the :class:`kombu.pools.ProducerPool` * Now silences all exceptions from `import ctypes` to match behaviour of the standard Python uuid module, and avoid passing on MemoryError exceptions on SELinux-enabled systems (Issue #52 + Issue #53) * ``amqp`` is now an alias to the ``amqplib`` transport. * ``kombu.syn.detect_environment`` now returns 'default', 'eventlet', or 'gevent' depending on what monkey patches have been installed. * Serialization registry has new attribute ``type_to_name`` so it is possible to lookup serializater name by content type. * Exchange argument to ``Producer.publish`` can now be an :class:`Exchange` instance. * ``compat.Publisher`` now supports the ``channel`` keyword argument. * Acking a message on some transports could lead to :exc:`KeyError` being raised (Issue #57). * Connection pool: Connections are no long instantiated when the pool is created, but instantiated as needed instead. * Tests now pass on PyPy. * ``Connection.as_uri`` now includes the password if the keyword argument ``include_password`` is set. * Virtual transports now comes with a default ``default_connection_params`` attribute. .. _version-1.2.1: 1.2.1 ===== :release-date: 2011-07-29 12:52 P.M BST :release-by: Ask Solem * Now depends on amqplib >= 1.0.0. * Redis: Now automatically deletes auto_delete queues at ``basic_cancel``. * ``serialization.unregister`` added so it is possible to remove unwanted seralizers. * Fixes MemoryError while importing ctypes on SELinux (Issue #52). * ``Connection.autoretry`` is a version of ``ensure`` that works with arbitrary functions (i.e. it does not need an associated object that implements the ``revive`` method. Example usage: .. code-block:: python channel = connection.channel() try: ret, channel = connection.autoretry(send_messages, channel=channel) finally: channel.close() * ``ConnectionPool.acquire`` no longer force establishes the connection. The connection will be established as needed. * ``Connection.ensure`` now supports an ``on_revive`` callback that is applied whenever the connection is re-established. * ``Consumer.consuming_from(queue)`` returns True if the Consumer is consuming from ``queue``. 
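  A quick interactive sketch (``queue`` is assumed to be one of the queues
  this consumer was created with):

  .. code-block:: pycon

      >>> consumer.consuming_from(queue)
      True
      >>> consumer.consuming_from(Queue('unrelated'))
      False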
* ``Consumer.cancel_by_queue`` did not remove the queue from ``queues``. * ``compat.ConsumerSet.add_queue_from_dict`` now automatically declared the queue if ``auto_declare`` set. .. _version-1.2.0: 1.2.0 ===== :release-date: 2011-07-15 12:00 P.M BST :release-by: Ask Solem * Virtual: Fixes cyclic reference in Channel.close (Issue #49). * Producer.publish: Can now set additional properties using keyword arguments (Issue #48). * Adds Queue.no_ack option to control the no_ack option for individual queues. * Recent versions broke pylibrabbitmq support. * SimpleQueue and SimpleBuffer can now be used as contexts. * Test requirements specifies PyYAML==3.09 as 3.10 dropped Python 2.4 support * Now properly reports default values in Connection.info/.as_uri .. _version-1.1.6: 1.1.6 ===== :release-date: 2011-06-13 04:00 P.M BST :release-by: Ask Solem * Redis: Fixes issue introduced in 1.1.4, where a redis connection failure could leave consumer hanging forever. * SQS: Now supports fanout messaging by using SimpleDB to store routing tables. This can be disabled by setting the `supports_fanout` transport option: >>> Connection(transport='SQS', ... transport_options={'supports_fanout': False}) * SQS: Now properly deletes a message when a message is acked. * SQS: Can now set the Amazon AWS region, by using the ``region`` transport option. * amqplib: Now uses `localhost` as default hostname instead of raising an error. .. _version-1.1.5: 1.1.5 ===== :release-date: 2011-06-07 06:00 P.M BST :release-by: Ask Solem * Fixes compatibility with redis-py 2.4.4. .. _version-1.1.4: 1.1.4 ===== :release-date: 2011-06-07 04:00 P.M BST :release-by: Ask Solem * Redis transport: Now requires redis-py version 2.4.4 or later. * New Amazon SQS transport added. Usage: >>> conn = Connection(transport='SQS', ... userid=aws_access_key_id, ... password=aws_secret_access_key) The environment variables :envvar:`AWS_ACCESS_KEY_ID` and :envvar:`AWS_SECRET_ACCESS_KEY` are also supported. * librabbitmq transport: Fixes default credentials support. * amqplib transport: Now supports `login_method` for SSL auth. :class:`Connection` now supports the `login_method` keyword argument. Default `login_method` is ``AMQPLAIN``. .. _version-1.1.3: 1.1.3 ===== :release-date: 2011-04-21 04:00 P.M CEST :release-by: Ask Solem * Redis: Consuming from multiple connections now works with Eventlet. * Redis: Can now perform channel operations while the channel is in BRPOP/LISTEN mode (Issue #35). Also the async BRPOP now times out after 1 second, this means that canceling consuming from a queue/starting consuming from additional queues has a latency of up to one second (BRPOP does not support subsecond timeouts). * Virtual: Allow channel objects to be closed multiple times without error. * amqplib: ``AttributeError`` has been added to the list of known connection related errors (:attr:`Connection.connection_errors`). * amqplib: Now converts :exc:`SSLError` timeout errors to :exc:`socket.timeout` (http://bugs.python.org/issue10272) * Ensures cyclic references are destroyed when the connection is closed. .. _version-1.1.2: 1.1.2 ===== :release-date: 2011-04-06 04:00 P.M CEST :release-by: Ask Solem * Redis: Fixes serious issue where messages could be lost. The issue could happen if the message exceeded a certain number of kilobytes in size. It is recommended that all users of the Redis transport should upgrade to this version, even if not currently experiencing any issues. .. 
_version-1.1.1: 1.1.1 ===== :release-date: 2011-04-05 03:51 P.M CEST :release-by: Ask Solem * 1.1.0 started using ``Queue.LifoQueue`` which is only available in Python 2.6+ (Issue #33). We now ship with our own LifoQueue. .. _version-1.1.0: 1.1.0 ===== :release-date: 2011-04-05 01:05 P.M CEST :release-by: Ask Solem .. _v110-important: Important Notes --------------- * Virtual transports: Message body is now base64 encoded by default (Issue #27). This should solve problems sending binary data with virtual transports. Message compatibility is handled by adding a ``body_encoding`` property, so messages sent by older versions is compatible with this release. However -- If you are accessing the messages directly not using Kombu, then you have to respect the ``body_encoding`` property. If you need to disable base64 encoding then you can do so via the transport options: .. code-block:: python Connection(transport='...', transport_options={'body_encoding': None}) **For transport authors**: You don't have to change anything in your custom transports, as this is handled automatically by the base class. If you want to use a different encoder you can do so by adding a key to ``Channel.codecs``. Default encoding is specified by the ``Channel.body_encoding`` attribute. A new codec must provide two methods: ``encode(data)`` and ``decode(data)``. * ConnectionPool/ChannelPool/Resource: Setting ``limit=None`` (or 0) now disables pool semantics, and will establish and close the resource whenever acquired or released. * ConnectionPool/ChannelPool/Resource: Is now using a LIFO queue instead of the previous FIFO behavior. This means that the last resource released will be the one acquired next. I.e. if only a single thread is using the pool this means only a single connection will ever be used. * Connection: Cloned connections did not inherit transport_options (``__copy__``). * contrib/requirements is now located in the top directory of the distribution. * MongoDB: Now supports authentication using the ``userid`` and ``password`` arguments to :class:`Connection` (Issue #30). * Connection: Default autentication credentials are now delegated to the individual transports. This means that the ``userid`` and ``password`` arguments to Connection is no longer *guest/guest* by default. The amqplib and pika transports will still have the default credentials. * :meth:`Consumer.__exit__` did not have the correct signature (Issue #32). * Channel objects now have a ``channel_id`` attribute. * MongoDB: Version sniffing broke with development versions of mongod (Issue #29). * New environment variable :envvar:`KOMBU_LOG_CONNECTION` will now emit debug log messages for connection related actions. :envvar:`KOMBU_LOG_DEBUG` will also enable :envvar:`KOMBU_LOG_CONNECTION`. .. _version-1.0.7: 1.0.7 ===== :release-date: 2011-03-28 05:45 P.M CEST :release-by: Ask Solem * Now depends on anyjson 0.3.1 cjson is no longer a recommended json implementation, and anyjson will now emit a deprecation warning if used. * Please note that the Pika backend only works with version 0.5.2. The latest version (0.9.x) drastically changed API, and it is not compatible yet. * on_decode_error is now called for exceptions in message_to_python (Issue #24). * Redis: did not respect QoS settings. * Redis: Creating a connection now ensures the connection is established. This means ``Connection.ensure_connection`` works properly with Redis. * consumer_tag argument to ``Queue.consume`` can't be :const:`None` (Issue #21). 
A None value is now automatically converted to empty string. An empty string will make the server generate a unique tag. * Connection now supports a ``transport_options`` argument. This can be used to pass additional arguments to transports. * Pika: ``drain_events`` raised :exc:`socket.timeout` even if no timeout set (Issue #8). .. version-1.0.6: 1.0.6 ===== :release-date: 2011-03-22 04:00 P.M CET :release-by: Ask Solem * The ``delivery_mode`` aliases (persistent/transient) were not automatically converted to integer, and would cause a crash if using the amqplib transport. * Redis: The redis-py :exc:`InvalidData` exception suddenly changed name to :exc:`DataError`. * The :envvar:`KOMBU_LOG_DEBUG` environment variable can now be set to log all channel method calls. Support for the following environment variables have been added: * :envvar:`KOMBU_LOG_CHANNEL` will wrap channels in an object that logs every method call. * :envvar:`KOMBU_LOG_DEBUG` both enables channel logging and configures the root logger to emit messages to standard error. **Example Usage**: .. code-block:: console $ KOMBU_LOG_DEBUG=1 python >>> from kombu import Connection >>> conn = Connection() >>> channel = conn.channel() Start from server, version: 8.0, properties: {u'product': 'RabbitMQ',.............. } Open OK! known_hosts [] using channel_id: 1 Channel open >>> channel.queue_declare('myq', passive=True) [Kombu channel:1] queue_declare('myq', passive=True) (u'myq', 0, 1) .. _version-1.0.5: 1.0.5 ===== :release-date: 2011-03-17 04:00 P.M CET :release-by: Ask Solem * Fixed memory leak when creating virtual channels. All virtual transports affected (redis, mongodb, memory, django, sqlalchemy, couchdb, beanstalk). * Virtual Transports: Fixed potential race condition when acking messages. If you have been affected by this, the error would show itself as an exception raised by the OrderedDict implementation. (``object no longer exists``). * MongoDB transport requires the ``findandmodify`` command only available in MongoDB 1.3+, so now raises an exception if connected to an incompatible server version. * Virtual Transports: ``basic.cancel`` should not try to remove unknown consumer tag. .. _version-1.0.4: 1.0.4 ===== :release-date: 2011-02-28 04:00 P.M CET :release-by: Ask Solem * Added Transport.polling_interval Used by django-kombu to increase the time to sleep between SELECTs when there are no messages in the queue. Users of django-kombu should upgrade to django-kombu v0.9.2. .. _version-1.0.3: 1.0.3 ===== :release-date: 2011-02-12 04:00 P.M CET :release-by: Ask Solem * ConnectionPool: Re-connect if amqplib connection closed * Adds ``Queue.as_dict`` + ``Exchange.as_dict``. * Copyright headers updated to include 2011. .. _version-1.0.2: 1.0.2 ===== :release-date: 2011-01-31 10:45 P.M CET :release-by: Ask Solem * amqplib: Message properties were not set properly. * Ghettoq backend names are now automatically translated to the new names. .. _version-1.0.1: 1.0.1 ===== :release-date: 2011-01-28 12:00 P.M CET :release-by: Ask Solem * Redis: Now works with Linux (epoll) .. _version-1.0.0: 1.0.0 ===== :release-date: 2011-01-27 12:00 P.M CET :release-by: Ask Solem * Initial release .. _version-0.1.0: 0.1.0 ===== :release-date: 2010-07-22 04:20 P.M CET :release-by: Ask Solem * Initial fork of carrot kombu-5.5.3/FAQ000066400000000000000000000006641477772317200132220ustar00rootroot00000000000000============================ Frequently Asked Questions ============================ Questions ========= Q: Message.reject doesn't work? 
-------------------------------------- **Answer**: Earlier versions of RabbitMQ did not implement ``basic.reject``, so make sure your version is recent enough to support it. Q: Message.requeue doesn't work? -------------------------------------- **Answer**: See _`Message.reject doesn't work?` kombu-5.5.3/INSTALL000066400000000000000000000006031477772317200137120ustar00rootroot00000000000000Installation ============ You can install ``kombu`` either via the Python Package Index (PyPI) or from source. To install using ``pip``,:: $ pip install kombu To install using ``easy_install``,:: $ easy_install kombu If you have downloaded a source tarball you can install it by doing the following,:: $ python setup.py build # python setup.py install # as root kombu-5.5.3/LICENSE000066400000000000000000000032001477772317200136620ustar00rootroot00000000000000Copyright (c) 2015-2016 Ask Solem & contributors. All rights reserved. Copyright (c) 2012-2014 GoPivotal Inc & contributors. All rights reserved. Copyright (c) 2009-2012, Ask Solem & contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Ask Solem nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
kombu-5.5.3/MANIFEST.in000066400000000000000000000010031477772317200144120ustar00rootroot00000000000000include AUTHORS include Changelog include FAQ include INSTALL include LICENSE include MANIFEST.in include README.rst include README include THANKS include TODO include setup.cfg recursive-include extra * recursive-include docs * recursive-include kombu *.py recursive-include t *.py recursive-include requirements *.txt recursive-include funtests *.py setup.cfg recursive-include examples *.py recursive-exclude docs/_build * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-exclude * .*.sw* kombu-5.5.3/Makefile000066400000000000000000000073071477772317200143310ustar00rootroot00000000000000PROJ=kombu PGPIDENT="Celery Security Team" PYTHON=python PYTEST=py.test GIT=git TOX=tox ICONV=iconv PYDOCSTYLE=pydocstyle FLAKE8=flake8 FLAKEPLUS=flakeplus SPHINX2RST=sphinx2rst TESTDIR=t SPHINX_DIR=docs/ SPHINX_BUILDDIR="${SPHINX_DIR}/_build" README=README.rst README_SRC="docs/templates/readme.txt" CONTRIBUTING=CONTRIBUTING.rst CONTRIBUTING_SRC="docs/contributing.rst" SPHINX_HTMLDIR="${SPHINX_BUILDDIR}/html" DOCUMENTATION=Documentation FLAKEPLUSTARGET=2.7 all: help help: @echo "docs - Build documentation." @echo "test-all - Run tests for all supported python versions." @echo "distcheck ---------- - Check distribution for problems." @echo " test - Run unittests using current python." @echo " lint ------------ - Check codebase for problems." @echo " apicheck - Check API reference coverage." @echo " configcheck - Check configuration reference coverage." @echo " readmecheck - Check README.rst encoding." @echo " contribcheck - Check CONTRIBUTING.rst encoding" @echo " flakes -------- - Check code for syntax and style errors." @echo " flakecheck - Run flake8 on the source code." @echo " flakepluscheck - Run flakeplus on the source code." @echo " pep257check - Run pep257 on the source code." @echo "readme - Regenerate README.rst file." @echo "contrib - Regenerate CONTRIBUTING.rst file" @echo "clean-dist --------- - Clean all distribution build artifacts." @echo " clean-git-force - Remove all uncommitted files." @echo " clean ------------ - Non-destructive clean" @echo " clean-pyc - Remove .pyc/__pycache__ files" @echo " clean-docs - Remove documentation build artifacts." @echo " clean-build - Remove setup artifacts." @echo "bump - Bump patch version number." @echo "bump-minor - Bump minor version number." @echo "bump-major - Bump major version number." @echo "release - Make PyPI release." 
clean: clean-docs clean-pyc clean-build clean-dist: clean clean-git-force bump: bumpversion patch bump-minor: bumpversion minor bump-major: bumpversion major release: python setup.py register sdist bdist_wheel upload --sign --identity="$(PGPIDENT)" build-docs: (cd "$(SPHINX_DIR)"; $(MAKE) html) Documentation: ln -sf "$(SPHINX_HTMLDIR)" $@ docs: build-docs Documentation clean-docs: -rm -rf "$(SPHINX_BUILDDIR)" lint: flakecheck apicheck configcheck readmecheck apicheck: (cd "$(SPHINX_DIR)"; $(MAKE) apicheck) configcheck: (cd "$(SPHINX_DIR)"; $(MAKE) configcheck) flakecheck: $(FLAKE8) "$(PROJ)" "$(TESTDIR)" flakediag: -$(MAKE) flakecheck flakepluscheck: $(FLAKEPLUS) --$(FLAKEPLUSTARGET) "$(PROJ)" "$(TESTDIR)" flakeplusdiag: -$(MAKE) flakepluscheck pep257check: $(PYDOCSTYLE) "$(PROJ)" flakes: flakediag flakeplusdiag pep257check clean-readme: -rm -f $(README) readmecheck: $(ICONV) -f ascii -t ascii $(README) >/dev/null $(README): $(SPHINX2RST) "$(README_SRC)" --ascii > $@ readme: clean-readme $(README) readmecheck clean-contrib: -rm -f "$(CONTRIBUTING)" $(CONTRIBUTING): $(SPHINX2RST) "$(CONTRIBUTING_SRC)" > $@ contrib: clean-contrib $(CONTRIBUTING) clean-pyc: -find . -type f -a \( -name "*.pyc" -o -name "*$$py.class" \) | xargs rm -find . -type d -name "__pycache__" | xargs rm -r removepyc: clean-pyc clean-build: rm -rf build/ dist/ .eggs/ *.egg-info/ .coverage cover/ clean-git: $(GIT) clean -xdn clean-git-force: $(GIT) clean -xdf test-all: clean-pyc $(TOX) test: $(PYTHON) setup.py test cov: $(PYTEST) -x --cov=kombu --cov-report=html build: $(PYTHON) setup.py sdist bdist_wheel distcheck: lint test clean dist: readme contrib clean-dist build kombu-5.5.3/README.rst000066400000000000000000000316461477772317200143630ustar00rootroot00000000000000======================================== kombu - Messaging library for Python ======================================== |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |downloads| :Version: 5.5.3 :Documentation: https://kombu.readthedocs.io/ :Download: https://pypi.org/project/kombu/ :Source: https://github.com/celery/kombu/ :Keywords: messaging, amqp, rabbitmq, redis, mongodb, python, queue About ===== `Kombu` is a messaging library for Python. The aim of `Kombu` is to make messaging in Python as easy as possible by providing an idiomatic high-level interface for the AMQ protocol, and also provide proven and tested solutions to common messaging problems. `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol for message orientation, queuing, routing, reliability and security, for which the `RabbitMQ`_ messaging server is the most popular implementation. Features ======== * Allows application authors to support several message server solutions by using pluggable transports. * AMQP transport using the `py-amqp`_, or `qpid-python`_ libraries. * Virtual transports makes it really easy to add support for non-AMQP transports. There is already built-in support for `Redis`_, `Amazon SQS`_, `ZooKeeper`_, `SoftLayer MQ`_, `MongoDB`_ and `Pyro`_. * In-memory transport for unit testing. * Supports automatic encoding, serialization and compression of message payloads. * Consistent exception handling across transports. * The ability to ensure that an operation is performed by gracefully handling connection and channel errors. * Several annoyances with `amqplib`_ has been fixed, like supporting timeouts and the ability to wait for events on more than one channel. 
* Projects already using `carrot`_ can easily be ported by using a compatibility layer. For an introduction to AMQP you should read the article `Rabbits and warrens`_, and the `Wikipedia article about AMQP`_. .. _`RabbitMQ`: https://www.rabbitmq.com/ .. _`AMQP`: https://amqp.org .. _`py-amqp`: https://pypi.org/project/amqp/ .. _`qpid-python`: https://pypi.org/project/qpid-python/ .. _`Redis`: https://redis.io .. _`Amazon SQS`: https://aws.amazon.com/sqs/ .. _`Zookeeper`: https://zookeeper.apache.org/ .. _`Rabbits and warrens`: http://web.archive.org/web/20160323134044/http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ .. _`amqplib`: https://barryp.org/software/py-amqplib/ .. _`Wikipedia article about AMQP`: https://en.wikipedia.org/wiki/AMQP .. _`carrot`: https://pypi.org/project/carrot/ .. _`librabbitmq`: https://pypi.org/project/librabbitmq/ .. _`Pyro`: https://pyro4.readthedocs.io/ .. _`SoftLayer MQ`: https://sldn.softlayer.com/reference/messagequeueapi .. _`MongoDB`: https://www.mongodb.com/ .. _transport-comparison: Transport Comparison ==================== +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | **Priority** | **TTL** | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *amqp* | Native | Yes | Yes | Yes | Yes [#f3]_ | Yes [#f4]_ | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *qpid* | Native | Yes | Yes | Yes | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *mongodb* | Virtual | Yes | Yes | Yes | Yes | Yes | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *Pyro* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ .. [#f1] Declarations only kept in memory, so exchanges/queues must be declared by all clients that needs them. .. [#f2] Fanout supported via storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. [#f3] AMQP Message priority support depends on broker implementation. .. [#f4] AMQP Message/Queue TTL support depends on broker implementation. Documentation ------------- Kombu is using Sphinx, and the latest documentation can be found here: https://kombu.readthedocs.io/ Quick overview -------------- .. 
code:: python from kombu import Connection, Exchange, Queue media_exchange = Exchange('media', 'direct', durable=True) video_queue = Queue('video', exchange=media_exchange, routing_key='video') def process_media(body, message): print(body) message.ack() # connections with Connection('amqp://guest:guest@localhost//') as conn: # produce producer = conn.Producer(serializer='json') producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, exchange=media_exchange, routing_key='video', declare=[video_queue]) # the declare above, makes sure the video queue is declared # so that the messages can be delivered. # It's a best practice in Kombu to have both publishers and # consumers declare the queue. You can also declare the # queue manually using: # video_queue(conn).declare() # consume with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: # Process messages and handle events on all channels while True: conn.drain_events() # Consume from several queues on the same channel: video_queue = Queue('video', exchange=media_exchange, key='video') image_queue = Queue('image', exchange=media_exchange, key='image') with connection.Consumer([video_queue, image_queue], callbacks=[process_media]) as consumer: while True: connection.drain_events() Or handle channels manually: .. code:: python with connection.channel() as channel: producer = Producer(channel, ...) consumer = Consumer(channel) All objects can be used outside of with statements too, just remember to close the objects after use: .. code:: python from kombu import Connection, Consumer, Producer connection = Connection() # ... connection.release() consumer = Consumer(channel_or_connection, ...) consumer.register_callback(my_callback) consumer.consume() # .... consumer.cancel() `Exchange` and `Queue` are simply declarations that can be pickled and used in configuration files etc. They also support operations, but to do so they need to be bound to a channel. Binding exchanges and queues to a connection will make it use that connections default channel. :: >>> exchange = Exchange('tasks', 'direct') >>> connection = Connection() >>> bound_exchange = exchange(connection) >>> bound_exchange.delete() # the original exchange is not affected, and stays unbound. >>> exchange.delete() raise NotBoundError: Can't call delete on Exchange not bound to a channel. Terminology =========== There are some concepts you should be familiar with before starting: * Producers Producers sends messages to an exchange. * Exchanges Messages are sent to exchanges. Exchanges are named and can be configured to use one of several routing algorithms. The exchange routes the messages to consumers by matching the routing key in the message with the routing key the consumer provides when binding to the exchange. * Consumers Consumers declares a queue, binds it to a exchange and receives messages from it. * Queues Queues receive messages sent to exchanges. The queues are declared by consumers. * Routing keys Every message has a routing key. The interpretation of the routing key depends on the exchange type. There are four default exchange types defined by the AMQP standard, and vendors can define custom types (so see your vendors manual for details). These are the default exchange types defined by AMQP/0.8: * Direct exchange Matches if the routing key property of the message and the `routing_key` attribute of the consumer are identical. * Fan-out exchange Always matches, even if the binding does not have a routing key. 
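  For example, a fan-out exchange is declared simply by using the
  ``fanout`` exchange type; every queue bound to it receives a copy of
  each message (a small sketch in the style of the quick overview above,
  with arbitrary names):

  .. code:: python

      from kombu import Exchange, Queue

      events_exchange = Exchange('events', 'fanout')
      # no routing key is needed; both queues get every message
      queue_a = Queue('worker-a-events', exchange=events_exchange)
      queue_b = Queue('worker-b-events', exchange=events_exchange)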
Installation
============

You can install `Kombu` either via the Python Package Index (PyPI)
or from source.

To install using `pip`:

::

    $ pip install kombu

To install using `easy_install`:

::

    $ easy_install kombu

If you have downloaded a source tarball you can install it
by doing the following:

::

    $ python setup.py build
    # python setup.py install # as root

Getting Help
============

Mailing list
------------

Join the `celery-users`_ mailing list.

.. _`celery-users`: https://groups.google.com/group/celery-users/

You can also ask questions in the `kombu forum`_ on GitHub Discussions.

.. _`kombu forum`: https://github.com/celery/kombu/discussions

Bug tracker
===========

If you have any suggestions, bug reports or annoyances please report them
to our issue tracker at https://github.com/celery/kombu/issues/

Contributing
============

Development of `Kombu` happens at GitHub: https://github.com/celery/kombu

You are highly encouraged to participate in the development. If you don't
like GitHub (for some reason) you're welcome to send regular patches.

License
=======

This software is licensed under the `New BSD License`. See the `LICENSE`
file in the top distribution directory for the full license text.

.. |build-status| image:: https://github.com/celery/kombu/actions/workflows/ci.yaml/badge.svg
    :alt: Build status
    :target: https://github.com/celery/kombu/actions/workflows/ci.yml

.. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=main
    :target: https://codecov.io/github/celery/kombu?branch=main

.. |license| image:: https://img.shields.io/pypi/l/kombu.svg
    :alt: BSD License
    :target: https://opensource.org/licenses/BSD-3-Clause

.. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg
    :alt: Kombu can be installed via wheel
    :target: https://pypi.org/project/kombu/

.. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg
    :alt: Supported Python versions.
    :target: https://pypi.org/project/kombu/

.. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg
    :alt: Support Python implementations.
    :target: https://pypi.org/project/kombu/

.. |downloads| image:: https://pepy.tech/badge/kombu
    :target: https://pepy.tech/project/kombu

kombu as part of the Tidelift Subscription
==========================================

The maintainers of kombu and thousands of other packages are working with
Tidelift to deliver commercial support and maintenance for the open source
dependencies you use to build your applications. Save time, reduce risk,
and improve code health, while paying the maintainers of the exact
dependencies you use. `Learn more
<https://tidelift.com/subscription/pkg/pypi-kombu?utm_source=pypi-kombu&utm_medium=referral&utm_campaign=readme&utm_term=repo>`_.

--
kombu-5.5.3/SECURITY.md000066400000000000000000000006741477772317200144600ustar00rootroot00000000000000
# Security Policy

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| 5.5.x   | :white_check_mark: |
| 5.4.x   | :x:                |
| 5.3.x   | :x:                |
| 5.2.x   | :x:                |
| 5.1.x   | :x:                |
| < 5.0   | :x:                |

## Reporting a Vulnerability

Please reach out to tomer.nosrati@gmail.com or auvipy@gmail.com for reporting security concerns via email.
kombu-5.5.3/THANKS000066400000000000000000000017241477772317200136010ustar00rootroot00000000000000======== THANKS ======== From ``carrot`` THANKS file =========================== * Thanks to Barry Pederson for the py-amqplib library. * Thanks to Grégoire Cachet for bug reports. * Thanks to Martin Mahner for the Sphinx theme. * Thanks to jcater for bug reports. * Thanks to sebest for bug reports. * Thanks to greut for bug reports From ``django-kombu`` THANKS file ================================= * Thanks to Rajesh Dhawan and other authors of django-queue-service for the database model implementation. See http://code.google.com/p/django-queue-service/. From ``kombu-sqlalchemy`` THANKS file ===================================== * Thanks to Rajesh Dhawan and other authors of django-queue-service for the database model implementation. See http://code.google.com/p/django-queue-service/. * Thanks to haridsv for the draft SQLAlchemy port (which can still be found at http://github.com/haridsv/celery-alchemy-poc) kombu-5.5.3/TODO000066400000000000000000000001221477772317200133450ustar00rootroot00000000000000Please see our Issue Tracker at GitHub: http://github.com/celery/kombu/issues kombu-5.5.3/conftest.py000066400000000000000000000020471477772317200150640ustar00rootroot00000000000000from __future__ import annotations import pytest def pytest_addoption(parser): parser.addoption( "-E", action="append", metavar="NAME", help="only run tests matching the environment NAME.", ) def pytest_configure(config): # register an additional marker config.addinivalue_line( "markers", "env(name): mark test to run only on named environment", ) config.addinivalue_line("markers", "replace_module_value") config.addinivalue_line("markers", "masked_modules") config.addinivalue_line("markers", "ensured_modules") config.addinivalue_line("markers", "sleepdeprived_patched_module") def pytest_runtest_setup(item): envnames = [mark.args[0] for mark in item.iter_markers(name='env')] if envnames: if ( item.config.getoption("-E") is None or len(set(item.config.getoption("-E")) & set(envnames)) == 0 ): # We skip test if does not mentioned by -E param pytest.skip("test requires env in %r" % envnames) kombu-5.5.3/docs/000077500000000000000000000000001477772317200136125ustar00rootroot00000000000000kombu-5.5.3/docs/Makefile000066400000000000000000000175111477772317200152570ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " apicheck to verify that all modules are present in autodoc" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PROJ.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PROJ.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PROJ" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PROJ" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: apicheck apicheck: $(SPHINXBUILD) -b apicheck $(ALLSPHINXOPTS) $(BUILDDIR)/apicheck .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
kombu-5.5.3/docs/_ext/000077500000000000000000000000001477772317200145515ustar00rootroot00000000000000kombu-5.5.3/docs/_ext/.keep000066400000000000000000000000001477772317200154640ustar00rootroot00000000000000kombu-5.5.3/docs/_static/000077500000000000000000000000001477772317200152405ustar00rootroot00000000000000kombu-5.5.3/docs/_static/.keep000066400000000000000000000000001477772317200161530ustar00rootroot00000000000000kombu-5.5.3/docs/_templates/000077500000000000000000000000001477772317200157475ustar00rootroot00000000000000kombu-5.5.3/docs/_templates/sidebardonations.html000066400000000000000000000060121477772317200221640ustar00rootroot00000000000000 kombu-5.5.3/docs/changelog.rst000066400000000000000000000000361477772317200162720ustar00rootroot00000000000000.. include:: ../Changelog.rst kombu-5.5.3/docs/conf.py000066400000000000000000000016521477772317200151150ustar00rootroot00000000000000from __future__ import annotations from sphinx_celery import conf globals().update(conf.build_config( 'kombu', __file__, project='Kombu', version_dev='5.4', version_stable='5.3', canonical_url='https://kombu.readthedocs.io/', webdomain='kombu.readthedocs.io', github_project='celery/kombu', author='Ask Solem & contributors', author_name='Ask Solem', copyright='2009-2019', publisher='Celery Project', html_logo='images/kombusmall.jpg', html_favicon='images/favicon.ico', html_prepend_sidebars=['sidebardonations.html'], extra_extensions=['sphinx.ext.napoleon'], apicheck_ignore_modules=[ 'kombu.entity', 'kombu.messaging', 'kombu.asynchronous.aws.ext', 'kombu.asynchronous.aws.sqs.ext', 'kombu.transport.qpid_patches', 'kombu.transport.librabbitmq', 'kombu.utils', 'kombu.transport.virtual.base', ], )) kombu-5.5.3/docs/faq.rst000066400000000000000000000000241477772317200151070ustar00rootroot00000000000000.. include:: ../FAQ kombu-5.5.3/docs/images/000077500000000000000000000000001477772317200150575ustar00rootroot00000000000000kombu-5.5.3/docs/images/favicon.ico000066400000000000000000000124211477772317200172000ustar00rootroot00000000000000PNG  IHDR szzsRGB pHYs^ KiTXtXML:com.adobe.xmp 0 1 0 31/1 320 2 1/1 80 46 80 1/125 0 0221 0 0 0 80 0 False True 3 3 False 4/1 0 28/5 0/1 0 0 320 21/5 0 5 0100 1 2 NIKON CORPORATION NIKON D50 Adobe Photoshop CS2 Windows 2008-03-05T13:23:21.80 2008-03-24T12:01:10.80 2008-03-05T13:23:21.80 X _IDATX W[l=3w^߂lT(mD>UT)URoTj!R%>$ I%`cm׷ﳳ=mw<ٙ?wFq`u,&ȼ}%$ɸE2Lf3R< sIGq7PVQ,QzMGTB.F:FNba].Nkge_FǼGlS`q:`u../-BwȺtHܱNj쬱}Tv ,@ jG\bK775۠0h?zUXzPJC Vx0TWbUwmeUM xXmuDj#募=d`F`X,uִDC`y0w'8I)s#DZ4wo&T{:f͙Y{B|` +q_x/Cew7׮A`|Y!~\t oWp]q܏}"C.|fw,hJd08ֽ/|LN(Nl| G㘟?3g`yqPd56+TB`5*[u8 nT |ű ;r%ЄG7\ܹDQBC20;p$ ˰\b so=XB2p8_B+lfFQSP*iܡvdIL޾ VM_ dє"B KjʷT:p"j1l&: "^ĖcSsQ71}MhCрȑ.Rbeݺ VNh((yD;{ k;c}m S'?P aR&6UEd']M!c9JD7~*#ϩ: E=76Wpe,?z \Ba* m*1qAY氣so֖Q̥o!NAuP5 </ϲ\կ1x{4Ûh;`a9_$RFu3 BќvKShB B~~,[o}}y<m(:ݣ!pQe7=˃Eܺqh>E&ncnj0l,vb11R"*{4h$k51&1NtƮ\ƉT|{p0ܿ"~H膍> HH65ݣVk䩣&bWiV@o*K!|~}S;eW v1LSd.*VGȏ8;^5^د}84r>bޚ8`: 3!~TD(!Uݘ9eHs7mv$ ܙ7D)FϗBq3P8?z`l\*z{I-X"SYj"ӇsS SRk_Xrh y1r|f-k }KM@,,F|;H1J%↖:bt1\~,e"MBɖWrԌV®*:?9稒sm՛QZy*VHwHĀ1i(K'O–d/0<IB`V "@Uie!/.^R}D- BO|/`,ij wP. 
t:8=;YzN!EKIKnۢ]X3 'q Fl4;Cx9Ͼ֖c<{O5zB5% nl4DɢᰪVڶh@ _@U|p|f*ED"atⷈEv\T&ۀ7(L'LwDwo2s`-mQ2O ?0"m{ Owa {QilvBkV1)M##ܐXQʦs3R'v ;BMd %ٌhLT2Jk׀]e7Vc0} %rKJ'%@ag{Op!&EMmW08a߈5Je9)W bPzdVT."d1`eۣ!vq?h6=d7 IENDB`kombu-5.5.3/docs/images/kombu.jpg000066400000000000000000003414311477772317200167040ustar00rootroot00000000000000JFIF+Photoshop 3.08BIM%8BIM8BIM&?8BIM 8BIM8BIM 8BIM 8BIM' 8BIMH/fflff/ff2Z5-8BIMp8BIM@@8BIM8BIMEDSC_0124nullboundsObjcRct1Top longLeftlongBtomlongRghtlongslicesVlLsObjcslicesliceIDlonggroupIDlongoriginenum ESliceOrigin autoGeneratedTypeenum ESliceTypeImg boundsObjcRct1Top longLeftlongBtomlongRghtlongurlTEXTnullTEXTMsgeTEXTaltTagTEXTcellTextIsHTMLboolcellTextTEXT horzAlignenumESliceHorzAligndefault vertAlignenumESliceVertAligndefault bgColorTypeenumESliceBGColorTypeNone topOutsetlong leftOutsetlong bottomOutsetlong rightOutsetlong8BIM( ?8BIM8BIM %h,%LJFIFHH Adobe_CMAdobed            " ?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%)S.ƀKCՌsU}blg0ONְY~}Y6\[ĵú"wTK ׀^}[=Jʿ_u Fei-s1m@.ቆ~вRA /bO^2h#s}u~AEޓFP3 'r%Ɵ#¼q\u ΥkcbصLqqeL긶Zƌ\O沌!cJ+^%fl,uxֶ$ճO_f=w"ʚk6=ku4=VG8COS'ڙQ\1Iy뺻d1#+!i_cWU[nSq!8dO=$U:a> ;ˈ'V1Y^7a6쳣`oQp%u{ ٸ`mMkIy=uu9cdZq}|}13*ʬ0-:"Q1$⤒I$?TI%)s'ݐƳ hk=3[eͱl]>_S*-*1wߛe2"l*2K8fG[z_ߓhybgc/6K̦ܯgWOiYpU,~KY+1pdjESZl[G^ɰӺCz?;ЪoBͿw[ɪoU&˨E?&KOMÅax9 `n]e{+v?qgzis+8Ãgx8(2x {2룤F-׸5Ƭ8^'Z.=7=kqN6 6=K]t3m1?",U\#}u5C}oeћ?j7^mʶ zBƊ,u>u6jo[=㐱)_VLDc+s75fQz3=>nX7<0-:߳ƳQm>CCv;W1262ëmU32]D6{rY,|9ߴScI2W6Rb`eU7CSaG(qZ_q?ޘ .y%bcuge,k)fui5XrM~ҿKJޙԮ3&9cneU~+,H';fs?`}Vi{\,q>WXssUK1ZiۑM̶z =N8}D}?w&Wec=CER^-~ONS*Xg^ϡ6{]c\dK+9]N=ZODGRI$TI%)q\)ٌe~;Yccf%_S7 Kz_H;Ck_bGb<@OB xYZ\lf]5^=Fޛ]BߡuRqr/lu̶i߲PIs/ձoc*(M=|{FEd\ګe>~>z\PWaacm3/詊J$9^, MGŏؿ=57, ϩvf}pwbgP(t 2eøhQ4.4Pj JR&R&R:u.1ڃΫזn$6Dߥōkj vNE`;pUUdcծcь$ ܒHqo[pk+Ʒ"NY} ;GHNiF]hsOo/NŹ=&W]`}dVf_zT[7-i}wXQ:M:iichKkҫm?E[3z7Rx3Yck n,]UCY۲Ѷo}?1[vG+q$;{A\Onַtli#^K\ym{[C_~H7:YϷ1_zmѮr_Rfc6f>6Mp/d}֢6jnFa5I$K_TI%)p[$}g,?̯޻i~s5*PF;_&VUwlf`!w_[O@#M6OnO5$T wͅ76i!7_wZ`gCoMT1, :[~bI>53Jھhk湦O;ͳciF7OtcoEI ~ ^K~:+yFvn\"5Wu۲}/z{陾O' 3%6GmYݗ]VbY_W}Me_+WC~m̗gr~OTiٽSp0wNuYv~epk Ħ&F_m>vYtԫ3񟅎Vڮ{ Y9m?e1iue'juέcm v5NS]UW/~Tgd]qwc4C挆?[}{G'XXY]?^>yd4]P۝]oO~sz3,M_KbVKj/zX5l,wfUYuzǹ&Lŷc^=;7кv+mnc][Ou m9]4W[FUYe<2G vT%w_åh_[z{LU}/asҳ}U˃̪:Uo-qre~e7Uv=|^I$JTI%)pYH?\:Hq|}dq}4nqM+.^xtjnj?ig?V{#YzexXox?ɴ粐&vGU1,=60w'؊luTK@p"OskTA5m"5kţca'7 n2ky멻+~[Ӻ+ޛ8}e.8ƍm2}*Kj_[VYF 74nvZ^c?ªn]*쁹F]XV*K-[u nّ}Sx;m>yOJ3g)B ]7]ۏ~%Ksq)K[me7~eXN5]^=W{n}}գ[?K[OaGuޏ?fM]Aٖ0Vdh~m:{-4U}ӪѱߡroN־K2K>`Tl%{]?Rt,LbUӲOvvPuӐAͮW?s?T7:[y~f6̇z>7M]/5}ccg,JjLQ~N ?&}sZnw؏RpCcϣ{+n:܏2l&29Nʷ!]}>k1p{4mmWQXz[-# UT~ icT>>_k-[j 꾰tnd0[~k}L?JNI$HTI%)p}xىib. k8pťvS<$Nj_SKzze,s鱶XYwVop,ct.sG]7srƿ\ H:L4~k?&=V>3sn,Qj}dX[znG#Bg[^6+}<|{ڪc2}L_Ko6u"xq w}mTQekU*VVE8/h5yСeSճ!:*v۰mbYqoLu=ޯlQ:1grW~􎣆 ][,>q뱾Qc}T2/ro36Z푻L֬GV6F7j>Sra=ŦS[@aN+e>=9}Bkz_\3Ԟs3+uFEOm]Nut(g*ͮuWZޕeNOz^L竢U]4toVw@/]_w;k~KӫݏgY4Yu^oqmc+Σ~noF%S[?"ӝly mEzoGݟHWI9t܎uXc ~~C5~ Gbu:N%n-yֆ}9ޯkudރKze mXƿ+-)~еcѹW[i:4mmy-wz3汬/ٻxG5Xպ֚P I|;wQn~.CSoR@`VZmg1~/z龬㚰=\Ĉ;kmt t9lmP컋Ժi'mO$Of{W/6mngF-[MeQEKh)5$HTI%)y=>abZ uƛw&;_[1H?3] c`ϖ̪I6=6wnf-c~VGOΣ7Y[\}c=;v>:33){౻=QC?K?G@mX؅vʪ޳:?_ NrfuXX,}Wտ:Mr!Y_V1l^S@~Ml9}4YdwWvnq瑩emu_F%on.'[gG׾t,|lzu6쪻Ÿ?hٵW?"}TǑn09iYlkFE.$F@4v:nNeYz-/ o~+XʯgIzΣe7=kPh֦TdhK'Qv~8n&'}FYV}>U}K)ʣ:}V5aGNsjwU9?TAg lK7sYء{Ǩ)D} TOWo3\*ǨXkqkjF%YlpeUx~Cѩ}f7\H˲%kZ{M$4,1]J1_qd״XJcf5ߤ&5,^_bf.EBlublckԳ=3!{XQ8K2skk`׃h eW~+]4u{wm%>@n~k6_u rS(ܼۜgu m ONVGUפI$HTI%)puϮQ؍t*r8չw"\[}!NNs9ge,lil%'}uŷtԹ_G3cQwXm7xxszfʷ @Ӱ>DWwag{s>ÓQ&MnXE6c@۱%~oY_WzE9=uˮ0ەclb͖U};˯{ku{.s x$:G;svsfܾ̾em? 
u"u 7 lh^Q9k3/'')Ue7ֺNnUU׳]Ů^>V^Tu̺I8 e/g}uϪS볨cdcgٕ{lLksngߠ8_osM}:32wlUewz?{_EJM,yqSwmZDYyh7I8ʝ907pu[@8m4Z_p\׏C> ;=+fXYkH݇mEV-ET1{ȤS e;q+ek,H7q'{`QM9Y}ZiW9?%M{z׽{[IdXWb+ {1\0{v-si{}Oru .} 漦6Y{z/=2UڵcD4cciq:Ƿ}z?hêQӯKzR˟F5wwё/H\9FCov=dNWOYUz]IK2+auYUmdnߥg.v1_ [VҷB])n ,ˮc8Km5ܕ@4,{[`s@;x+V~oWz/K:cܖ;tc݌3~5ޭ-.lYgoK .k:MΠiu8g'`:]X%^T˽z?"jőZk1a΢_QMoxs~U_U#n2ەӫs`ooq]}JizIW`axkw" s{Z;9h;SCg[vaߺ}ǯ~[:~=,V4*ژMR;Eyic<RS XۜlVYg ;_5sP\%4c},^=jr]c1ˢh]I$/ˆy;3 $e= zS[2:X5PV%<=.+v#hCXA'6>c[˥\=vG5~M˽JurdZ ZI\`e}TĽζUk"ŎٻgieMߴ]o(<U;m1滨6!;]vu7G;.$Ŧ2~YwGwI <E犪gQ"lfNkcXXt?fU'&I 3&HtLoRwt ~]8 $-b1k/d_Hsh&=xW\'֫/=K`!o"͒I8BIM!UAdobe PhotoshopAdobe Photoshop CS28BIM)ExifMM*  (12ihNIKON CORPORATIONNIKON D50Adobe Photoshop CS2 Windows2008:03:24 12:01:10&"0221֑   , 8080800100H6>.    8 2008:03:05 13:23:212008:03:05 13:23:21* 6 ASCII R980100(%LHHJFIFHH Adobe_CMAdobed            " ?   3!1AQa"q2B#$Rb34rC%Scs5&DTdE£t6UeuF'Vfv7GWgw5!1AQaq"2B#R3$brCScs4%&5DTdEU6teuFVfv'7GWgw ?TI%)S.ƀKCՌsU}blg0ONְY~}Y6\[ĵú"wTK ׀^}[=Jʿ_u Fei-s1m@.ቆ~вRA /bO^2h#s}u~AEޓFP3 'r%Ɵ#¼q\u ΥkcbصLqqeL긶Zƌ\O沌!cJ+^%fl,uxֶ$ճO_f=w"ʚk6=ku4=VG8COS'ڙQ\1Iy뺻d1#+!i_cWU[nSq!8dO=$U:a> ;ˈ'V1Y^7a6쳣`oQp%u{ ٸ`mMkIy=uu9cdZq}|}13*ʬ0-:"Q1$⤒I$?TI%)s'ݐƳ hk=3[eͱl]>_S*-*1wߛe2"l*2K8fG[z_ߓhybgc/6K̦ܯgWOiYpU,~KY+1pdjESZl[G^ɰӺCz?;ЪoBͿw[ɪoU&˨E?&KOMÅax9 `n]e{+v?qgzis+8Ãgx8(2x {2룤F-׸5Ƭ8^'Z.=7=kqN6 6=K]t3m1?",U\#}u5C}oeћ?j7^mʶ zBƊ,u>u6jo[=㐱)_VLDc+s75fQz3=>nX7<0-:߳ƳQm>CCv;W1262ëmU32]D6{rY,|9ߴScI2W6Rb`eU7CSaG(qZ_q?ޘ .y%bcuge,k)fui5XrM~ҿKJޙԮ3&9cneU~+,H';fs?`}Vi{\,q>WXssUK1ZiۑM̶z =N8}D}?w&Wec=CER^-~ONS*Xg^ϡ6{]c\dK+9]N=ZODGRI$TI%)q\)ٌe~;Yccf%_S7 Kz_H;Ck_bGb<@OB xYZ\lf]5^=Fޛ]BߡuRqr/lu̶i߲PIs/ձoc*(M=|{FEd\ګe>~>z\PWaacm3/詊J$9^, MGŏؿ=57, ϩvf}pwbgP(t 2eøhQ4.4Pj JR&R&R:u.1ڃΫזn$6Dߥōkj vNE`;pUUdcծcь$ ܒHqo[pk+Ʒ"NY} ;GHNiF]hsOo/NŹ=&W]`}dVf_zT[7-i}wXQ:M:iichKkҫm?E[3z7Rx3Yck n,]UCY۲Ѷo}?1[vG+q$;{A\Onַtli#^K\ym{[C_~H7:YϷ1_zmѮr_Rfc6f>6Mp/d}֢6jnFa5I$K_TI%)p[$}g,?̯޻i~s5*PF;_&VUwlf`!w_[O@#M6OnO5$T wͅ76i!7_wZ`gCoMT1, :[~bI>53Jھhk湦O;ͳciF7OtcoEI ~ ^K~:+yFvn\"5Wu۲}/z{陾O' 3%6GmYݗ]VbY_W}Me_+WC~m̗gr~OTiٽSp0wNuYv~epk Ħ&F_m>vYtԫ3񟅎Vڮ{ Y9m?e1iue'juέcm v5NS]UW/~Tgd]qwc4C挆?[}{G'XXY]?^>yd4]P۝]oO~sz3,M_KbVKj/zX5l,wfUYuzǹ&Lŷc^=;7кv+mnc][Ou m9]4W[FUYe<2G vT%w_åh_[z{LU}/asҳ}U˃̪:Uo-qre~e7Uv=|^I$JTI%)pYH?\:Hq|}dq}4nqM+.^xtjnj?ig?V{#YzexXox?ɴ粐&vGU1,=60w'؊luTK@p"OskTA5m"5kţca'7 n2ky멻+~[Ӻ+ޛ8}e.8ƍm2}*Kj_[VYF 74nvZ^c?ªn]*쁹F]XV*K-[u nّ}Sx;m>yOJ3g)B ]7]ۏ~%Ksq)K[me7~eXN5]^=W{n}}գ[?K[OaGuޏ?fM]Aٖ0Vdh~m:{-4U}ӪѱߡroN־K2K>`Tl%{]?Rt,LbUӲOvvPuӐAͮW?s?T7:[y~f6̇z>7M]/5}ccg,JjLQ~N ?&}sZnw؏RpCcϣ{+n:܏2l&29Nʷ!]}>k1p{4mmWQXz[-# UT~ icT>>_k-[j 꾰tnd0[~k}L?JNI$HTI%)p}xىib. k8pťvS<$Nj_SKzze,s鱶XYwVop,ct.sG]7srƿ\ H:L4~k?&=V>3sn,Qj}dX[znG#Bg[^6+}<|{ڪc2}L_Ko6u"xq w}mTQekU*VVE8/h5yСeSճ!:*v۰mbYqoLu=ޯlQ:1grW~􎣆 ][,>q뱾Qc}T2/ro36Z푻L֬GV6F7j>Sra=ŦS[@aN+e>=9}Bkz_\3Ԟs3+uFEOm]Nut(g*ͮuWZޕeNOz^L竢U]4toVw@/]_w;k~KӫݏgY4Yu^oqmc+Σ~noF%S[?"ӝly mEzoGݟHWI9t܎uXc ~~C5~ Gbu:N%n-yֆ}9ޯkudރKze mXƿ+-)~еcѹW[i:4mmy-wz3汬/ٻxG5Xպ֚P I|;wQn~.CSoR@`VZmg1~/z龬㚰=\Ĉ;kmt t9lmP컋Ժi'mO$Of{W/6mngF-[MeQEKh)5$HTI%)y=>abZ uƛw&;_[1H?3] c`ϖ̪I6=6wnf-c~VGOΣ7Y[\}c=;v>:33){౻=QC?K?G@mX؅vʪ޳:?_ NrfuXX,}Wտ:Mr!Y_V1l^S@~Ml9}4YdwWvnq瑩emu_F%on.'[gG׾t,|lzu6쪻Ÿ?hٵW?"}TǑn09iYlkFE.$F@4v:nNeYz-/ o~+XʯgIzΣe7=kPh֦TdhK'Qv~8n&'}FYV}>U}K)ʣ:}V5aGNsjwU9?TAg lK7sYء{Ǩ)D} TOWo3\*ǨXkqkjF%YlpeUx~Cѩ}f7\H˲%kZ{M$4,1]J1_qd״XJcf5ߤ&5,^_bf.EBlublckԳ=3!{XQ8K2skk`׃h eW~+]4u{wm%>@n~k6_u rS(ܼۜgu m ONVGUפI$HTI%)puϮQ؍t*r8չw"\[}!NNs9ge,lil%'}uŷtԹ_G3cQwXm7xxszfʷ @Ӱ>DWwag{s>ÓQ&MnXE6c@۱%~oY_WzE9=uˮ0ەclb͖U};˯{ku{.s x$:G;svsfܾ̾em? 
u"u 7 lh^Q9k3/'')Ue7ֺNnUU׳]Ů^>V^Tu̺I8 e/g}uϪS볨cdcgٕ{lLksngߠ8_osM}:32wlUewz?{_EJM,yqSwmZDYyh7I8ʝ907pu[@8m4Z_p\׏C> ;=+fXYkH݇mEV-ET1{ȤS e;q+ek,H7q'{`QM9Y}ZiW9?%M{z׽{[IdXWb+ {1\0{v-si{}Oru .} 漦6Y{z/=2UڵcD4cciq:Ƿ}z?hêQӯKzR˟F5wwё/H\9FCov=dNWOYUz]IK2+auYUmdnߥg.v1_ [VҷB])n ,ˮc8Km5ܕ@4,{[`s@;x+V~oWz/K:cܖ;tc݌3~5ޭ-.lYgoK .k:MΠiu8g'`:]X%^T˽z?"jőZk1a΢_QMoxs~U_U#n2ەӫs`ooq]}JizIW`axkw" s{Z;9h;SCg[vaߺ}ǯ~[:~=,V4*ژMR;Eyic<RS XۜlVYg ;_5sP\%4c},^=jr]c1ˢh]I$/ˆy;3 $e= zS[2:X5PV%<=.+v#hCXA'6>c[˥\=vG5~M˽JurdZ ZI\`e}TĽζUk"ŎٻgieMߴ]o(<U;m1滨6!;]vu7G;.$Ŧ2~YwGwI <E犪gQ"lfNkcXXt?fU'&I 3&HtLoRwt ~]8 $-b1k/d_Hsh&=xW\'֫/=K`!o"͒ICYhttp://ns.adobe.com/xap/1.0/ 1 2 1800000/10000 1800000/10000 2 NIKON CORPORATION NIKON D50 256,257,258,259,262,274,277,284,530,531,282,283,296,301,318,319,529,532,306,270,271,272,305,315,33432;7F2E6A5D5BEFC3067CAFD64E23E5F118 2008-03-24T12:01:10+01:00 Adobe Photoshop CS2 Windows 2008-03-24T12:01:10+01:00 2008-03-24T12:01:10+01:00 0221 0100 1 1 2 3 0 4/1 400 400 2008-03-05T13:23:21+01:00 2008-03-05T13:23:21+01:00 10/1250 56/10 0 0/6 42/10 5 0 True 3 3 False False 310/10 2 3 1 0 0 0 1/1 46 0 0 0 0 0 0 36864,40960,40961,37121,37122,40962,40963,37510,40964,36867,36868,33434,33437,34850,34852,34855,34856,37377,37378,37379,37380,37381,37382,37383,37384,37385,37386,37396,41483,41484,41486,41487,41488,41492,41493,41495,41728,41729,41730,41985,41986,41987,41988,41989,41990,41991,41992,41993,41994,41995,41996,42016,0,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,23,24,25,26,27,28,30;01E21FAC5F15A1DAF80AC79893D4F821 uuid:43F4408690F9DC1192FB80ADC6E99C63 uuid:44F4408690F9DC1192FB80ADC6E99C63 uuid:42F4408690F9DC1192FB80ADC6E99C63 uuid:42F4408690F9DC1192FB80ADC6E99C63 image/jpeg 3 sRGB IEC61966-2.1 C      C    P ! 1A"Q2aq #BR3$br%45CcsDSt&6d V !"1AQ2aq#BR3b$r4C%Scs&5DT6UVd ?"4Dh#DF"4Dh#DF"4Dh#DF"4Dh#DF"4Dh#DF"4Dh#DF"4Dh#DF"mV~ԵAܔ*t)$m2 ";et]HRj ϖzD}1H[n-' hDF"4Dh#DF"4Dh#DF"4Dh#DF~)a> pTm\葐S!?'DLzV{{ACzlZ6"8$fI.98}fe$"4Dh#DF"4Dh*uS.T2BK)M$Iwk,RQC5ʍ:{`}xntQT);7o_W'CQVT( kR/tU1-(e5F-6y!(d(X[yHTqh@D{.5Z66X<#Z9HTkfЮx>Yg',Rn6F9ԫhsR?Ia)AgkwSAq NI.J'V{e9iKW(v95%4{8SFAi䫷)Rs8>;-~/-[BJDM˓y;E2qQ zKug+#UMg-VMc&̺.عd5%M:mS:?2 vU^'Zke+3ڵ [kr9 Q ص)~h-A_᦭A_)[ {^"GM[y1y6zqNu^ (X)XK|YSiŒJ>[^?5+lKw_WK[f'5K H$;]ͻ)ScEKuT'T5$~xS~^9tvHR=~ټVusPW" %*ԗ@OugͥIhM%5*S=H.5"1,8HH?xP;v(I >9tHu} !N6OƈRJAtmyT\/J??2V#&"τw+ô&F"4Dh#DF"4Dh; Y8KhYAeC#?SgR[ei{ ʫL̸ /J\O#F.Xx'Tq%=-v~n\RETNJk$-n̳?}ɣ[rjҖKN##)8F H^J^VY-&#0@H'Q ^pbMY퀖cp5?IjH+h+]Xl~}ΎbgfZlook+}>9αoz r^m]"J\/c{֩:5&MhH 5.MZ 6*A/J%|i,j[ͥrd,LJlIF Ҋ& ]dJ^ֺG?TΧF* =dқ=45$޲ԦRbnCn}̂F!>F@γ޳nI\ޫp*mXo lɍ~SUyfϚȒ=$me|UM\Q,R)Äeu씃jja*V?GIW+F '"066}HڵXG3H(Pm8)+ @==uZv*qz@tE'Ғ M0c!yaw>|Sf"BRRVG۳d$~|F/{$xtn?)JAbl cY'L}VGSC4F-'7>zW?G{u'JR߿NVbO l}ŋ>t!D ~vI[~?[y^]:<)Τ'?rϪ>Aqdcn^Mώ?EVUwutgG?d?Ry?VAL7;gzgz)q+E YM&[~ӻ[,?xH{ 7; x髛itBx$">Ж_`J~q3Z)@Z۲?H%Ër y.nxvjhIil)UOuʪ}/uݟҽ>Ia2Wg~&+VR&t!ԥ1Uc,$H-уhuqf G P3oyd7&tSOCEюtt!)S59QSI졫tڰEls~*zo*p .Oa{] cm<:aP!Vgï~knJZ~2 —Vi>ۇ˔)y.$ԫo4ax*8_.ܟ߃Ce:UЮI>@b˸+>V3N(> &Dbx&/%!|ɉÔ\I hiyIG"Ƞt'Ԡ,*Nr9gې"&aIʖۙ8H*?l.|u5wD6c;[v|$[kdzuFr]ukkDĀ)I < ΋tkF xGD,1`Go:PTt:Ȓqa8sFY/u Sy'n4dkBo~阾>pՁt),3=/ˬB|ģH藢w ( tmCFIz0/rv )t?tlO͠<JWE&!vk/BW-+Hu_NC?ωn;/W ?_-\/~ t{yewŽE)21ĪttdYVKh VcqR 7pXjgMd`lu1lvU9LBFi*O+:itR271w&oz=/:gt:4i 8TiVKZДߖQU̔!ݽ:B1">ku |j].!@p?ui.YRwu0a6N zԍ覩@O]RNBy4C(t3ť_Dh4ui"k{?p5 8LӯjID}ڻ-?hJ6 |[AK=R]x7ҺjFy$fVvn67<@=~2͛ ;I#93|2ͩoMw]ιqQB|4a(N0 utUoݍf_ h%:pfw='i̮H"̵m+ávFԵy@ᯥNz-UTIm~.K}":jFIg~{Eɵ HMՉ |m_?:I:Pg{7ˋMҺOJ#yV@:WufFd&eY!qQ}u}0/YTSߔn2SK? 
je2+A.T}I#o0 0S6*AgHnAyxW?-:A}%J?W9ث»nslwQ5=.-ɿU$WMXThQHn&?f5U‡d?HX R7.wR6/8r~eRmII_Z' CrfӠ @R}VZ&ᘂ6n_dH]U&Չzȷ:To">,ɨ8b?fRß\N 1 =Ft^7'_X_<m^{syLj^n^KEFqb+ HϠ .h=dgWJ&b,ul/R Pf-'U8TkpL>-7^_HySizm*Q?:(,6AD qLzE1̀>) Ie^ْu@u"7i\NL88}(^RAQiXUFҶag;xRHP͐ՃH5C ;wʉ qssӚ-*${-ۃ~K>O2"Ǯ*LJM=܉^a䰒9{|^F.ٻ_6.UF `9]EYb]w;ew|{8K;-r"ky2s/Uts v\CH]'i`x|x)ݺ6oMl:ӃTyB\) BT}ӏ_<b6ϒrT==UHOi/Zǐ? R__Sni0r;װ[~ITpĻtnKio$=]_0E8򬈵4_u5/o~ {޴V^*}>X*NUyDwr',i$KiXPLj*ԯdjLZvH VCJ)JNvZH&NG&oD<7xt_,m}uCaf-fk{I%^#[}FB&e*7$ vY;g`Gt V9-s\zއA}*JV>+;[RT-"I~T$嵩}QC3d-WUH6cffn9q~~ytOmD4$ɝQCY XSǧr5FC"z(9En<OU[:IBږ>M[S H.JOPB=F~շޠ|g}iZ{ DeVUڌ|ʒ=N52 uze甌g#.ч6!\g<"K(J?mȗ[-[by?]0%7v}t‹17{}В vc9K\p}tlM''ΪJNCư}[xR|SGRts1K?HG.ߎыxVhڪ8YVëZ)E[$ݔԢ%'sohQ#DF"4Dh#DF"4EV7TI]A9M- ECeqIϺ<D *lC8#I2RS;a{`ɿy: =E$i91)cf} dV]ծ}T[nZ}oʬӦqR*V^b:<ٓ?uPKW<2 ϼDžO&Gaux2e\o{$5ݗM."3ad98AAݩRwLbV (Qi NR/>XHtFe! 2yҎmhZ7=ZݗUOwK#(lJk{Zmj6hmxKz='J+Jw!qԶu-]UJ$Tx}u,$5*%I>ўs@Gt|q[fb/}y:aJOI6RY_NS.\r$)+KmOmju{Nmm骡6h/Zt&̊XPmX$-/Ej&^&,wҕTޝYETN;OqSx>@F$ڨqhhdY{\]|K*4ugvq3?Q~bT!3j*&MQGR1-ֈ˪*:Kh?8-Oҧџ(ͣjc*h>2fYt@q. l˄:KxtJk(6:lR)A\;2ztN@G&~=GB7,bdۋ)zmBUa:mBtT+qXrOmzanyb^.C)R r8Iϯ5|hrwj)!<=Rp{l(CZT6Q\7RzOZ3!ǬP^RBA?k({p]'Tq+b5ͯ#Rm!j tdđ+հj]xGJKl2HG0"ɖ$Tļzo)rޛb\(;& }JRGmAFmkEٷ4 %"E&zT}kRҺ?JR>xmxbvnƜԗ/lL(R)~^k,2Tho-Os˾<6r 0.KOF"4Dh#DF"4DhA1=K28F=)e'>ē5Gޑ]V% g)U۞zvn Dҥ-AD'b,͝ȋh5qQ0cpnG~UuҠmL8ۆRs%m!GKhC(@ͩwxؽ.&ib:NwN޺>y4I|8&#N?V Lz~&]HhhxtO;H/fcWۼŻoDnx]I@j׉RC5V:ʢJ~M_=kUcf҂,'IIjC̳n-%XP! cYhL[:S; vPgm6ilNMG!T"˿%*G|1?'oܻu:Y۫ @d)7q+tū擾D.cm/]݃6Tרߨх.}isEm0>yCJun٨_usN<|JfYe.j#mӜ E[rT9t;Leߚ(ya,y鈮ag?5(%:%˽;P׀ԉ2T@ej?uiSޑ葟Cڕta*i@u)P%>$H^œFM}% a3w}L6nlZ54vFP_%Ma!"Oy!e/!pi,6~kWF4qt.|G֜.quoI_͔meZd&dR153YHJ'IC3{uCѽ",Xŧ H rlϊnn[E[">׆n͵.yj# ~E!^Sk>*[loV|]c9:7 z:__]6B؏kϴ.; K}nw&ri`l"m4RI!-KZ{SLϐawm{kҔ7EޗY֮|U:ѷ6u9JVjm=)mTZ5 - 6R?i^^^SN b+vp_媧SVQ NLFn͙3wQoe$'ɪe<5y@qZ ')=9LTS5DsIo|y)=*hufmR8;dM,:/u`=P4mq)GHR r4At @YnVE)-VkϸOW#X,~:iJM{W 6zzJ,K0H'\㝸Q*Sp* $,w?RF4KBg_&Kt>d3KK:ڽI=y6v].[Dqi_C.q#~'Xۭ9a]Z5sU75ET3Gǡj0,S#mSo.I$e@- `nEJCPd]6?]b(bx͏Mqe&#դ h{3~QRɔVROqmF촟Gfxqm=R;K.JBqryĕNO|έXF"4Dh#DF"4Dh"_nD :èeE!CT #ZN} ޲m>{esϺknTcd[`0Jc8ݵ4m (Ó;lwOtLuZY[u KjXϡ+eiXQλb)䨏ַ{CCN6wmXhm}VW_}N[TM*)Ŷ^mrq ŵA |Ai0Ok{ܝ Ѯɢ4T0l33;qr}l}hMS;}X6K:* PoA$cI7Z׿zkA 3ph=GL%v6 .v6uO^+ a&e9/vX2jVv V@5D6bۙ۵C.@:|~ k=˅)=}[ݭ*հ7dzzs:jOeOqн-L7ſo(8ЖRhzHv1جϚe_qitni2SʚOƼgH)hc7xɮ%65M%CU3J;?X^smshmn8y$4  1ksu%|zE*b ,5?"[G G\[y^|)m\O^>TAk#@):WAENkԷkQ~Bld$HW zJ=A=cGxC-}.ҦAFE %Iu+PR[0)vnPeL%-Rn`)Ddw%u8 e\uuZ|'o6TwEZڗCiG=[$Փ7tFJ1{w Srᢧ*ZV#eQfrRڀpo}S}Z+пN/8jw,e!KT[Vs7 rapO-C@mM%WV2/!-uD!6ھ3a6P.dKŔq>˖qǮ ۙA I[h.n Iz-zUvِ[-O)}$Ր,: e*jR챎zG ^ܣ ;w;:.Z7ԵFH<FȲghuMHNڤzbٶ%e-"P2]\ {X̣ݯ#k2-]V~[b bjԠ~RItӀ!gn Tou5&!mvEM|r\)13pB9YfjVRROq3(U](S߰,xae֭z" ^y0n{˔|p9%NV 7w>/j0afֳ]ǒݵMU0Uڈ6n],iYϊݞUi̡bCj#)p{Z-n7ZT$z:*GT_=KT:} W!ie #[#U5RzJ?IOM7 iRu_,D7CRWc兮N*mFETBr-OɹeGPfhP@2E5- =sX<FsQTao r^y"NtkޗFS&>aqI*Ci[i }i {^o*:jiE<%;YHiyNۓ1"mD*'2庬ȋGPY4rY+&TnMoT*H!Ns=@{詻h ]iKGm&`Ş~IC6!of*Q u\0aL:}!k dh#DF"4Dh#DFR.B.OΝ߿:OY;a{Ht)S%cĵO]3'2M}$zޒT_)ŢE'mQN&:2Z+Bp9N0kߺ˙QO4(by x[mnw3mȃa&KqA/7jZj^e9TJx$-`Ǐ\עz25T}%[_c-[;#»4ִscaicQKTڏ:ײP=;+2i*j[d^u_$T{vȢ, V*Pm*I=6ұU֌0MZ}=\'Tϓn.ݻA{ wU{ݭ6T%Ԃ N9H'xF5ZIrmQuΓDtrϴmo{ˍ54n ӇUc>WTDJh?UBd,*ξ= uq2x=0*G'۶JW.uv#|ow괨.%vz4Uu|Xe^+4qA[~9v$ԶFqCgTX@J 7R ֽ{+e,XoJq]ZڱvUC"rPY/M7eHn̝ 3ںi- su5'ɻjLH Ux7ΗsoYa&Cf2g0puRqS- o9$'uaq i WԡU벹Omܐ\HǺNaE !:{bia~W^0cOU]R4ﲼ%mۗj]V0W}qPь1J}`xDdѬ 3k:KE2.:VO+ QnDM5/r8A;[z7>>SKE.pRdSp)R |y3Q܄.8s%X+pl)CGCN8j,ܸ)d-` M9Pc{oDI j=ZIqv=59Br6kWDH&FZd-"U^nMI)'(!!cNH:@쬞Kk&7}&_6/T[8dE!R)LrrTCgz{dm{vb(ͼ6&P9`R`ʧbh[i.JV2PFI:W;/[Px`74ᵦGv.#SɩRT&Zx~JG,7q 7ɸM$32+ Z 
=3l6c!.+Xxǯo;qSH[1A]^u۷u)bO8~%9QRz`3!%eUav2g>]ntwE7=eŶ]CǛbC5HC3]S4r]&Utkt=5vh7*اGz¯LN;yrƹzPښ@!a~;no<~ѥygoqMveuu/HӾ+q&Ee %$!mcc ԚiM,vln]<ӧ:QyMgji;cFBv2y^Z I:\#DF"4Dh#DF"~#/CU)+3jjq֠qSm޳Nj;wӅOBFvUo JOӋQl@2E-MhD>?M_KѪ!T.RH"ޱɆ9W^_G(15}̥) J㐷bFz=)IY(*V;gF"+uA5X͸C@9(#?{(5˖G5b.t}>& g`[-:UGG;-Lچ/N橪iї!rRJBAR*5CWOcYI:fcfx 3.3'X}]{ڎͿtX.U]M6K/WzNA&J`wfrۙx'>ySQ9zn,7ٵߒ;XG:Xu#g {`6IW@:oUQF4E+ zD': Dk_(,N"]&N'lWR{bb/7)(ځR?wPDt\\RN1jl7TmQ`CțWR-BWuϚa* ZJ鬸XT?,8pljO:pw$[\  -R `֦x!R }hkJHOd9\`¢+2ft&CZħOILGZV[) ONT@lE*!rkh 9[nBޓ ,G{ fjoO?u}TsjԀ>lIn--cî9`(s[aDt؟i#ٿ>s6UlJCzیT \%){ksV,l} 'ܴs{.HQѨD=ݘ J:ZL\fP} I)Sc##K{>v D$6Yx헄E)KV=TEm2d<声 ”O9ZDZM-IwVߗ䔻=cBI9`H}5IEx~hw>Td~~WT8%I>=0#7>ÑNEjw(U~oQ&н?k}Rx7nnQkj0)T+8YK̒pPTDzgRD6 [Fk=%;]jTζ8}˘Ϯy2һu qu9:C\b͕}ΦΝo} 6/nʭz>BQDe)Dq뮅H/wqcQ.o8/=UZ^!S`V Pa¡>0 Vܑw]+c?FcM9G/2JupPIkKFZ\]#_=eQWT=k\]nlM7UR"d47UpBμIFg}/Zx:WqAЅ>Tno<弉uxbN,Jg^flDŽB?5Hd4@$N  <P @ DF"4Dh#DF"4Dh m*;Nͷ̉MEiB4e?23G{f!YYgU7^6rD\K*%g̴RHJ $)9>qKo腙v_. &ӐWvϋeWmS/J45YQכpejjj\IܻuP\RtCBt{\W"~(" Qarf毮Qa΁@\+5u J* G𓯢TD.F-fv{_/.MJKYji䫷Vj]][֢?p֤M؂Kv IKMiڭ)3no}ɧ[S#X->|$mRwy]vdUl1 V=Pᨩ)1]TKPrhIalmB7p6>.71w*0ϗAb~Ήl1mpQ$N=HEy{ u(c]IT&fB)~cMTv_j<Ȏ)Q(y`+7-O=!m)HB1ؓ#[_k\]-FgBv^Tr]y1JrBBy`c)6a>~jpM-Vک#HO͆򿔜z:ЅZk0QBR:[Eru"6KCis>[j1lǁ@}MSinh.Vo%Glz7zR~oU ʹx|7·5pvDf-}<z"*kR rǹR%n8-GdY*QEϡt(HKv;@uD4"Drj=.)USHvKI={htޮcw.3+ty+zlVÅr3`dUFܰT+̎ÉId.^*-ROфܧ3 r|sLx[[*HϨ+_,}u5!Sm/j׬6mr£v\i)-_`{ukaPcR^&!~ 8L}Ҍ|y 3tE eRogV39.qmWky*0XRE&7s7Sj}R1J KkHH97ؘ;7Z}5\Y-~Wa-7[!Kl~UF2pww6f_K輟tKqa=I^>[7W6g}rޒGiBRDM05mZHMD_"4Dh#DF"4Dh#DFėnBŮI-| ܞ, QI:I^0V2YSRCmj ý7K# $V(O]s4Vjfg&_K=9hZ!GU&iA&..attXY퍾 Ry֜j3 SaJPt} qyAdQW~630 Yɲf5%_N4 `믇li,)-2p?%2a՟ۆ ^Bo=W 6o䏙Ŕ*rmˆ{*iuD?-&n/?73ގ_y+խܺ̋\ua=iJR;%al%4!OGMT $!]9ung~IBSAR2Z|dx`(v}n9>WL& #)*%%5rOejj-8ۼI^ 'HZ,<Ӟ9n,|ON\u,6qHWRm:q- fS |p!*YOsZXI~}[~2?g&^{wZk }F%s >T(wɵ+;̬xjkuY9s1(BRA)B$}5[_3ݤޮ0 䡏 N!ŨŅT5(#ROX c[.WR5PhۗON}{F"˚9+ h'rs&%~FfMAwVQ# sZ1 ^FJIz"J^ vʭM؆%6G3%reMXe~ӷb<>v\d[N˫1DI쀀Nq'2-]C1vwbEWp7"gݻ@qN2\ y8uњD2_7uZa˿WM7DZ[ݶοA* ZjOy?>\d(d/quج!ss| "a1)Fv{Q?hr󤌳x}6U2M߆GVt]ck!KcB'S~JjE4Dh#DF"4Dh#DFe/T7>V8Tv{ZV=dť䠽QVݻT(gyA9\R1C 􂢣L5QAdRKkt u.SiQ;5@,zja 1v/'W& rw$DVPFT{ %C9)1R'8G[GgY[wYSzsn^H 1r਀F/^ӣ0k=}>_(? 
ӎoU~eSޠ:ߪ#k-S`>E TK1I#}O"}I׺"{ܟzCH 1n<]dR1ܧ`~Z=dp\6e ZCiQŽ=1l,,8zR)PcLW"M[K.8p1w]쫶iBǤM.Ey[Vd+?CR>k-1+ni5UDuR!|T]NpH8}[m||[d$0pGXX{VUĦ̨S43"wo$,q뭧bx:PPދSrmUd$RE,|*~jXFe WWצ wW>*(=]\eBg7П㎷ø˞O}f^}"MPLZ*A-^N{(` zg:Is'J2l c#꺑m= xX۲{2u}FiHK)ᅩ"k)F>pfX}cNI0IڦJl9B$-s2RaRIqh;>>,OGLZ;}sjWȨ8d[ve-1M(0O<%':XI6::ӝ":{l(Yc -$k#s_mz:ԶPq8'9;jŅ_ѷ߶:{-*K5Ye楦a+7퇅jD]-Ѕ(Hyk%_.~ՙ?j╻c|MMm„=i(mᕴطz\ԛ3Mn1PeK y ?Ccn|;᫽ڦ̻K5EJ[IJRHPG~_ё@Rհ4]Xgm#zG&džPyÊYRU۱z]k"˚(]F3]ey;5J[є1~qR1IcXIKAʊfFbŸ$_) +5F[n[C4%S1-,2}u[DOApzn|;>}e==Fa&4omjm>=DXK( ]!8b ?KX]3f7]KKGW` !]=ζ VV'I*u1gߒ~a=A[|J0G~Ӎqox}%\y?fN$T%FNշC4jS/r}Ũ2~|wEh=8?`@4N9m;3#R8lsF:Ń~m*2;J@2%ɮ'ZW#ӿuk#ff&8rUUI㨙OnzPTHl #׶^1O-7M OŲ̻Cujt?QTӐF{zjg-ʪr[x\uYG0"%iߧ>& P>2o˨E\C2KbJ[Te##'QUb,Inӓ-p=Rin#$H jK}ưwbbyiIƑ&!>qSZH:8"qN'K}ܳYPu:QE24FZy)$\'.[NUimIK)( CuOV%6bfL.qV+2N~ ȨܛkPm*+I:YeJ-px'?嫂x'T1]]M7NniuIdž>Cd ߩbU:;ѽIz׸jk=[y`<TΤku]JmvT 5I#0>V3?ϾÄw%i [^RF}ZHZ᷼OPj $t qDÅSUsWsR /Gb#k.˵m?P?d֤&0in3'?CU!w:TV~hn2 ^e)y*SwPOH$]}lTЫ{Ng2.Ř|\S4qQ7nEx(OF@ywg&,Jt4B1Aٕz˵NY¾Ԍ(ߎa[>,8WW;{ giWޟ;vi5,Ejw+!c BO3ݽ̤Տ+-suXi-RNHB&LmR~i:ͣ |Eɷ{Wط6gH<ϡտkɓ`%A98djb\pw_SWf)3XRu)ҔY.OvC8z Tuak/K6m4ZaU4QY.8FXIsnB6qp1cn1ĠEF!Z=x6Q>:ҩB?p) o-|̌ë<,H$Tn<۱l~cGs.VpXfSt?cyd[;;"1c1 V] 䶛5CK+$c\Y̊guG]~KFeNӭ5SG=[K C p8Ϩ#۱WgV[kEL2 Tz+TOmHek9!|pB@-\a«86$ֲSjT=z׎n+Zr+lGA%-Rv5p쎺7ҺU95ڏMm6v9~;,/jUٻytʏ75-N4|ƾ2ܩOKR5d-ߊþN;ǢDk}6SGb)]O~ 2|sGm\s{ҞuA+ת7 J$62sd+-|#>>4n}Kjzҫ.FD\NDnlfVJRU:Gxk@+yأ޾>7tW'R-]I~G',yA*5j}^b4a )% Q O|,~ֳMv H;+hnʩ*IV/w9UN8/->SHj,>lh 6t]x@\CDm2_q$-6cJ{0R9bշ28gtx_PV1j!^t+f)R= (~%?Mp9KK 0@=w(pW6>g[ K= 8~ DBOQ.uȠɥqiR9jA'8k|R),N-:IbkۚgSMr2-RP~n`e@nC'bS=Y uq7eܕ|",#yzڤg);]snK:nXkBT-I̸4Y-n`e!Dhmlv-Z_F]sXUN* \*^c)ANeNJke4Dh#DF"4Dh#DFtE=KWEjǰ{|u%+Ijru›h-,+Gbs!m#4ř՚~SSHv!Gl.D (# 'Z͙a\xi +Wّ JH==قzlipbEu.K\'ʻJs6ӫ$jR0TVOkRky;PU9 jR^銺oz8/|S J `VǪ{IE?oxT[ mC ?ÌKXr/.: #?>>؛hXT)/!q$>RZ*8x쨟;iT֙Fkeg (v*$CHN>ꃶ4Xsۨ˨U&$qTZlՈ鱎k$xu\˫OˀBYQ@3WB>O3\W4}$=NE'Ԇm@ʒ몶g-+c=ͪ=JLئ=-JϖheKY q rjm蘻:2XeԵ*[p֩Rlش(b46-aCQTize.j *ٷk[}"Mfi{ț[#DF"4Dh#DF"4Dh>DYȕ#bGRs_oLJT[ j(EO/NYQlHewHZ.rȬej*\e䤮8I$};c[̸8AtS+c=eБߒOU)+&%N'01܍ia~r2C !N~ 5ʐqx$y5M=ˊ0@(MB}$~XIՄHnwwա것qam;^:l9; MV۷JT;kRH-]"L$L1ȑ祑acc ;b52_d\iǤx^JpSb]e*{)QZU%%A뽲@u_IT09N%%$<5p\ ~S8?HN BGlzXVd"Ź'i ⟉jY,&B5%?±q.-D_gw.y-UFde)#ҀOi #F}fz'ꤚ#UIu*?(%oO~܏@+YD>iZ`ș=)LX8@c?Ae/UN^ؓ(v=fĔsijZI+u ĨM'I;qHo,)g:̘@)Ke PaQߐL]\Vt2J=r#SOl*L̖ShY!O|j)t|R]vZkc`ҭIiCaY q8W\jp3!e^)X0WwKe` BE.TE7JxG}I;*"+%i-'RR{/Oܕ+d&| !*!A2u bŴA ]*þU$ŦJ I[T4~aIA?tQ6qwqN_VxmbK's6DtwQNˊ-(%.`#807Mf[Bf BVXSi3҆Åz9AA< +Ԩ2,)nYTQ ʉ?A1DN9=U}K~LZc`RT8G\WD&9RmnT_+z7O^\f(ď% @RSq"UpȊ=~7 qaU>uPLV<y_Q_]B}egk%}<)e썥g[@dGqFE=ʯڔRM1S#W8p8nܵ!_}M~Jt}tRa0 / Jy|D4ŠGyjD{7cq7*߷TBsFI Zlp$${ A*^&Y?YŴK 0,_e)EҢ I,G+XběnPiHS/ˈ^@ h#bR핶4hDsK!D vRp}P~Y69[泿*;vWRJ%sp2q%'ډv.a#뒵!i1jlǔRkz$RN]R= K멨{~:: y+SYU럆truPӶm_2%ca vd x$g!Kis߬4Ӯb.uӍc Z{vԒ\8$[j*[J&@%8-j޶¦|I z: U ƨ6lN;IKRx + }㣶i5)gs쏪 GJRr^TLYV~Xq*fK6B p1?Sk"kU=JLf+cu w֛DIXaӲǫ1AiNJ0@}hlcoWIRïK],ٹpud<:9 ӭ۸T J׏RRIzV >+DA"4Dh#DF"4Dh/h|r'P,w-~[A?s뮹  568Gcf 'ܬMݖ`$NBF25zA,+Ćݣ?+Ǧq,IޱKTٽq_ttN/}mRWa#za$ KI8}&M^uHt@~j7ŅH}|a?O5b-v)inn&˴ۑ iiISܝE!lZܑ~I0kv\ZY9 孃5[ҭGŹuσ˪.z6}x3 &[ 晛\q]SR:a3\q,v$N\Fa<8!L~>ՆDC>CēYTeT+TDk(o#!-`0;ε=L٭v}ޣrJ^Vu ʉZl$|$]LA>Vjza@eɻ*tVtxS'B<3v.>?"FW/7+/>> f:N}pRݬEޑώG>by?|CSgzA<O}[IEutSLdUYW8M8ӪIXprzJr2.~H+EO³fp3.+~ /Ll8˵8d8OΝztYB UוA]2b$#F~jsqr:E\HE"Ei8\RCY8 *u G⤀6&CeEuQ勾q BA dQ]oeǽH۠*܇\zTH Β BS=5>beMp:ܽηJVȿBGK3)d 41tA๳+M-uGR9'WagT;ctnmi4<A@' ?˾2YleT.LmE'5Jۡ)k8GȄȯ=:ѡ'ꭵ[lݽ/L[eɓi2Jb05|JS]Pэun^EZ Y1dܯw ,,+J1ȞFbG%( b k6{ Eע縧zG2WȞQ֐Tޫw=q Hb%k鲶)@rp.]=%DrrT?U'NV#ortejʉ +uD) i9K~&p>jg6 ?)"d/Hj;'AKNRe` JgeY}Pҝ.Pӝu2 JrεꐭkΤm˒MfeJj7ƒ3Rgep/`U;w62a!ޡ $[.6꼮16$7@^XI0XEhfL8moھn: %QTRJc!eH* MR,ə<@\1T>"ɲ]ɫXv 
NUNR}@FBdeĜh#DF"4Dh#DF"tEe8ԳiR5Ӥza[jRRMTmm8ͩ2%6\=Ɛ_fe?T~RUPROG^/ސn$8BT}=h)/WiT eNeѪ"+z$p[Cigi (*!%~gP6vr]uH5UԦg.ͨP!kO R5aKh\쪭B"ؐñbmFV>`$}c R.ZJSd,gR=E1և+L&J<~EBǶS9OCݐejY) 9J2Yum~ n dM6ڍUBR!sВ}-ATlޣ?CӷTiԷ&1M/whW S&vP 6BUܦ%6>qhOit|ۚ\1N}Uԕ@S#& J0C2j*j?d*r:Hc|Fr&3مO?L2Sڶ6sm-kkE6 2CkszwI+)җl)¬|:5fMkYI SGbTʔ]gJ8K5@7X>N=A\wM6hO (S$ #(pJT{φ=ЧIs+tP}.SR2qO}MGhTu ŲJxV+ r%5ٌqG [F} $'ŭ%.p>OKHua `9S*Q @"u5$]z :Oze`\lWֵII(nN{*i@p>C #ZI(:>G3s!0۟.[pYU~SDw-1nݨپwwVzۃ4-Fu8ŧ8{|HP[q&4oGU7m rL+n斏uңgOQ9쒷46ޙ'"FMpX<׽xK`_{Unzc|DڡN7r}Mi쫽=]BhЦK7i)R"Re8Ez&>J,2 =Ãe`ڱᲈ~:V }VГ^2BA^YY#Z=\J'{|ǫV,KTDƳ1TRl4r@BX~)mݢ۩!2d()*X;8G z.W@=6-~~SqL7O:їH[˝\%8umk[bG^qQUH]I6mY /$wxuc!P_vnc֪5'GRi |2 zZ b/r?P!JBu^Mw#DF"4Dh#DF|=˟-`) cX fVThŖ1z#SZ||7B;yWEԳQ#'^{E1CҪ+zPƳ=1o E0ijs K#⊫S.%jZ0I''Q}RQ/VryE\E%T q(aAS X%mXTF:ę^RO% %(P?x})r|WY"՝w%aʓR$;)i%N(yj@β",:ޱŹqPʔ|]ϕpSUX "~\NLyq6<2}rOs%+vnrIShtԶ̖yaGz(9{R )A;ˤu /y5nrvPrL\u=Ns2G[?FJ؛|ZO[j+KnbxHLhMP*øRW2Ŵ*|ՅGp1mVR5L*5GC \"@?ǹOrՑxz2MJjuXMJj<<ԬJVH8^@[n*w{D٦sR۩é6B ~d`2=ȱ0.+#GqVq[.Hq)t˵2ܷҊ >M%2\Gjx]ﵡqZȭK5BBb~bHsN5v\'\>=T]v0*]/-%-aB+o=`%uVWUm|:7l*̻hGE:L>‡>kHHI1QI{&Jܴat ch^kKQ>$H%CYOvV8q#q("~7##t/u8ߵKSj,cng%x5B ޲UgwjmM*u}9DiE G]@\ۊK\,8-i;^\9rꉵv+fX..b!3f N>vP{cd=T u~WQ)y)qX θG0={MX6,E~؎qJugسڡV5#=.GP zjbn#g&z%z\FJk }>9!iH1#P:(GO۽*[ͳ[QL#v݃)P~h[u+@ |q)/rp;pՍvjSݪYZ%-rVT:S2$an,R&(Ysnv˽A6 Q⻶vUv͆%@muI@psj)s őX\y ̺x05.{mH MKDT12Z;oWm ع/CSc{jDX3a6JkMxe穪}Ź)ILDjYW8Y)]XkYm4ROQUm s+]ғoTiųz'o5\EhS6nS Z21GGqM^pl"4Dh#DF"4DhdGM%G}>czK3PXPpT~ߐ*MS7!}"i~FeQ[}ܯ|C4<`F3-|ź݌î%tMmڞhLkTTR{:ʳpÿS/f%ԟY[-JI+bpЖ\8_9%JgwW 37D2. HZO5!JC)eyUӪtZ89QldV.;TӜ8ZpvKE6<T[ aurٯ*Ċ H#$1ko.AgY]n}A%B;?*?YV菣n=UNy-"!NN&ٺ }&{F[iZ V1 Z1G'W` -ΙKhʺZue!Er_z<EQ2{)aK`\J~隸"B͛gvw!?۟{R+r,~cqպuP`O +X'ٟ֜5NvZv:n? -Mqm"ܗDP2DJ`VS#^m=l2p^+W_52tA>S"Q8IW?G2%r.]AOuJ۷# stq 93|8ɦeDuZܖqIP]r [D JY-Pb9g\ڀW)YJx6uXKL>-r8>A# #Bs!ŹvU/.Rz%Fa6i293 2mK%ِH~$gu GrGgA''jqz]qߘ-a RCh0rAƶ~%k+kԕ5Kh,O,HSH$~Za꟱vd/8HIRb'I^{dtk%_4e/ĴOc- Q?_%/{˨ Q4WV<:V:R x,I`mv~ldޏ&::Vmmmu),*}Tu-uI:Bs|SBvoV}a=e l{kͣs\.AmiO$HI楸R%%J! dܰ qIxeh~ǥomfVpgRhl%TY AjaB&h%'WRG% `hEA>93%-YP>bspHU+AYUNV,B) TZV̗J m5ۂ׫UN"TJJrA"/b}їy~_VZA'INꮆv|/TaQm1:qT;|Pl%) AHQՠ!ﲫ-ɵ<|F<75Sy6+f.%!Sr kd 䠐%ILㅭf͸ZBEwL~^ޭG/V=Vu{Յ~{2BuzK2&HRf&)~Ih7Uר^)(o #Mo `>F6u_u7^?Ȥ4^]G v{ %{8RW?NN]Hݱ&'뽲''#ԧ@7L-bں&m~,*:$T)+ڔ0g> ڦ)+!ӵ4ϦZ_ :ꒄ]J_ sW‡G]-"r ?~]"\w]"RtU|0޷1ܠ"WRPTJn6۩ϼOK\ɞ~?F^ y,V?na-'"+cH9q/pb^Ĥ-JF"4Dh#DF"4DhqLʈ#DX5AjBu /v_Ql~&rVjO*r:^sTRа7MHWAO:h|?˵:J1+3YK9`nmx/TS_cFB&By(XX|ү[QzCK[IdN\ulJnӽͩ$s.(IcY:X:T%~F|Yw>FXϹmŤ?#a@A4Vi97qí/>q'9Ϡ?O!+z™C.wmy fe\i!Dr8W8N  ɟl_VSR;K~ɯk: y|  㓁bHT!)l;MZzo .TB;%j\}=B:|AR$KN𒥐jh]Js R>fS$縒a4TG03=PVKB?E}Kv$ݳ>̙%v\V+*RH՘C}Y;gO{cu3&Qi=^\'THaYG{* +^nVOJ+jlj_ҵ]n ߛQX kQNpy OS2I|>V1(TGVswT@x \ 4 1$]ח?5/Qz:rGzvV֥U8-H<Z2}Pu{EVʓ<‡ʇ9lL{PtcuR52xrJr}5ѩ;+>K Uv3ՙ|l'H6wNvecK w]҃R(ʁL]/']ͫ Dh#DF"4Dh#DFn_g FE:#^Rwt~B&\zP'Ґa0T+6?OޜN\wpfE#S5ٴ $g=r8w4h] ^o.vHzc |vy/1XU;rRCxA}[9#sZbuJe[rkScDTR2 "cgPntu*@}U⤶A}3/(T=OIJQpsq5щ 4^l$\Llؓa|,4Ez3ocu_^Bi4|^\riG_~fՠܭov-ST,% dJRЇ<`ze͈oM{;oa™OFej4VZM-AHutIWe`g,XovWb#!k'xݷ>i,k#b1*[vUkԪoHGu$1ﳝ͓}}sZ֑_\NmB> 'o)ǧk"+lIIa2CA(ZU_^Я zvYowPsh:djH~?~R 1GBU˸FO.: b6-jI >E~> }\{plQjb)j˜J}iB\$) @X},Lߊ#16{g{PRmέR|i^SCg!Lu,S'Y]kYN#Kr[O"[(7[d5ўX,CPVxITTO פY52vWuyc͟hy 1R|!${2fEjzq'Snx9W73 g{2ےhIZRZSOU-@T[^ 8ܩDl_u}]xvAМX.ZVgQ* ' SrP8 _T.5Kib7^UVblE.CpnXC %M `#[ˁbl:ܓZ6ݻ5iȷEA \<kͤ+Ma'\]QŅPt? 
T.2y$rZIƨ\BMcD#}iu+/ Riv8QǘI▐y(}9ZU77/ ޘi4U#DͮOigP$0!ÿLnR}v,-U f\L2eYCQ[>+EV: )0$F*ؤ5%ҜVTZ ,ڠOOm]G/roWٕ`ASZS5 (}le !\s=γ*2]m= Wmg<=v;wmI.s+aTU!e IjqsC%.Jx羬 W>3SЏr`rGU˵ .N͜ao /5RXQ E5Jmߑ\l:ۊA͙zid<<7Zv~,E%^3KmI/m1mJJ*7ѕTLiZ1<,;>3}]_LjIK`=Ror&@5viK|m@w=5#gH%&"MAC rCaI8z"%bKe.oGCyr AV}j9VXRj%8-R NCOGj%DQP-j=ҍ\X+T4Q"\R S;!AA^d!-~Ŵz<-g]eyߓg;:%Un Tڏ-n+JGdJIY#7˹tan 1w)Cѕũo^0H}qbBc&O6dcN%+CzXJ@v7%+[wVz~Zɨ0P؛Sȍer glڄbf~oI+Z݊dmtq )<AqCTbշ=gJ2#8s)~ӸRiCe)JPJ`JX^' õchq)"pGc`q̓jVL>ע讓'L~vա4-0~% Ny{2"4Dh#DF"4Dh.+|1OeM났mhNu)=tuW)RHy/C=^Ft>k,'U=֢Ŵ>?/ *%#/hp'A^~$骕%@{|eP.H2JZq'}@cگ0X)bpDH;R 9!yR#bjy.m-Asझn̰_u~SH\mkI>{KN\{5ʥJn8Vy$)NBM۱TD{"gxSSπ'Cy(-<' A@:mHV({PTtFTB}\uZJV>Am{:mC<פ% N}fŲ+GqaKjz;ӧvn5~о3eI[NR%e|xiIq oo%8ޡGW=f2okU8\In V z54Uw\YVQ*6J©v+c q5HWRAomzcۻUAݵip#yRO~K!)m(7psdToboҚbW[U-<;?{]\1O֤E>*gBMnxd! >j{Fތ0^JI*ٞCܦ_!RïN,AR{/6:*s=tfKB-L~Y&wN;m{+oxʰjr箹mOV%Q>RD6|S˃ͿF261ۇ4uQ r"2 Zİc;x]Yˋ$dLj~~K8քU[=$l }=A:jQV_gjTquxOuK)=qN"u Esm&kDiO)|Ϯ9;ֆL¤a|Jthwt3m£?g+XgwcPWs[rIHuUnRF&FV]SQ98QލӃ{A4lO$|/^y$-qQ=Uӓ&6HezPpC`z'Zc|.:클^U>7 Ғyp'3ZPrudnv2YvwC-̋pHr)%c >XmwvΫRJ/{5σ`%n>i풜Za85x< :1F}{EpR!৿%* qeF\ڟz*QRT(XgI@>Gĕxe"QBN"T)vHZ=0s \x6Snʔ*)mLJKKo _|a`=q҅k_F[wlԧ[GYKںS4%DYF)HP8Wkm&;*ł ݵS8-遏MXUF"4Dh#DF"4DhPWh+otAoέB߯_[V?iB"ug jJ"u'}Vk=Z~^ۥ/'RPC;{~>dS}:ď-ǂU(Ϯ5O\b!޳qQdȺ\'^OU+YvH!Kn9QUnO.qeԔ:%|VeὮqTڷ-uC-\}ltP;NR*4َ )tv䤓*[ c l@8dWi,}iSi@TK*᧱vPeT (uR vmhQu|ea>On*)}%]3Ǩkl+vŽ*ctmya ㉒GV?-JFۻB ߿%xfxCm˦VܓQZԗa&+h;)ا6YAkߟ}i+SݖCRݙ5E_%j&8`!.-) t1zˌUUAiڢCe_i EOY~ۙJtlcnlM]{!@~:Û;mH;3}JiTS"םofǫl䬏C魏Hmُ5oVME2Q}:aWLp9yc}$c4XxnQBeMQ~VnO)*#m!t+*d)M&ihԴuL㵚ҶB.KҌ:TYʕJ9%aeE Q8<;g]Aok\2gaYŹ1[jRJBY^)ۘrT9uÞHsY)i͑!@)b*vA@퓪ZzRӬ\ލ$T!8Ko `{$vƬ<8c⡜"{P^-sq'jq%cm ,PF*#P5jRh7پN2dG(uԅ g\[etJzmwj6@S"S`&z&/C3WO YywzՁZrImӣm-%9JR#{gSO8GJ8A,Vէ%Su J=asd L"V#NM=1z{a+BD3vjT05>nV[zw׫Ҫ.Ԫm̼ %8@Mzܸ#h;ٟozyLO$J9#U[[+%B/z[WOY޹jXdԟ͘| A9H _ݛ|Se%>xTsOJp ЀZ_['!nD'-ؤ)֫DF"4Dh#DF"4Dh% ;\ZU;0O;V#͖n~bWQU"a,AA~+m|5!ϙPOoLzkhz㳳T (tnظQKPΪbȔ޳MʔQO }Xwn-Cl.ͷ~QDñ웩ʥQHՆKbY;/ խ͸s)w]ғK 5)XIAvB}NS?[%P--_;K mKnzP9IE <Vpj#W>iRl  vۏwMvq]!U4ZM5nJ[gC?t[bm-tʲ:U0B#n'>n}v%kXI PaSueO>,Q@h,}\(as{{LvӮJP`rY޳^HiOO9LZ^huFJBG@"9ifhȫ[Z"-2dW]>s:HoO @^UC6UFGR[It/@#=p VC?s:w7xaV[-BQYioQC>jRQZLRH2OǗg˗@mx_b"Ӻc\^3Sm)%h*?tse3j)V6y]Vt_ZJjmއ F;] `<+uջɴX&fcVv锹5 h!\ZRW #:HmU]Sv-fTfBiO{*)ʼTGw5p I'.Уn 6ج\qʖU9^_}m)R|j:M(=ԸLA7%=BZtҟyvEREFDy~fUƸu.h=V-CY6ɻ*O܋KAATr3 0( }%Z+Q"4Dh#DF"4Dh#D\wyQdZ"ez_겎Af: ,-Yr,|w*}IHƩElϦpuУl~O_n_$ *yt]"mn=S0S{wZ ,=Ygpr(ROԟUWDt.E-m0>|OC#>j!n4c1O".d>aC\w7uVZEhlӷpi{Ԣײ$3>h'_ZNpP>UbTE乕N嫏'Yl՗i=nՊzK ݷjCF;APx%jk51QY7]eVnVb_M$1L6H.bIZ 6qd;K+pKi \Gs;]J9G|,'A> ֻx]ivաQFSNH֗[ ukJ)Ʀ~U1%E-gkzj6uJQl FD<> u 9MApq$\Fmp]|=R{ĐG~J8x)8 =5dQ?D` ERcњP/;k*m(sp{j31+gZ=%kf2^5q"9S[Rb=B[)Pp!>>^fs ݪuK~UgeIue[K`’s'#=2F{޽N}ħdn*>5W )N!c]q4sC`vgm]]C^%PUo1nME9Ɗ lPޣ/皿5 ~*Zk٪R]r-9UYR#.,$d1k 6wm,WVoԍzU{rVeV曚BjSL\@B9'u-ؾ,me5;JZSRs}Nӈ%\pI O4+5˛#YxN.[ /UQ۔RBZENyI)RpڒlGĎ "° eu3F"4Dh#DF"4Dho ~E2:DJ;!ljxп'Ŕ"IgGVCuWF3 ErZGu:_1ҏөJK#h'^`3UGo7"%͓LPfa.Z$% pJ'*V?D\iXpynyӎ'Tet_6=ɊB\YZr Onޚر: <( J sJ@~^Ŵ 2>ڴa)h$'9e#:Z#ڢ ЭV&m]vP;jJx\tf^m٪R"maKRwXR~ޢ-xuIܮ.=Xm&iM:MX~R`uJX¢ UJ̭!a Apew7⛽@&xO[~KZݹ.BT%+C(M=Ln+OSvԮRIMZeՐ鿠ۮnSoTvإyp$2rE-;?ڞ=5~ :ӷok).1kVv*TFw+MU~M$̦>>ٯΎ ҷ)-j$VYpQ- ~,J~ GCLsldU:n+zWC1>".mՖ)I9?(~lBQs_@dJHmBCx >7PCe9 LêFA[IՏwPKǵJ=ni}]};Ox.; @)mGۊ>?77G$ro<οIMkCNm6(G}Xej!C'VޢuOU&ޤ]DnE`*~]PvTاR>ؚ! FKҖXaꦅG6ջS48YtWJVK{G{EB=N uRBThQJ43I veJJnM ֹUUmr5.)6u7Jm]R~i/~j[3rnzqDLIS&R#< {d\ 1Ogk;mfĸ%\b$?wȎQfʼnY ]uw֫n]|%m$ҩBGP!Z[m2u>!meң}\yы#qbRҶ )r "\2BEr c۱56]v{>qOYv' ,'BwRy9jpfX[Y2c5ַ3nmU+H(Ku?h8吖А=I:UY:RYGlr.VLav 2S.8CRd:aԌshOA T/QsK[G-ո;coJ^bҎҲkhOX'6\N=L"4Dh#DF"4Dh%s6w[ 9BUJ͓%2RTG!&-GT#ɛ[l*IJ[rpTuKB"_5~V~k/rwΖ!JS =I ZP\T}NѧzCGJT'>֕,^n<֑چtZkqiՌ? 
U!%-&?ve ΀OlMa]iqNi>P'`>\K2%=ӷc?gL:ڴKIG 6rGajYn/XlrkɼڗV3Q1Wb]T:jiyë r8CRHZGܹ//^5#]nmת-փ\W*U&iͲ*S_t rT#m59!>L -j{j+j[#ӿmB8}eylGf ]52DFppHi9UF b.9]acM^r}D}*6 /GԨVYt${RnOqzo۝i(mR:4~. {~.!Iy@u 3 )>T-0&N(PC !#0uIY7I&HKqaE̺@HIY 9Ê/hQo趬9s "Q":g8VyX8 qZDmof^QW^ \-?Oǹ $6fFGn#JWJ¾ƣԋoc Hبv;M6i]z~3"yld{2f~ ,zvl! fcMqs-:Fi uv3&,CvT\fԟ+̛^zĹQ[Wae^5Gw{w)lupZm ^ň8D}EE_:JIV! w)-Xo楎{Mc[ZxE4)-2Ik_(D#{BoVߓvfVj[xq%` $\ӧ& $_pI(ghWmi s(˝H#<RFZ5w:)ڹT΅i/h^IFqSM뻡$ מf2;YYo{E5zó8L4DYuP3]7, ;3{U@aǬwO /ofZLϦmk2ݦVRQ<檣+@_r{ƸHܷhp>Rfk6t 1MC#PR!!#*QVy}uJRYA}Z^V@zVd2~!dfu4E>>i1T@^,j~nMNp[%EDo os!!]+G*4h#DF"4Dh#DFkC&Hё`WιyUFz] fmvSln*UE @ޤ4UŸB^RhjRt<>nCm4k% (%x#M4;b:NAwovVpCZJ5 }?t~'j̫FlJn6ι66bflծsZ uRh#,sQziZ]Sʧsn*-*iiTpPOʃk.>P)Empcq3 ܹT.:e'.wWrOb3OEd/zW^)mȍ )$$dw*9Ƥb0 FlĹRCLT-*by Uuda~q{#@=!*VopQК KJk8+߯CvHB=t;#ظLVuEѴ(}FDn+g!** J$(<$8ᔩ]qN(] nVʰpbvPExρLQl%)k<=}yjIw+1(ua!.3 kd$+ܟNګEtf㇂[.U֮{Jv8q遬ma5nVc R}ul#7j !6G.$w$ %*wmL8R9oLJ[c*Q~( 箝#.Ms\g" p4j/%KNJ|J9#kdXo:%ݗ7JE"bOSA #:ZX AAݯF+^[l4)AM5hު&fQ乎T28 Y˧QL[[#%+[mq\6nT=j:0*gȝmZ/ǞhL:RuZpI풜y0q2P@IY=0N}>غT,1 JV3vvpX.Zd{WGf7o\"c #UsepĜK L[N_L46 RJR<;d]8ŵy5\Zh U2я݁MTˏKuƟp<) =W".f,Lm]ƨKC.!$,PF:\du6Z}g5vQ~dInY%.:QIQI=w^_JHTatr/'b]&eL*Rr6s9gp;(qi+ܺjf%(Sѓ %@mW$,P[km=]f:GSM8r$[fwW)Dp⽸T}"W2ԫot GZOԞi(kt}QnlW:SѮC)Bdʿ|qoۓiQlQACؖa%_?ge~u2"4Dh#DF"4Dh.JڸR$J?ᬶX]W+|tk_(~1.(eHRe=⼏婣 Zbm7WBΖ75E_]wϤJ}ODg)4@ z~_4ݾIh;n&K b5Te8[TJSRI |[cmeVV*C^RHGj9* ?d"bXJ$zojNG15MQ\eoc ,Eabb$TҫG5VpԢ3pIuM)$ܞʼni)JZ|1!uKA : jnYVX0'8 BhR@=uqm yfzU16 CN^F20[rv⢙pt[k"5:kThr5VK9@yք|I!E*䀹ZDg9{{ܪ@!wݹIͲ]l(v2\D6#<ڳX;%Z%ߣ;e=FtгAAT[]m~.]&Y"7I[AKDjvB]N ?WPZiR R5CCP}tΌZi'X "cٳP,B+#w;aCRaș }6FPjEYB0NRjRZOF1@[YkKr&[.t_˹4k|ۃMSTuz*t*R*d"+55)qNeAX9ׅ?J[9^/C}Y!WgmclҎ`Wy̞ 2eg~[*u!$4XLv?ieok5-襐k/ͣ8+3ͤ)%92Adv׬ y fr 6d7ê[shS-Vf*L&*֒Z)#\C!w 2V -;4HF{}u#UV)mtIe沗Nc>:i{[-e*K{L0üY $;Co \Z7;=xWؖM~5>P*kKANB@)_zSP^|\yk0VßCoJRcQzo0BmR}ХFuG˳G)=w'm-WGf^z3ժE=-7a W%qib? 
w7]<%ܟaXv=N)W f+b`%9qPHv$&!֮]5hVͯ[R/jJ1|d]=uB"wVۻP(͕SMǐO+:mջ[~}H>r7"Hȇ-c֭)z&Cj\par*IKmJ!W~PUәoՊ9";%a*V *|?&4zz#(J*BwuX |2n/j2+)~>%͟ctq5.QAfed ԛBF ZCnپ|V6?7F>kiZ$]d{8?Uּ6uI9IV}h#DF"4Dh#DFYIhU__f#?A'D_>j^O﭅.ɵ$< ̟7p9cjq3a~/ACE/՘{XZ|;5݄{Vĥ.#)S}ŕ96 I' v [CGFlKnOCU*2kp=Ven|ԨToBeX}2RRJ{a3ܼܮA!MkG{$&](llń[Qq FslOnozrurY ~_ԆWL`i]Ƹ'B{A)SY61[ep|Ԥ).ĒP=5aI㏴ߙ兜FI>uP 􊧐X>R|iGXT-7VIS֖#D}|%!.(6la?%dLjrgnECdy*O۔pJeH/>t[2.Wrvfܳu r^8(ޞR8K>\uf[ߣCv}V ^Wա'idI* AL%D)TxOa"8,BEd_ gVGE."ە%>D02 )RRx)SR&}iZc΋)eշ]{6(eLủ53䰴l`xmC%*!TcDuRBrK %5QruG jorOj+5Y[Nu(\h2}SA To^oG}i# qMKs!M=:KU p%B$j85xOm3WаR#Sз\.Kx$i_im6ߢGHEtr|R @Q'D {kR;:oSboYob:KhmNQ9QZk\L[I_p(kme΀HB SY5=Rܣ[C6ެCKͥ^V:1n]N?tc|\Ij$vֳ#VQ[6RVz"nG(vk3!ՙ6J KXm~T% ujk_G k;.}54PLM}+Opl2rgBԘRt ~<(jr ji{hu5pMn>aݓ=M"*uVyd(yjCe@?wP"Ŋ7ϕSkMfmPkT:K9M-S[I4`yg$Ug>*Ik[o=sThe"eBrgd-QJBqG8V?w^D&fܸBYߎ{ˤ¸)]]BP%(!X# 6"kۂh{߉k[R*cI283%@ \EX+a5dKUIP!j.Q)c\ n|bra-cZǹc>ΌDxp?Np{ȓ'G۶.MǰQ2ފq =^ʂI{Iпv/$.àk)42'fr&v]o ޠ-P-£CR/_w%Q*'*h|:[Xn^WKVAzǥ0Mz#DF"4Dh#DF"yeM)R Th_J-ݶh2f]w~JםENCO KmL?|M h4zC̵ۭg)qe JԝfKwit )J@ㄬ%#?Lꐍ⦓vU?R-R0I$q(ae\P,„TPxGv"p32@+uHZpe^%HDtx#QSoMV~&X]J )).>b5T!’SHgRmVX7=)+YbI.=~*k%$9ۉuJrͱhRc"+r帾ElD;> fvM5.L`\v HSU-*P)}8$Z?ioܺwr]۹,Q0&E,rҥ~ѵg(5n!;rܠ8+oT ݸ7d+i64Y2aPߒ䵶_ZH `]x@Ӌ< tW:ZUjO~H뚘@q InARǾy5R'Vb !CjH6HYW.74قLu4 PahM%ߒ6&ٹR6;M^2PK(ZpBHŠ{Xuon<[T(\~eu+Uy\T17¤deOO/\)gpY~  $6ܰv7Z>9CHKDbLp%*h֫S#V.P}+ W*w Y9'I.{ds̛m:d"z̒Ke!P\69Dwk]5|D0ektڧm>Zr\p{a.+%P#u$̞BrDܗl: QjH[(ΫLB"vwV׎=f[Hm-Gfuǂ FTFGFz/:֐m,ۆէYӭ*'=>d>3ʰGH>šԆ,Yvm5kw51mܻ~Q4X1)$UrW!+e=qzt`jf]ikiW uiJ%J.ެyMZ8~yns[ǯ"4Dh#DF"4DhrG/eT۷E.!k,{ތ!@,j),CI0UMv.ζ *9bQ9=x՞1\uU BeӷШ*̆Pm켏PMfxU,G řP߂pܯGm1m^V=k3-[1Ŗ_xYu_^u_D0iS(ruVܬ10wn'_=2RҘpJ,`s]Ƀg =)Iz*kEmè?=I e6mښU&LyM.;8Sz Ԇ˙K|!HǨdTRgJ@Lt;b"J#* |EPRAm-dumT [,NFԶdqc2Q-rR}T={{SG_ w.=EHٛl (Ej;[LbkzHf Y;puA!dщpj8̉R-]T˨_qlE0z׏Ι.o7t6TauGM@IaQ!GW6FQɒ5-gPws-;店Qi qH*inB{غM-QaorUomSoõT\5*ϕ ruNJ/5ٿ䬍5Φf+OKkiPdmlêGm?qM4eLr v:Jw`WYxe2}Ŗ`!&4o%k2o*NҤljT1)Sm̤`q6g m꾸_jﻱ;201 ѩTB0DT7Br (U Y}oU.v]Zq"eENT*Ď 8βdު6&ʼnׄy!4JytRz\D^z:a [)^{T;'V%G($|$'崅vKFxe^yt%69Khq.;hȭ${t1PH\U w|y Nn[g%{:;eľtNǥԣI]NyAm?}|JdjFjG˓(Jaa;6ςmzY'p&ؗuNzqVvKkpJIRGWWFV3G͹3Zu[ڀfTM2lERiiy@e~:YLIvgu5-Hd<SKMUgyAP{g}VqwR;>sb8j,y 2ôQ׾zi5Rn]qUd̦Lũ)<5ym(%6,on?Drϙ;e?m>UUoݵ\+M_ и6G,s!qm޽u`l9X16nM^, P ޴*q߅ ܭSrڔ%H)KK]<]?ѕhd;;6&uYvPPh1h0Ȕ rNIci"q?6T(I_Cvo)&8rU鯠X:JLzEw=HIN,\W:RqO[Sai*jV?=TrRq鮋JL] vrǁPHյ& y 8?['{(= O1۟2ѡr:n@KKYQpw/T}AcݙrHZ$ɥ: _>H|X#Dcֶ[G{a.MMԗi1؏.,RFXON([۫yy'\}e>GۑZ|_pϯ[3b*=UӶv̸Iu7 %i\y(D dg&+smX{6m)VmHQ~C6lPKy$}1DTn#Z˪=5'Vk2>93 P-a?6uuDY[G"{=ŝ&[M\+. td|M b(תi|hnO)IVRDkr传ٵX>"EZ̝[YTA)@jMt- ~U,u[ɟk<ZIF2;:бb[JUKmW%$AO~ݲ%O/ImM)=>$ioimB,)10kvSP_3p+ #yV.an/o'zAQӡғ}2Jon"L}.5%*b>ԂEI?' vƓ)t}_'LeV&i7Wwۥ|ئ.|TFx>+md k3\m@#DF"4Dh#DF1zݶKi*#h ϔe+PgYԊz,[g mew$9YiIDRF@+>σ007B، 7 ZqxuLvUQiqg$p{h,%J;*NV PǷu6w\[ȦWua.!*'zX׶jˏLzkn5s+wMTd%QUc +:4;3k{s=I>oo9UӽtQ򹙩Qt[y2RpVGNSp|ߊ5`NE&J;w.m5鳕RRT?pU܌jHi_="qݳey~m-T;:!NOD%wR .O5| sSBe*æLNj̄I|} A9㩮O`NÊ~j WA]jӿaW9= ;_A,2B9ΫP}z`XkvjY;'\{ivq1 e^jrFչ@;)f|57!hѝ8j[oI GS8\]fb}lo`=.zġYVkCrd@A$@WAr7}U*39وT>ھ6TT3i&C! 
w }&%:GS,rLr!CeEâ0w>U5kAJj}]ª a"&0I/Lm.JSѻ8=e~oϕ_5ZP5s#' m;٩.yԁl>!eIAh>C9ydjh`,"M^oUdE*nDwuKYQu^b ﴔ)ȭL b͖yeK'9 S_ uI$ =E~Q-Z%dhťG1+l)&*~[R3{Yfڱrm$!Cr%CYb\+LX˓S.Kasj<[kY=A=βw_ZDӃ,r'ʞXCotF2)woJhG[B8ALw$tJ**w'vݸG{!7sAE UbU+zhsg]WK׎ IvkXr#9!-(^|ZCܻBb}Uz:]h$!LYOA >ZĽ]E*겸"4Dh#DF"4EThK˵Kwnc{euYNOoO,JjRc%^ַ+ mX|e-Yahh*QI ZzڽJ#U4w~}vNҒ(W~޽b^VKCi[ʑ81g)G[+2 iS,oMBXDqmh'id53mX}sV3騪-X._v+fIJrTgLVҲG,!}}u@)̯k0 OE48nBm۪*NB1K#L,8F-)lzMnġ6$:$ C.:+$;G>[ޤPS),Ը]UĮ>ZDyJT/MQ?kϸ!4ePq>YʖBR':aE,2ܶsuuũҽ){]h{AHn[BuFpٝ9xO >SH7rMqvcd`ux_H"]Ege Y & B}ߛ;COxuo}Q: ߭ܭ˝:jr[qhg H^rMRxNxܛ3{H0-';~qu96n@{cc.&UJxG*=4` m4^zF h}<͸>w^oh^t}ҁjG+495<ov{q{o=m)˳P4w]NSR5B&3לBAЪǣ%s39'v Ś5;*UG~TEViҟCү7zEA*:-, |:OKSbGA#Mɭekvii V"3ݛZMZX4J ~K"yҟ>ao'{kIB6ɟY:nEKYI#_/f~G J |51K4,Y.߅?5EoRžPI6 eK#?ۊKu+@^ߚN_ *lm7&W{fU^!v {*~*$$]lFF`Z0b-S%*՛Ja7)X$.LjHq2wNFD3h輪)o^ee>ܒVSwԺ~{&sH!6wvxZz )V* gܤG#Z4 )u"nt{bO|%srքFC2z8?JҦP{TNuעH+EF-dƭuf84ߚmCu@H{KQT>E~Ӏ|ݒb|VzmmڕG8!\m+?:]&У֩q>K;"B[ǂ9GFf2Jc9Ǧ{j='Z9os̶H hk>&:E5;hSsS!PPB}OwE}yW5>_4_%}gP,',q:DV|O)4-vofXᬻ}7e{׶r q߳J(8?M77n|Fn.m&6C)701(,߶~k/CMԚ"&fxl۔pnMi*r6d y 9'5oG.^ὼlݫ#}6l%o7hϫ4Gww|~ f-VU帩HmcP>mvJԡsnŌPjz>[Aꥁ. WH1cVTA"4Dh#DF"4D]Mc#XI H=ᬀܖe1=ZU^ԙ1nkIFmV=zkA lq7g%NHuʲ?<ߚzUSaXC#02S` ^Gt1|W L-?Y C^m>wɄ2,7 F^ TAjORqZ2"TWP{wmH쨮baL\5 !m7K= +4ճm-f6H{jj5 8mJLwDd)u;nuظm~܄ϋ0Lِ-{ uZjf| u٤7uG}L\xDE+Ut/Pζz'iZAQ |u7C4mjS~&r ?u8oޤm+/KbeN3E u VO`=oFTNݫb$Tj-*#M7q16d{V%n-;;E|U5F;[p88ֱO3wd֐ץMt%/JQ؇͟Nר, i9Ge3V|j=xj ͹?jbtEKЭ%N 6MU45,1>NNjjYN&`pg쾩OOE(8Q!JH> djzڙ[T:xzY턢;.9+룣hJ ? =߿r!"jSL77k/Kxm b>,$BPF[SfR՟BKՙ稕<1X}M rkv3p7GMѷM0Fd*B}ұ?G 57x]+Kav?#TMTBuH"UVH-bn$]!Q;?`پrtY iPӧ|fn؜fjSqw9ua(ILIQÿertῶ +j+5t3@J8Fgg囿bG1ڊ/GwtV7"%zEáavZ/Pjd1Kw3&~omam6ԉ?"}xzwV&3eǖ6;_? <4Xvݾ` ?S)jh:fI]K((uw9bo+*ˮ3`?{^4 ->=ŸP5s^w2uSq/idOO ˱˶ێ}D#BteIӾ^JO/-iJe ?Ng0u7~aê?@}1Cϗk>0JWھ7[ ejbRoHvz/F߫7LL]'6_Jt9v}Ҁf[F- ߺ~(34Ukm5j?(d#"b35 tOsf߃.7>}i/"~Y,v{}!< '*=J ֟nb5/QmpVnJ@/Wɨ LÆ ^Kc I]]&uwl=􇧝&ӂ%]plu Ս{.cRS6%|!(KM4RYtk,U#|#~q{ VyV0esYqv$XK142d! `+T$9=~|cZ&$oq ,[?QUIm#DF"4Dh#DFUɫѤXnt%h)$~8={-Hnl*D$OH}Au;ږEgԵ6>EU~'I)oI[z1ti:lz?gAJ_;3?}.m e.-͛HyM6S"mͷL{ǁ#O+n-rk4nRm>6=>{ zT~ꬅ)3&)JA' @(B5=93nM 8=. 5O@i?nJ"OKSjkm|SFW"#m>di#܎qd՗$Nם;%2J ߅>7Kc#7 K5 GeKȗ%.I==Jm]n*Cʯ~RзKe'ײP{}uTjOn<cm lϮVf_rdGVD~l<4'˓#!Iנּ:R~~vK*^w"RBT@$V_\;MyNDb&>|y,)ʒ{)5[[IusܕDqdד@:/eiI$Bg˫\>RAYqAO` c'}UtP\CNu& !iYp'$(룻[,en=vemQx e>: ɥpZ6{WX+W{p2;|t-J$Va=}G:Ǩ; oI(4يB*p} HAdd]:)Y#3)H`؈>Veiw7`nځQa! -^܊PrTqTu32+n7,rI?Y]JnLwG(:ÍU𑺕(?;k]P{,uMSslc( ɬy^,_\ͩ ]!cc6u#d;">ҋ$MgO0\Q*=IQ߱q_.T"\DN{zztjGT~rĕ(ST`"3 ok"4Dh#DF"4Dܼ(ŚVhJR0FdH _nS6]mcCc([d昽UqtKrmGu'\t~ָ7} I -}8~i$57~굒g]eeMG"mJTZMHw@L9ľ0bS3? 
ՠTY5khGYvp^uy ˓T$( Itԧ 2bAIT*q٣򷶞شRJԏDňHԝmaKe %~CX[/"4Dh#DF^Uϖ %}pH;-5[W$P)?Zδ9/7ɒ%v hZ)* 3#Vٮ ` p;cֺ泫nK{g<0Iu\tIua|ņ˜f VsMg9ˬk jǒ%h6mQi.k%tcVd6~n} q֟K7Ovf= )l$:k4 ޫ.JBE=b2YՇ%歓VGԈ_'汪K&ڰ?lxBm9MasYՏ%rЩJCkg_ h=1[c%X]Mžp%1{&25'[`evZ4Ja':c%X^ 4zh?&bQ]<'X4TV'>&?># *zK(bꢠWxJ\Okᘒꭩ-HVROq;X+Q_*=Ip5-čh y42("Q@QEQEEPQ@W5ĥ!KuՁkJmZyI8#֕һq;-Z - R#wF$ɷj\7 R,xCU߮Р̀Uⶔҩw 3s6ռ]l)ZRX<ِLu݃R~*xgr_bL%GU?V_vcVn+Y6_īZ#95:ӻcEtӽija=#/mO /WD.R V?:rgkX{vπ}F'vRGѲ3jɍѨ 7Ŕ[շv RC[M-( P3Z;F f_r|9N Je^hxl1''q< k)ֵG|`_ƕ_u,&ʯ}ݒ}iܴ\+IGPEEsn 5&tcZޡ)7K͑g!~INptP]QÊ]Fԗ NR!SMf7ŲwOgdCCy(P?N<6תt7 ~u}jS[^d>]ҧWeN_6ܿ\)K7[Op}|mdq5٦&?/:=| O9dhx֗GZ86STs7E1旅E_Su >(N|0jMxZ *Ro}Z?ZlUޝpDWRΞqv ovxZj=eIz#U^lG1m-AzJ7r3V_un>sohhRw`# \mix!ˆC7;Lwq9jk&J7n-l+!E9۞>~U0{5OA)\{:V鮍( V0 (((( (Պ[v-J@LA%X1w֊)AĐic5+fOmJl(DVxY< U*]A2{.ZV҉*^>°^j*e\n2RP>RD%Y$$Xown5R縕4IÍ# hITո.ž# vTx}q\2XB^K]wzw;evHw~nMJW e qZi-7U}Jtf ۿh߳ykbMkWqLjFM%:_CSP 2m'H+5DmF#ʷ R0VJ=TxU]Pi'HcW7Eb⸩NOXkRT]Nh({ g pz%'?./}gm7%6㺆6_?,Tt-#~z͵JK~B2qG\rԴ >%>+]s-nQ6{9f0J&>?.iH\pG;m~Zq>>rÈ)}dzna ~x 8sv _ŇLc ̨Mn3)[ }À}bHN7u>yz HCm5=# $> s>˖kuH=.Iܭ[xU`ŚK{)H)Ču]T'xNsvj6-Z7;#U007#V08=im/(jH7Pd; F=Fk,,ɯC8䦫mp aX¿ \qîcfa6dEt{+q1^ڗ2~o5&fk#\y>R]&"WՀOjxqY] $)ƋhejuQ#9)fnN cZ_.`g s jg-Z[\z< zZ{%Go51H\y-)|RE,N9]j6]IR2BA>5aVlLHQaƺLd:h)* IH'75CZk]JQ+\7$yՍp}W=6:+*R }EPQ@QEQEEPbУ'SF|qI ר>Z(Y$G\iիY3}yW$u#C98ŵpYrٴv};srdٰKN=h} u ]~5QYӎǕxdD;xV2 feK,̘˕r<qX(oy\uzx\kYeiһo].qP);>O$g+=k:c\gnxƸDݲue˓&D2^Q85Dв\˭N%s-N' 嚆?Ok|DyTcoz$׫/W%ԐilP3@=hbhSי}H)GjcՁ*LFn}*.ǥ_Av%qVz,mlz uZ$GMrN^5ѵMT,+/X""N*/4½wJo:#j]Ccr(~A%fkuΠ:= AO(-=MU.N].2]Mpn+.8yx*&>w7 OWOd1\uN kFlySʱEnhdcRa@O\-Ȉ[e *s=.ҋӶO% 8@pG]F_ݸ=m^ BcBqJRVsy]|k~~Tbd-]< kʋy9UCSеZbdx<£ȈR>ǙQ_ c.*R薮/R07Lh41[Uow"vv[!>I-.$.dֻIʑ1p3䚰YF\-vxuHQZQNtrKL/R9J, }FN|IƆpPZןyKYo,٧Bm)kTW8ƷG4Z?D0#%n\I 8Wtw`L(2) )MȖf+ܤ^ORD]7u> *NNOI Y.*MU-l\T8ϯO~}Jֆpя>T΍rkKNM\KF?3NLL]kGQY5gd2)i#lHqIPҶ).ׁM)xwjHI'5!W iksH.#_'%2- Rp%Ĩ5NTu,RJiӶhF[yA6sӊljrmd( )RPPO<9 ˋCmplp>97ocPƷ,Mm搅s895tSӔI?f߳%N}Nm(wNU %_g=nzW [p |*Od]d$QEhrQ@QEQEEPQ@@jEyoםtV..&Pe=-8KmDpν VF,>?#H,6jէ,GL1įWPzYy*Jr~9̜\%\L.*gב7]0SۣgBxxMClؖewqN(Np9KLVN%5<kx)MwdG᥏*w^CK(U6[d)?`u/#I슲b5_Xe{D xs_5;IuRJ8!kGSَ37[;d[6~<: Sf-EuC˖ _Y894VֺM³薛sV5 rV:Ur|j]%(O`m>gN8qQ[Ϻ:5ֻx º!TQ\fN&\]/`uЄ9 Wrq?v0 H'+ /U'5[E}ڀ"\Q7mڤvR$grFp}NiVy/9>z6b9*n=)$Hlj5k !]HBBFJyݫGtIڃ 't;Gstߎ8KKjMuO-?ҷCC7Op~TКM_n;ZGm{ ) >EKywsHge[[Y?V:r ryk;mW禓e*WAOrhi9r4-D%*𤻪[};+Z'j]T^ۺۮL¸9@q `+pq6Nd$6[BSgiOrbԆ\4=jts|O<+YJm.ǹeKJ [i+$*8e,- BӜ ZrkZcqdm %hǞq"='J 1,=eo}. 
T$sRr <4v!?Xc֫=GSl uq8'/i9leDCw2m wg#ICy#>>jlf{mSIuVOM(u\wٱe@mZ} IRRJ~u[!/BBR=_.b%LEs((( (( C(Uyv$f^.$7}3^ Rb1ҜJ;N~+JQTY}y˱?Jri'PBYJ@8O E5 R(𥦞1ZC'_+G꣩+ɒ)v}Ϥ d5m*Fˌk{[zC(t.#4Jj9*''Cʟ2UYqUJ>tǥjH]QQǁ;qG<B~M[s\МSKߔF\8_eJ*;7Dr6rU8 m*(NҵRڜ>~"'I5Q1wR N&o[BМ_tcү׷ G.KJԄ76wsL((tfpǘ"@JO rFjۅSma|ͳ̖ɈJ$,3,S}X-IH&yRvY-'|9B++^$gp)SJ?Sj8;cG~Z'<+Nk E c{r V05w"XzrDG]V3sQ?t'qq<YjMP.lm*UbZBQ)OO>NjX> +v;sە"B БF;dqGىA$56VwW~:e躊\e¶ԟJGN)|⻦ֱԜ95Sl{5G(ug֑ ^-PGxroh*.zF=v" tR y9/].n~*&esB7%/!m L<1y)lh tT˰ё/>uqL\R- D8ũ;2t_&4irsc vT|n;v<2xQ*π5M7c6H[=,>R=` mV)'4%Y)8d~؍1O&fmn)JZJQHY𨚢*JQ@QEQEEPQ@VQtJ6v4hF8פp"؟5;Uy}Ф %g Qx~!r{r#R'ۃRu VX܋M2Xx yF\8JQv כCqѵ6=N^[Ц>~5qZTQo5 BӂwxIĄ*mu[Ĥ!9O$1U s('+Jz t> 2C-Z=R*mĖn y AK"Z]Cm J H@ʇʥf%}6\eIT1& >eDNoqTPG&)%,gy Y*vZ:c467Ѕ RTڮHvbzJ946RXiǤIW(m'֡.2գ vTޝ]Zm;3hI=F$h2-_MG8SO0˥ZM͎O*; ]Gizvc7p%}LeCϊIu35u+6K^otZh|VL3ػ1eZ{OHXޛD~[HQе%x"#*)Y#Rݟ4f<¢) ghr~M6t[{Ka?eh0b5oa(A<'4&Ω}p=ʞWwHg?*27/é X9S؝m{6a\y{2t8 [ԮcV86^ yWxNx W!7*lbJ1G aTlMjpgdz[.qż p<\ hhlVP8 Ǖa9Z7iy6ؽܩB V%˅<d56IJP!$s«fZzskhmxHinV[O\dծ3M-hmJaTz#8ijJJqtMe˅,wvzsZ@'֨ڗq'++RD89YIVGZI7(=*}[BWv`x*i~В~qSRg%*xp1ЏRDmJ- kY-c4R ˾ \>!|TXDwZnc%mR$,z#s*"wc-A%]TkRy;Yp*{E|3L _Ha%A?yAYFƋYMA|P\tٞ3Ǡ$ՊUI[zP~zBcU .R%L6i*zI /nq.G ̌=è5ˬjȶICt(XWdV$%KA8qEDS_vuV/o6r @qKR:$ҟ-yu.MDR$q%K/ :H[_yߩ-\m= Jdv -5׌+rj\卮c{H['jqhIPڴA^8')D~U+/}7ӕ>:݄. tPs^Gn{?hn+OHE%B( ((((*l>H}_^]r<]z[/djXUQ^vOT_17ؿ.Q#a\wDw%'9ŜBH&7ا+#Z3 < yЌA2|}0V9k)(gvTovT;JA _#ֺuCS:´2$ :0#[qCA #HQԚ G{$ ,rh-@r:<+c rCn-P#!?*gģ|A3J[-> yUhJTTj>+DtciVƮB-QW㯥}F ^ŋBDհj _"iI*e]Ӧ5t e a0 %@uN)ŕRSJ&]7d/v\wАI$SNv/g=E$KLH>|Kemn}i֧_u-?VeDN[i:FUKm*)oiOexrxԪ"WN+Ct~"^]զ{r93NT蹖~eikOJ(4P*zחp6u rlW$YObC"%Y#}[qIےv1SSRuP=I"~-Z[3!Xr4AmuYI.~dOE$8P1K봶JiF2[ݸ^*V9UA)U͢*JQ@QEQEEPQ@QCYCC-A^| X ngWGV?):rD~=Iӏ;8X[隁CB.qH+m\MP(kԊW2_)?1pAAi&.[0[}i֋\M5#?*Ś9$=|]+ZQB- #zCjϠ+r$i t()G 'vPp%dohPg [9--[ P>p+ح8Vӗ %FE/S>;htޮ.-6Ɋ8HkO۷)(WV)Q\rT9 c>t)P{MEC:Fٜ[-N9&3oB;`ZM˱.j3 {A!~$URkGmuޙN Y1>֧&0P*aJ 'MgeYvrT)=$_v~n"{'#Wuil)܁1RӢ-5cRdZHɷFLw`d|N}jœ#O͖X^Ԗ$1);ڽ+ ̧_×=;6}<SHlv<Wkl O'̎=3W*6Q#[.-4FD^/ƖRxIqЊ7QvGBfZCh@TIy& nJ$+JN*nڛÈSBqw tI@MM%ޣ~re!:XRYَٗt:mLp_RJ!PIJ'vǕ_Gڼ)?HH'ȄOWW3nkHPWZ!8 ^z*"6WۃJa0Iʕ*)nMP^`)^3:TcFv<Ǻ=|!V*%Kq)G~fA(A>:T9Ap~B|iʀx0ҁTv #'m$HPDiQA隬~4L ]BRaM,}o^¤ w/oﮇ2Kmꢊ*aEPQ@QEQEEP .U{9,p;0u+䥟Mۿ~I?•+ ٱ; ׊:8YrW/FZW>+Xu╴xjs#(F ܋jsf4pq㚼r^fޤ!$8NO}M\RV*+O*?VLu\nmZrczx )@=q_C=Tkzm97q*V-kzJ+Ghc[@eil-xI\皇رi Zϼ”<՚'H=ˊ,b$Th }6#<$z֖gZMK.,:37R|a9?)}z)M;>U$8r9%;FjjfP7ي9rSRSTc;o* %|:I۸pHqPJ~D;S": V2sP>`-*mk 8oѢ<d-hn )߇C֘k}械)^ڴ 0d4pI<ⴉIh'nZF4nѥH~[P<{AiǏ~uee+WHWolaKy}v>HIo -EظqL8- eJG#rܯ@x֚NH*VظRC)7)ΘEК5[Ӭ4 ۛ)I?!cv?>u~k-HqLLvRe (IRO$nȔh.m0JXq>͌`NIHdž攷HY 땊BR\|w$U6RW;JPa@RQ&tP9)i!hm Ҕ)ݟt(~V6ґOJt2#Ga m u >]i\%IZiZFj@Pq.(Y)Ö[W&c$gJu՟ rb>|ǜ?rxIXBV*D+p_qEU( ((((ݸ'JiYrhL e ?j)mT\f2N±Œs+@R5up؟֗ܰP+VL=]ya)q=:x(𤎹BS.x20xG590zG?V)YH;ASkm8 琤Tv`$$ V6f m8k x2z8v<6:娦1R,yxh-J.jFMaiGβӺa]nɞI#G>*fiFfj{ Jf[g/^YXRx_K Bg)z=ÆIe'O1}IE]\M?fT< ~u# qmq\]?i {RFAizZ'52ߴm˫+؁)@< -J/]X֭Y厠^M3Fͺi&”i{nD7ЏO*6ꛟYǓ䅌1yxz3L[-temMrW(TOJE7+iFL]tN_|KmvT_i:&=R{͌\a¤;|Q#<׳CӣDp̨Q`j]\b܏uo:rVjꘃ'8›:[Z5]=ub/'|Vw:+9&X"2zO=0iS .\)p̎KG>ZOFz OBjao3n0vdyA=O¼x@@AKV͵+vgIiij6ݖLD2D9Ηj00ݢKGqWݩvdҢ%obJ`̸ܙmoa/|M?aÉgHu5ExڏGU+Pt&oBw;b+ދ?r8%i?Tjz~Ao^1gJ3n) S.:*Qe]TOΙnul5=皟n譴 Z9iNe' g&:%ua==I~dXi}jߒiQHqV9L@U1&zYM<5,8#t֥s[-kgJ ܐj+Ṙ{2"$^+ўEh`QEQEEPQ@QE h]HmQ0_j9z'~ԉTiUezxif{k?hdkiԓ)N~|~Kgg9wj5{RK<8p6aD&ƹ~4 +5ʵh"e7H%R,MM1k߯tmZnj-eFQ 䝇IQU RY[ON!Č֗n7ƒ%8#\˒4Ԥ^J ~-VIEړrc/.3k>ΘL9c\͐ZňDNr}p jNޥkipj>Ӆz m{BN~ kҽ 9-$ppuH_EZΚEy[^JPl%u.PYZ Vv(x\uQW}\Rr=i=qS:Y;r|/߻Ԯ+\ q(ZSG5foMvT\rO9;UrH#8ǝ!խ=W}/xֶBm+O)BW'-#M;de}֔1{G\sTMPRLH!n!8u %)J-%'n zyf^Xy@Q}}?>BsmUU{ %!mcjez>'}= phrKKN ?ҡ53m7~*$HСAiA  r>-ZfcS VJ.HMŖR 9"J}OZyvk4:ȮX*CWk-P!;t ֕jђu¿G[Bq+H>]a 
=vU?:N0"ʙ!(*B ’ﰫB[T`l>w;ʳMԚ]b( (((( (V+.L,4O4O/T;O%Vw3I uP{ki?scG4v->O?XKvTδRȭ jew)Khcێε}/FI]{Ugmyh (kб;Ӑm\nI);Uݩ['=-K1}*ڦː#+9E汚;ܾiԯj SW\%N,Z~d%L:2₈*v~jnae4S㹨-cۙVcF^ХR}ԁ$Vrm2M&Z}LibLup"Hǂ?/ j7H*璐 #H=VPy\۪pAyA/cj:dUseE;\|,6>#όxIja2'ʛ%y g>?  / S\;U(ztǖ X^<LjW#JݩOb=Cn(n(͚vC@\n&r,nS$|s$gƫzŶ2ZBTzɢzJ.lI%8!SRx\/7Q}d{eOTJ)W}b*d[){6-*>>cUXHU,mt0JG`|:;tz%lv ?H 9*{<;aMsfHJvIm([j%0Z&!!#v:$ t  5t[&}te+A?$l((( (((om Nj (>Ru )$V0GB)G"_Ɓ`eof:*y5^1/7w.? u+tZ:BoS I^1`, I'd~tHCa^PBzNvwO8EAXA9L2 $<@>Iڍ@*WκR2=9J,pU慗š=iR h Vfˡ_R2z|I5^~Cv ۏ"*Jhp٧hlMDY"@3(;{d5C7n)qSJT> e:\jyl:JbK(Y:װ.kuFJ=!kYq*;|AnvmVwRjP[ܣmr ;q[JĥC/X.g;g&- lxT-\)NTyuq>gNLڔG^d#oy<|zUȕ6'γJ\.?*K$AqUJ=,;5m갫4uI!JDNjCǐƬi4 * **).+#KZą<8EvʆB6(*L%E L8@ܴ,`}H @ !jN<2kbH @^sOc*%*\6P2tcbxykJ?iI>Tۉqr|q[l+&C[^:͛AOOCtUlHνMn ]/(E6h'ke63t󀷽ICd4$,pjw e$Gm^NyώIqmrvw'5hҭk$ /2Nʱ :蝚i->'c`q֗䌩{Š!Um7Q6HQ =i]fYze:ӕz`㠪qvtA \ 8`|(\ڛ+BS!2Vwy(* 8ʫYV5ñOKY820ARd2cږ`6ܶZBPpryZ#ۛIR!l9ߐ]~\)>]}N4pQRJ\V$5]TN;mI)B]I I@Vr}¢{dҊV \iV°f% >=8:Q@((( (('D5hnrv¡,AUm%ȗ"T|P +u[Aj<-6SI{i>J_9{t<12㩧w~MÓWߥ٦~ad<_K) m )IϥR;I,Nˇ1)ϥhhФry@f$r[Vߕ>?Xcq޼U J@f& v$y^~W ha%A.) 㚖@끯q2f;1-1=Iڂz%$WOnSò RԻlmjJ6=SFq)/iZQKcZwB7y)Bs+LK:po wXq\hmh+QN}x%~\!@)DO^9vsQݒy1ZV\C pTGrQeEm>9 Q t㚚e jSyJu?3QĈ 7ԙzORUji3kvRO,)ْ *#ݢ}+mї. 4]})} %7"Zn,Kޕh6Vp2LW,j`~1rp4A8|=*-/eMSzCIVZQ@ڇ@ƭ.)`OmJ7a[rxAֲ_2/).V]AH})*G@+-~Rk$NSμCm6sq#\!Vբ!޲x@9j }C "c]#  kfcND$r;ڛRquH6dx,6%'yjinѽ5IBͱӣhDkw'Aȩ${˛mK|\ Ԡ@KlTO5Tɍ+&} EPQ@QEQE;+/>R7&N1-> ڼQK渞 (=o/EρJqW/RBK8f2%Fr:c@JF7|4N{O$գԉlt$ۏgq+T&o!J* =<~KP =/3-~ TU 6M.;3-JD.*[ݰx rID6Xcoջ=|1[ER\w{$m@lzK)61}< SLufԷ,ktWvC/rI$d8ljsگ_'.B[,!$%I9P$ K5*h᫾u8)'#ίN'NMuB܏1Rq[2R9.ZD-qhan&IS}II'ǧ^}j{P;xO.77k|gX\g4(r[ GJjA98K{ODpG5IuhIN1&rGV>€ijLdoR`"nbR{*L ٿo3jcFVYŹ"GRio 'sry$ 0b:҈jB6+q)O=?.ug%eJ};ф$NyӶXҢCqևNBs|kUC']eܰFm=* ]nrY .Bd6:ulĸ#s>*OH RJ QNsZ"#X[7\d8 7 76`nJx}*)`ohtvAQrO5Q@~b"owq㼨uN_7yPiH ldCg*8<Ղ-HZ[m╞v qS@\f;8ݍiVƣq J)'QNTh 6S`oSBEpNaKMrKs N*|jq#YP4 wzxWsA$P '=zu>YVDŶvQrRR@lz'ֻ\ udx]o0q%'!C,Q* B^ȊjrTDw;!X5 rB %j^wճ涙&nꕏӣH:uE8i Hc? cߩ?%Qq*N`(( ((.r m9 inE=y% oRGW_'$|* 44},GHRDVl^u\ʾL/B3~b(ŕ85x֨I)5ޞ"#) pOnyZ[PKP9‡Ny3gW K&W8 Q°Dwطμjʖ{J8[u?ޗRtz Zt{i,2 H>9R*Ö%BLSKKja\9@oP|)bDHDL>_ګ]*$rkpn9%gzĎ1]Ytc 1쀞ev@qʑܣɜ&*f@Is0kѯiҘqZ(%9# *s@ҩe7G-VJLCzJILhXجk"*VK A1,*-0aH/iEN }+Q*RjtmvSmR*N˨U MZj yN njxP:79]߅*(M}Hm#j^!JOI*/Mk-f-J\qCDӎ|T &ձ $oX[?5-P,&d4ld{{TMޅ̢ӷU٢d[=ؐ!xuGg@pʊV56i| NAP >?[KO@~u07^c/GWH-:9V7#ư 2w'օ3-κXԂܻk&VIg"0ř CJ;rxϨm8BVาI8axAZ.:Āy-gj%8|25PyP6.P;V˥+zc(:8;xbk&t͛K}{H{V//&c\ny+)n'rR9VѺqzz47EuW{vB4$#l$}/ [!G51,ies{/QE@ (((Jvk=jROEc:sYZ]aMӡd*Q )ug넎W7}Zy(*Mn=*ÍHi/o2@皏vr%ʴok w7#W q[+04s"&_CxcZr}i^(6wHuy&Дec*Wxy|P[uSm'!].}YS1XgBrkK9yWW&$-s ] (n3xLfǨ[GQQtnp;T `+㛗VVLdc>_xVzĺ7nTj͘0#Ƈ Xȗ;e9II8NƇÚa&x̥?`"Ql05 NS_/ZIhOڇR4 F,KNٖK/gwzp: Vijq_Y?ƦJeK͇FBRoLɎoo7eo$`l+Js[jS S9a==āKCR_,nmW7pE%MnJTuqQvDBqc2Ir~~uZV&N^Ng]NBQaxf>q;[oA_N=עV> B ξ,tOVQNqA>\mٮ8?7KN3wL ;Mf'8KFWϡf\/$m/<~5b֠G9b㪢J[LӫX;#o)mwp0\OβUG!{Iu_cyO<]W;ќY;BZ=p纥h> '*i.vZ\S?Ʒ&pVy#疮MبH-Ek5eT;f{lwk7aT^H͓9-:J1Vs=SsS+<0Mꗑuq E6o*Zub g8pS GC|jɥtJ7!v>> exchange = Exchange('tasks', 'direct') >>> connection = Connection() >>> bound_exchange = exchange(connection) >>> bound_exchange.delete() # the original exchange is not affected, and stays unbound. >>> exchange.delete() raise NotBoundError: Can't call delete on Exchange not bound to a channel. Terminology =========== There are some concepts you should be familiar with before starting: * Producers Producers sends messages to an exchange. * Exchanges Messages are sent to exchanges. Exchanges are named and can be configured to use one of several routing algorithms. 
The exchange routes the messages to consumers by matching the routing key in the message with the routing key the consumer provides when binding to the exchange. * Consumers A consumer declares a queue, binds it to an exchange and receives messages from it. * Queues Queues receive messages sent to exchanges. The queues are declared by consumers. * Routing keys Every message has a routing key. The interpretation of the routing key depends on the exchange type. There are four default exchange types defined by the AMQP standard, and vendors can define custom types (so see your vendors manual for details). These are the default exchange types defined by AMQP/0.8: * Direct exchange Matches if the routing key property of the message and the `routing_key` attribute of the consumer are identical. * Fan-out exchange Always matches, even if the binding does not have a routing key. * Topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available; star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`. kombu-5.5.3/docs/includes/resources.txt000066400000000000000000000013261477772317200201750ustar00rootroot00000000000000Getting Help ============ Mailing list ------------ Join the `carrot-users`_ mailing list. .. _`carrot-users`: http://groups.google.com/group/carrot-users/ Bug tracker =========== If you have any suggestions, bug reports or annoyances please report them to our issue tracker at http://github.com/celery/kombu/issues/ Contributing ============ Development of `Kombu` happens at Github: http://github.com/celery/kombu You are highly encouraged to participate in the development. If you don't like Github (for some reason) you're welcome to send regular patches. License ======= This software is licensed under the `New BSD License`. See the `LICENSE` file in the top distribution directory for the full license text. kombu-5.5.3/docs/index.rst000066400000000000000000000004571477772317200154610ustar00rootroot00000000000000Kombu Documentation ================================== Contents: .. toctree:: :maxdepth: 2 introduction userguide/index .. toctree:: :maxdepth: 1 faq reference/index changelog Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` kombu-5.5.3/docs/introduction.rst000066400000000000000000000003301477772317200170610ustar00rootroot00000000000000======================================== Getting Started ======================================== .. include:: includes/introduction.txt .. include:: includes/installation.txt .. include:: includes/resources.txt kombu-5.5.3/docs/make.bat000066400000000000000000000164641477772317200152320ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. 
singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. epub3 to make an epub3 echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PROJ.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PROJ.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. 
goto end ) if "%1" == "epub3" ( %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end kombu-5.5.3/docs/reference/000077500000000000000000000000001477772317200155505ustar00rootroot00000000000000kombu-5.5.3/docs/reference/index.rst000066400000000000000000000041121477772317200174070ustar00rootroot00000000000000=========================== API Reference =========================== :Release: |version| :Date: |today| Kombu Core ========== .. toctree:: :maxdepth: 1 kombu kombu.common kombu.matcher kombu.mixins kombu.simple kombu.clocks kombu.compat kombu.pidbox kombu.exceptions kombu.log kombu.connection kombu.message kombu.compression kombu.pools kombu.abstract kombu.resource kombu.serialization kombu.native_delayed_delivery Kombu Transports ================ .. 
toctree:: :maxdepth: 1 kombu.transport kombu.transport.base kombu.transport.virtual kombu.transport.virtual.exchange kombu.transport.azurestoragequeues kombu.transport.azureservicebus kombu.transport.pyamqp kombu.transport.librabbitmq kombu.transport.qpid kombu.transport.memory kombu.transport.redis kombu.transport.mongodb kombu.transport.consul kombu.transport.etcd kombu.transport.zookeeper kombu.transport.filesystem kombu.transport.sqlalchemy kombu.transport.SQS kombu.transport.SLMQ kombu.transport.pyro Kombu Asynchronous ================== .. toctree:: :maxdepth: 1 kombu.asynchronous kombu.asynchronous.hub kombu.asynchronous.semaphore kombu.asynchronous.timer kombu.asynchronous.debug kombu.asynchronous.http kombu.asynchronous.http.base kombu.asynchronous.http.urllib3_client kombu.asynchronous.aws kombu.asynchronous.aws.connection kombu.asynchronous.aws.sqs kombu.asynchronous.aws.sqs.connection kombu.asynchronous.aws.sqs.message kombu.asynchronous.aws.sqs.queue Kombu utils =========== .. toctree:: :maxdepth: 1 kombu.utils.amq_manager kombu.utils.collections kombu.utils.compat kombu.utils.debug kombu.utils.div kombu.utils.encoding kombu.utils.eventio kombu.utils.functional kombu.utils.imports kombu.utils.json kombu.utils.limits kombu.utils.objects kombu.utils.scheduling kombu.utils.text kombu.utils.time kombu.utils.url kombu.utils.uuid kombu-5.5.3/docs/reference/kombu.abstract.rst000066400000000000000000000004551477772317200212250ustar00rootroot00000000000000======================================= Abstract Classes - ``kombu.abstract`` ======================================= .. currentmodule:: kombu.abstract .. automodule:: kombu.abstract .. contents:: :local: .. autoclass:: MaybeChannelBound :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.connection.rst000066400000000000000000000005421477772317200251010ustar00rootroot00000000000000============================================================== Amazon AWS Connection - ``kombu.asynchronous.aws.connection`` ============================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws.connection .. automodule:: kombu.asynchronous.aws.connection :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.rst000066400000000000000000000004731477772317200227460ustar00rootroot00000000000000========================================================== Async Amazon AWS Client - ``kombu.asynchronous.aws`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws .. automodule:: kombu.asynchronous.aws :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.sqs.connection.rst000066400000000000000000000005411477772317200257050ustar00rootroot00000000000000=========================================================== SQS Connection - ``kombu.asynchronous.aws.sqs.connection`` =========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws.sqs.connection .. automodule:: kombu.asynchronous.aws.sqs.connection :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.sqs.message.rst000066400000000000000000000005241477772317200251730ustar00rootroot00000000000000========================================================== SQS Messages - ``kombu.asynchronous.aws.sqs.message`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws.sqs.message .. 
automodule:: kombu.asynchronous.aws.sqs.message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.sqs.queue.rst000066400000000000000000000005141477772317200246720ustar00rootroot00000000000000========================================================== SQS Queues - ``kombu.asynchronous.aws.sqs.queue`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws.sqs.queue .. automodule:: kombu.asynchronous.aws.sqs.queue :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.aws.sqs.rst000066400000000000000000000005071477772317200235510ustar00rootroot00000000000000========================================================== Async Amazon SQS Client - ``kombu.asynchronous.aws.sqs`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.aws.sqs .. automodule:: kombu.asynchronous.aws.sqs :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.debug.rst000066400000000000000000000005041477772317200232350ustar00rootroot00000000000000========================================================== Event Loop Debugging Utils - ``kombu.asynchronous.debug`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.debug .. automodule:: kombu.asynchronous.debug :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.http.base.rst000066400000000000000000000005331477772317200240410ustar00rootroot00000000000000=============================================================== Async HTTP Client Interface - ``kombu.asynchronous.http.base`` =============================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.http.base .. automodule:: kombu.asynchronous.http.base :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.http.curl.rst000066400000000000000000000005221477772317200240720ustar00rootroot00000000000000============================================================ Async pyCurl HTTP Client - ``kombu.asynchronous.http.curl`` ============================================================ .. contents:: :local: .. currentmodule:: kombu.asynchronous.http.curl .. automodule:: kombu.asynchronous.http.curl :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.http.rst000066400000000000000000000004701477772317200231300ustar00rootroot00000000000000========================================================== Async HTTP Client - ``kombu.asynchronous.http`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.http .. automodule:: kombu.asynchronous.http :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.http.urllib3_client.rst000066400000000000000000000005601477772317200260410ustar00rootroot00000000000000============================================================ Urllib3 HTTP Client Pool - ``kombu.asynchronous.http.urllib3_client`` ============================================================ .. contents:: :local: .. currentmodule:: kombu.asynchronous.http.urllib3_client .. 
automodule:: kombu.asynchronous.http.urllib3_client :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.hub.rst000066400000000000000000000004751477772317200227340ustar00rootroot00000000000000========================================================== Event Loop Implementation - ``kombu.asynchronous.hub`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.hub .. automodule:: kombu.asynchronous.hub :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.rst000066400000000000000000000004421477772317200221510ustar00rootroot00000000000000========================================================== Event Loop - ``kombu.asynchronous`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous .. automodule:: kombu.asynchronous :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.semaphore.rst000066400000000000000000000005001477772317200241260ustar00rootroot00000000000000========================================================== Semaphores - ``kombu.asynchronous.semaphore`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.semaphore .. automodule:: kombu.asynchronous.semaphore :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.asynchronous.timer.rst000066400000000000000000000004571477772317200232760ustar00rootroot00000000000000========================================================== Timer - ``kombu.asynchronous.timer`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.asynchronous.timer .. automodule:: kombu.asynchronous.timer :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.clocks.rst000066400000000000000000000004501477772317200206730ustar00rootroot00000000000000========================================================== Logical Clocks and Synchronization - ``kombu.clocks`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.clocks .. automodule:: kombu.clocks :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.common.rst000066400000000000000000000004261477772317200207100ustar00rootroot00000000000000========================================================== Common Utilities - ``kombu.common`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.common .. automodule:: kombu.common :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.compat.rst000066400000000000000000000013511477772317200207010ustar00rootroot00000000000000========================================== Carrot Compatibility - ``kombu.compat`` ========================================== .. currentmodule:: kombu.compat .. automodule:: kombu.compat .. contents:: :local: Publisher --------- Replace with :class:`kombu.Producer`. .. autoclass:: Publisher :members: :undoc-members: :inherited-members: Consumer -------- Replace with :class:`kombu.Consumer`. .. autoclass:: Consumer :members: :undoc-members: :inherited-members: ConsumerSet ----------- Replace with :class:`kombu.Consumer`. .. 
autoclass:: ConsumerSet :members: :undoc-members: :inherited-members: kombu-5.5.3/docs/reference/kombu.compression.rst000066400000000000000000000010011477772317200217470ustar00rootroot00000000000000============================================= Message Compression - ``kombu.compression`` ============================================= .. currentmodule:: kombu.compression .. automodule:: kombu.compression .. contents:: :local: Encoding/decoding ----------------- .. autofunction:: compress .. autofunction:: decompress Registry -------- .. autofunction:: encoders .. autofunction:: get_encoder .. autofunction:: get_decoder .. autofunction:: register kombu-5.5.3/docs/reference/kombu.connection.rst000066400000000000000000000015641477772317200215630ustar00rootroot00000000000000======================================= Connection - ``kombu.connection`` ======================================= .. currentmodule:: kombu.connection .. automodule:: kombu.connection .. contents:: :local: Connection ---------- .. autoclass:: Connection :members: :undoc-members: Pools ----- .. seealso:: The shortcut methods :meth:`Connection.Pool` and :meth:`Connection.ChannelPool` is the recommended way to instantiate these classes. .. autoclass:: ConnectionPool .. autoattribute:: LimitExceeded .. automethod:: acquire .. automethod:: release .. automethod:: force_close_all .. autoclass:: ChannelPool .. autoattribute:: LimitExceeded .. automethod:: acquire .. automethod:: release .. automethod:: force_close_all kombu-5.5.3/docs/reference/kombu.exceptions.rst000066400000000000000000000007151477772317200216020ustar00rootroot00000000000000===================================== Exceptions - ``kombu.exceptions`` ===================================== .. currentmodule:: kombu.exceptions .. automodule:: kombu.exceptions .. contents:: :local: .. autoexception:: NotBoundError .. autoexception:: MessageStateError .. autoexception:: TimeoutError .. autoexception:: LimitExceeded .. autoexception:: ConnectionLimitExceeded .. autoexception:: ChannelLimitExceeded kombu-5.5.3/docs/reference/kombu.log.rst000066400000000000000000000004041477772317200201750ustar00rootroot00000000000000========================================================== Logging - ``kombu.log`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.log .. automodule:: kombu.log :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.matcher.rst000066400000000000000000000004121477772317200210360ustar00rootroot00000000000000============================================== Pattern matching registry - ``kombu.matcher`` ============================================== .. contents:: :local: .. currentmodule:: kombu.matcher .. automodule:: kombu.matcher :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.message.rst000066400000000000000000000004301477772317200210370ustar00rootroot00000000000000========================================================== Message Objects - ``kombu.message`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.message .. automodule:: kombu.message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.mixins.rst000066400000000000000000000004231477772317200207240ustar00rootroot00000000000000========================================================== Mixin Classes - ``kombu.mixins`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.mixins .. 
automodule:: kombu.mixins :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.pidbox.rst000066400000000000000000000044371477772317200207130ustar00rootroot00000000000000========================================= Pidbox - ``kombu.pidbox`` ========================================= .. currentmodule:: kombu.pidbox .. automodule:: kombu.pidbox .. contents:: :local: Introduction ------------ Creating the applications Mailbox ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python >>> mailbox = pidbox.Mailbox('celerybeat', type='direct') >>> @mailbox.handler >>> def reload_schedule(state, **kwargs): ... state['beat'].reload_schedule() >>> @mailbox.handler >>> def connection_info(state, **kwargs): ... return {'connection': state['connection'].info()} Example Node ~~~~~~~~~~~~ .. code-block:: python >>> connection = kombu.Connection() >>> state = {'beat': beat, 'connection': connection} >>> consumer = mailbox(connection).Node(hostname).listen() >>> try: ... while True: ... connection.drain_events(timeout=1) ... finally: ... consumer.cancel() Example Client ~~~~~~~~~~~~~~ .. code-block:: python >>> mailbox.cast('reload_schedule') # cast is async. >>> info = celerybeat.call('connection_info', timeout=1) Mailbox ------- .. autoclass:: Mailbox .. autoattribute:: namespace .. autoattribute:: connection .. autoattribute:: type .. autoattribute:: exchange .. autoattribute:: reply_exchange .. automethod:: Node .. automethod:: call .. automethod:: cast .. automethod:: abcast .. automethod:: multi_call .. automethod:: get_reply_queue .. automethod:: get_queue Node ---- .. autoclass:: Node .. autoattribute:: hostname .. autoattribute:: mailbox .. autoattribute:: handlers .. autoattribute:: state .. autoattribute:: channel .. automethod:: Consumer .. automethod:: handler .. automethod:: listen .. automethod:: dispatch .. automethod:: dispatch_from_message .. automethod:: handle_call .. automethod:: handle_cast .. automethod:: handle .. automethod:: handle_message .. automethod:: reply kombu-5.5.3/docs/reference/kombu.pools.rst000066400000000000000000000004341477772317200205530ustar00rootroot00000000000000========================================================== Connection/Producer Pools - ``kombu.pools`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.pools .. automodule:: kombu.pools :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.resource.rst000066400000000000000000000004371477772317200212510ustar00rootroot00000000000000========================================================== Resource Management - ``kombu.resource`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.resource .. automodule:: kombu.resource :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.rst000066400000000000000000000136171477772317200174270ustar00rootroot00000000000000=================================== Kombu - ``kombu`` =================================== .. currentmodule:: kombu .. contents:: :local: .. automodule:: kombu .. autofunction:: enable_insecure_serializers .. autofunction:: disable_insecure_serializers Connection ---------- .. autoclass:: Connection .. admonition:: Attributes .. autoattribute:: hostname .. autoattribute:: port .. autoattribute:: userid .. autoattribute:: password .. autoattribute:: virtual_host .. autoattribute:: ssl .. autoattribute:: login_method .. autoattribute:: failover_strategy .. autoattribute:: connect_timeout .. autoattribute:: heartbeat .. 
autoattribute:: default_channel .. autoattribute:: connected .. autoattribute:: recoverable_connection_errors .. autoattribute:: recoverable_channel_errors .. autoattribute:: connection_errors .. autoattribute:: channel_errors .. autoattribute:: transport .. autoattribute:: connection .. autoattribute:: uri_prefix .. autoattribute:: declared_entities .. autoattribute:: cycle .. autoattribute:: host .. autoattribute:: manager .. autoattribute:: supports_heartbeats .. autoattribute:: is_evented .. admonition:: Methods .. automethod:: as_uri .. automethod:: connect .. automethod:: channel .. automethod:: drain_events .. automethod:: release .. automethod:: autoretry .. automethod:: ensure_connection .. automethod:: ensure .. automethod:: revive .. automethod:: create_transport .. automethod:: get_transport_cls .. automethod:: clone .. automethod:: info .. automethod:: switch .. automethod:: maybe_switch_next .. automethod:: heartbeat_check .. automethod:: maybe_close_channel .. automethod:: register_with_event_loop .. automethod:: close .. automethod:: _close .. automethod:: completes_cycle .. automethod:: get_manager .. automethod:: Producer .. automethod:: Consumer .. automethod:: Pool .. automethod:: ChannelPool .. automethod:: SimpleQueue .. automethod:: SimpleBuffer Exchange -------- Example creating an exchange declaration:: >>> news_exchange = Exchange('news', type='topic') For now `news_exchange` is just a declaration, you can't perform actions on it. It just describes the name and options for the exchange. The exchange can be bound or unbound. Bound means the exchange is associated with a channel and operations can be performed on it. To bind the exchange you call the exchange with the channel as argument:: >>> bound_exchange = news_exchange(channel) Now you can perform operations like :meth:`declare` or :meth:`delete`:: >>> # Declare exchange manually >>> bound_exchange.declare() >>> # Publish raw string message using low-level exchange API >>> bound_exchange.publish( ... 'Cure for cancer found!', ... routing_key='news.science', ... ) >>> # Delete exchange. >>> bound_exchange.delete() .. autoclass:: Exchange :members: :undoc-members: .. automethod:: maybe_bind Queue ----- Example creating a queue using our exchange in the :class:`Exchange` example:: >>> science_news = Queue('science_news', ... exchange=news_exchange, ... routing_key='news.science') For now `science_news` is just a declaration, you can't perform actions on it. It just describes the name and options for the queue. The queue can be bound or unbound. Bound means the queue is associated with a channel and operations can be performed on it. To bind the queue you call the queue instance with the channel as an argument:: >>> bound_science_news = science_news(channel) Now you can perform operations like :meth:`declare` or :meth:`purge`: .. code-block:: python >>> bound_science_news.declare() >>> bound_science_news.purge() >>> bound_science_news.delete() .. autoclass:: Queue :members: :undoc-members: .. automethod:: maybe_bind Message Producer ---------------- .. autoclass:: Producer .. autoattribute:: channel .. autoattribute:: exchange .. autoattribute:: routing_key .. autoattribute:: serializer .. autoattribute:: compression .. autoattribute:: auto_declare .. autoattribute:: on_return .. autoattribute:: connection .. automethod:: declare .. automethod:: maybe_declare .. automethod:: publish .. automethod:: revive Message Consumer ---------------- .. autoclass:: Consumer .. autoattribute:: channel .. 
autoattribute:: queues .. autoattribute:: no_ack .. autoattribute:: auto_declare .. autoattribute:: callbacks .. autoattribute:: on_message .. autoattribute:: on_decode_error .. autoattribute:: connection .. automethod:: declare .. automethod:: register_callback .. automethod:: add_queue .. automethod:: consume .. automethod:: cancel .. automethod:: cancel_by_queue .. automethod:: consuming_from .. automethod:: purge .. automethod:: flow .. automethod:: qos .. automethod:: recover .. automethod:: receive .. automethod:: revive kombu-5.5.3/docs/reference/kombu.serialization.rst000066400000000000000000000021361477772317200222750ustar00rootroot00000000000000================================================ Message Serialization - ``kombu.serialization`` ================================================ .. currentmodule:: kombu.serialization .. automodule:: kombu.serialization .. contents:: :local: Overview -------- Centralized support for encoding/decoding of data structures. Contains json, pickle, msgpack, and yaml serializers. Optionally installs support for YAML if the `PyYAML`_ package is installed. Optionally installs support for `msgpack`_ if the `msgpack-python`_ package is installed. Exceptions ---------- .. autoexception:: SerializerNotInstalled Serialization ------------- .. autofunction:: dumps .. autofunction:: loads .. autofunction:: raw_encode Registry -------- .. autofunction:: register .. autofunction:: unregister .. autodata:: registry .. _`Python 2.7+`: https://docs.python.org/library/json.html .. _`PyYAML`: https://pyyaml.org/ .. _`msgpack`: https://msgpack.org/ .. _`msgpack-python`: https://pypi.org/project/msgpack-python/ kombu-5.5.3/docs/reference/kombu.simple.rst000066400000000000000000000037751477772317200207230ustar00rootroot00000000000000=============================================== Simple Messaging API - ``kombu.simple`` =============================================== .. currentmodule:: kombu.simple .. automodule:: kombu.simple .. contents:: :local: Persistent ---------- .. autoclass:: SimpleQueue .. attribute:: channel Current channel .. attribute:: producer :class:`~kombu.Producer` used to publish messages. .. attribute:: consumer :class:`~kombu.Consumer` used to receive messages. .. attribute:: no_ack flag to enable/disable acknowledgments. .. attribute:: queue :class:`~kombu.Queue` to consume from (if consuming). .. attribute:: queue_opts Additional options for the queue declaration. .. attribute:: exchange_opts Additional options for the exchange declaration. .. automethod:: get .. automethod:: get_nowait .. automethod:: put .. automethod:: clear .. automethod:: __len__ .. automethod:: qsize .. automethod:: close Buffer ------ .. autoclass:: SimpleBuffer .. attribute:: channel Current channel .. attribute:: producer :class:`~kombu.Producer` used to publish messages. .. attribute:: consumer :class:`~kombu.Consumer` used to receive messages. .. attribute:: no_ack flag to enable/disable acknowledgments. .. attribute:: queue :class:`~kombu.Queue` to consume from (if consuming). .. attribute:: queue_opts Additional options for the queue declaration. .. attribute:: exchange_opts Additional options for the exchange declaration. .. automethod:: get .. automethod:: get_nowait .. automethod:: put .. automethod:: clear .. automethod:: __len__ .. automethod:: qsize .. 
automethod:: close kombu-5.5.3/docs/reference/kombu.transport.SLMQ.rst000066400000000000000000000006761477772317200222360ustar00rootroot00000000000000============================================= SLMQ Transport - ``kombu.transport.SLMQ`` ============================================= .. currentmodule:: kombu.transport.SLMQ .. automodule:: kombu.transport.SLMQ .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.SQS.rst000066400000000000000000000056131477772317200221240ustar00rootroot00000000000000================================================ Amazon SQS Transport - ``kombu.transport.SQS`` ================================================ .. currentmodule:: kombu.transport.SQS .. automodule:: kombu.transport.SQS .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Back-off policy ------------------------ Back-off policy is using SQS visibility timeout mechanism altering the time difference between task retries. The mechanism changes message specific ``visibility timeout`` from queue ``Default visibility timeout`` to policy configured timeout. The number of retries is managed by SQS (specifically by the ``ApproximateReceiveCount`` message attribute) and no further action is required by the user. Configuring the queues and backoff policy:: broker_transport_options = { 'predefined_queues': { 'my-q': { 'url': 'https://ap-southeast-2.queue.amazonaws.com/123456/my-q', 'access_key_id': 'xxx', 'secret_access_key': 'xxx', 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, 'backoff_tasks': ['svc.tasks.tasks.task1'] } } } ``backoff_policy`` dictionary where key is number of retries, and value is delay seconds between retries (i.e SQS visibility timeout) ``backoff_tasks`` list of task names to apply the above policy The above policy: +-----------------------------------------+--------------------------------------------+ | **Attempt** | **Delay** | +-----------------------------------------+--------------------------------------------+ | ``2nd attempt`` | 20 seconds | +-----------------------------------------+--------------------------------------------+ | ``3rd attempt`` | 40 seconds | +-----------------------------------------+--------------------------------------------+ | ``4th attempt`` | 80 seconds | +-----------------------------------------+--------------------------------------------+ | ``5th attempt`` | 320 seconds | +-----------------------------------------+--------------------------------------------+ | ``6th attempt`` | 640 seconds | +-----------------------------------------+--------------------------------------------+ Message Attributes ------------------------ SQS supports sending message attributes along with the message body. To use this feature, you can pass a 'message_attributes' as keyword argument to `basic_publish` method.kombu-5.5.3/docs/reference/kombu.transport.azureservicebus.rst000066400000000000000000000010251477772317200246700ustar00rootroot00000000000000================================================================== Azure Service Bus Transport - ``kombu.transport.azureservicebus`` ================================================================== .. currentmodule:: kombu.transport.azureservicebus .. automodule:: kombu.transport.azureservicebus .. contents:: :local: Transport --------- .. 
autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.azurestoragequeues.rst000066400000000000000000000010551477772317200254150ustar00rootroot00000000000000======================================================================== Azure Storage Queues Transport - ``kombu.transport.azurestoragequeues`` ======================================================================== .. currentmodule:: kombu.transport.azurestoragequeues .. automodule:: kombu.transport.azurestoragequeues .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.base.rst000066400000000000000000000035331477772317200223670ustar00rootroot00000000000000================================================== Transport Base Class - ``kombu.transport.base`` ================================================== .. currentmodule:: kombu.transport.base .. automodule:: kombu.transport.base .. contents:: :local: Message ------- .. autoclass:: Message .. autoattribute:: payload .. autoattribute:: channel .. autoattribute:: delivery_tag .. autoattribute:: content_type .. autoattribute:: content_encoding .. autoattribute:: delivery_info .. autoattribute:: headers .. autoattribute:: properties .. autoattribute:: body .. autoattribute:: acknowledged .. automethod:: ack .. automethod:: reject .. automethod:: requeue .. automethod:: decode Transport --------- .. autoclass:: Transport .. autoattribute:: client .. autoattribute:: default_port .. attribute:: recoverable_connection_errors Optional list of connection related exceptions that can be recovered from, but where the connection must be closed and re-established first. If not defined then all :attr:`connection_errors` and :class:`channel_errors` will be regarded as recoverable, but needing to close the connection first. .. attribute:: recoverable_channel_errors Optional list of channel related exceptions that can be automatically recovered from without re-establishing the connection. .. autoattribute:: connection_errors .. autoattribute:: channel_errors .. automethod:: establish_connection .. automethod:: close_connection .. automethod:: create_channel .. automethod:: close_channel .. automethod:: drain_events kombu-5.5.3/docs/reference/kombu.transport.confluentkafka.rst000066400000000000000000000011351477772317200244440ustar00rootroot00000000000000========================================================= confluent-kafka Transport - ``kombu.transport.confluentkafka`` ========================================================= .. currentmodule:: kombu.transport.confluentkafka .. automodule:: kombu.transport.confluentkafka .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.consul.rst000066400000000000000000000007131477772317200227550ustar00rootroot00000000000000================================================ Consul Transport - ``kombu.transport.consul`` ================================================ .. currentmodule:: kombu.transport.consul .. automodule:: kombu.transport.consul .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. 
autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.etcd.rst000066400000000000000000000007031477772317200223700ustar00rootroot00000000000000================================================ Etcd Transport - ``kombu.transport.etcd`` ================================================ .. currentmodule:: kombu.transport.etcd .. automodule:: kombu.transport.etcd .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.filesystem.rst000066400000000000000000000007551477772317200236440ustar00rootroot00000000000000======================================================== File-system Transport - ``kombu.transport.filesystem`` ======================================================== .. currentmodule:: kombu.transport.filesystem .. automodule:: kombu.transport.filesystem .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.gcpubsub.rst000066400000000000000000000007731477772317200232720ustar00rootroot00000000000000============================================================== Google Cloud Pub/Sub Transport - ``kombu.transport.gcpubsub`` ============================================================== .. currentmodule:: kombu.transport.gcpubsub .. automodule:: kombu.transport.gcpubsub .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.librabbitmq.rst000066400000000000000000000013451477772317200237440ustar00rootroot00000000000000=============================================================== librabbitmq AMQP transport - ``kombu.transport.librabbitmq`` =============================================================== .. currentmodule:: kombu.transport.librabbitmq .. automodule:: kombu.transport.librabbitmq .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- .. autoclass:: Connection :members: :undoc-members: :inherited-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.memory.rst000066400000000000000000000007221477772317200227620ustar00rootroot00000000000000================================================== In-memory Transport - ``kombu.transport.memory`` ================================================== .. currentmodule:: kombu.transport.memory .. automodule:: kombu.transport.memory .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.mongodb.rst000066400000000000000000000007211477772317200230760ustar00rootroot00000000000000================================================= MongoDB Transport - ``kombu.transport.mongodb`` ================================================= .. currentmodule:: kombu.transport.mongodb .. automodule:: kombu.transport.mongodb .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. 
autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.native_delayed_delivery.rst000066400000000000000000000005651477772317200263370ustar00rootroot00000000000000========================================================== Native Delayed Delivery - ``native_delayed_delivery`` ========================================================== .. versionadded:: 5.5 .. contents:: :local: .. currentmodule:: kombu.transport.native_delayed_delivery .. automodule:: kombu.transport.native_delayed_delivery :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.pyamqp.rst000066400000000000000000000013131477772317200227560ustar00rootroot00000000000000========================================================= Pure-python AMQP Transport - ``kombu.transport.pyamqp`` ========================================================= .. currentmodule:: kombu.transport.pyamqp .. automodule:: kombu.transport.pyamqp .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- .. autoclass:: Connection :members: :undoc-members: :inherited-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.pyro.rst000066400000000000000000000010271477772317200224420ustar00rootroot00000000000000================================================ Pyro Transport - ``kombu.transport.pyro`` ================================================ .. currentmodule:: kombu.transport.pyro .. automodule:: kombu.transport.pyro .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: KombuBroker ----------- .. autoclass:: KombuBroker :members: kombu-5.5.3/docs/reference/kombu.transport.qpid.rst000066400000000000000000000012301477772317200224020ustar00rootroot00000000000000=================================================== Apache QPid Transport - ``kombu.transport.qpid`` =================================================== .. currentmodule:: kombu.transport.qpid .. automodule:: kombu.transport.qpid .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- .. autoclass:: Connection :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.redis.rst000066400000000000000000000011001477772317200225470ustar00rootroot00000000000000================================================= Redis Transport - ``kombu.transport.redis`` ================================================= .. currentmodule:: kombu.transport.redis .. automodule:: kombu.transport.redis .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: SentinelChannel --------------- .. autoclass:: SentinelChannel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.rst000066400000000000000000000010371477772317200214530ustar00rootroot00000000000000=========================================== Built-in Transports - ``kombu.transport`` =========================================== .. currentmodule:: kombu.transport .. automodule:: kombu.transport .. contents:: :local: Data ---- .. data:: DEFAULT_TRANSPORT Default transport used when no transport specified. .. 
data:: TRANSPORT_ALIASES Mapping of transport aliases/class names. Functions --------- .. autofunction:: get_transport_cls .. autofunction:: resolve_transport kombu-5.5.3/docs/reference/kombu.transport.sqlalchemy.rst000066400000000000000000000022001477772317200236050ustar00rootroot00000000000000============================================================= SQLAlchemy Transport Model - ``kombu.transport.sqlalchemy`` ============================================================= .. currentmodule:: kombu.transport.sqlalchemy .. automodule:: kombu.transport.sqlalchemy .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: SQLAlchemy Transport Model - ``kombu.transport.sqlalchemy.models`` ================================================================== .. currentmodule:: kombu.transport.sqlalchemy.models .. automodule:: kombu.transport.sqlalchemy.models .. contents:: :local: Models ------ .. autoclass:: Queue .. autoattribute:: Queue.id .. autoattribute:: Queue.name .. autoclass:: Message .. autoattribute:: Message.id .. autoattribute:: Message.visible .. autoattribute:: Message.sent_at .. autoattribute:: Message.payload .. autoattribute:: Message.version kombu-5.5.3/docs/reference/kombu.transport.virtual.exchange.rst000066400000000000000000000014041477772317200247170ustar00rootroot00000000000000============================================================================= Virtual AMQ Exchange Implementation - ``kombu.transport.virtual.exchange`` ============================================================================= .. currentmodule:: kombu.transport.virtual.exchange .. automodule:: kombu.transport.virtual.exchange .. contents:: :local: Direct ------ .. autoclass:: DirectExchange :members: :undoc-members: Topic ----- .. autoclass:: TopicExchange :members: :undoc-members: Fanout ------ .. autoclass:: FanoutExchange :members: :undoc-members: Interface --------- .. autoclass:: ExchangeType :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.transport.virtual.rst000066400000000000000000000043371477772317200231460ustar00rootroot00000000000000============================================================ Virtual Transport Base Class - ``kombu.transport.virtual`` ============================================================ .. currentmodule:: kombu.transport.virtual .. automodule:: kombu.transport.virtual .. contents:: :local: Transports ---------- .. autoclass:: Transport .. autoattribute:: Channel .. autoattribute:: Cycle .. autoattribute:: polling_interval .. autoattribute:: default_port .. autoattribute:: state .. autoattribute:: cycle .. automethod:: establish_connection .. automethod:: close_connection .. automethod:: create_channel .. automethod:: close_channel .. automethod:: drain_events Channel ------- .. autoclass:: AbstractChannel :members: .. autoclass:: Channel .. autoattribute:: Message .. autoattribute:: state .. autoattribute:: qos .. autoattribute:: do_restore .. autoattribute:: exchange_types .. automethod:: exchange_declare .. automethod:: exchange_delete .. automethod:: queue_declare .. automethod:: queue_delete .. automethod:: queue_bind .. automethod:: queue_purge .. automethod:: basic_publish .. automethod:: basic_consume .. automethod:: basic_cancel .. automethod:: basic_get .. automethod:: basic_ack .. automethod:: basic_recover .. automethod:: basic_reject .. automethod:: basic_qos .. automethod:: get_table .. automethod:: typeof .. automethod:: drain_events .. 
automethod:: prepare_message .. automethod:: message_to_python .. automethod:: flow .. automethod:: close Message ------- .. autoclass:: Message :members: :undoc-members: :inherited-members: Quality Of Service ------------------ .. autoclass:: QoS :members: :undoc-members: :inherited-members: In-memory State --------------- .. autoclass:: BrokerState :members: :undoc-members: :inherited-members: kombu-5.5.3/docs/reference/kombu.transport.zookeeper.rst000066400000000000000000000007441477772317200234610ustar00rootroot00000000000000====================================================== Zookeeper Transport - ``kombu.transport.zookeeper`` ====================================================== .. currentmodule:: kombu.transport.zookeeper .. automodule:: kombu.transport.zookeeper .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.amq_manager.rst000066400000000000000000000004731477772317200230310ustar00rootroot00000000000000======================================================== Generic RabbitMQ manager - ``kombu.utils.amq_manager`` ======================================================== .. contents:: :local: .. currentmodule:: kombu.utils.amq_manager .. automodule:: kombu.utils.amq_manager :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.collections.rst000066400000000000000000000004711477772317200230750ustar00rootroot00000000000000========================================================== Custom Collections - ``kombu.utils.collections`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.collections .. automodule:: kombu.utils.collections :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.compat.rst000066400000000000000000000004541477772317200220430ustar00rootroot00000000000000========================================================== Python Compatibility - ``kombu.utils.compat`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.compat .. automodule:: kombu.utils.compat :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.debug.rst000066400000000000000000000004501477772317200216420ustar00rootroot00000000000000========================================================== Debugging Utilities - ``kombu.utils.debug`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.debug .. automodule:: kombu.utils.debug :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.div.rst000066400000000000000000000004341477772317200213400ustar00rootroot00000000000000========================================================== Div Utilities - ``kombu.utils.div`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.div .. automodule:: kombu.utils.div :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.encoding.rst000066400000000000000000000004671477772317200223520ustar00rootroot00000000000000========================================================== String Encoding Utilities - ``kombu.utils.encoding`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.encoding .. 
automodule:: kombu.utils.encoding :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.eventio.rst000066400000000000000000000004561477772317200222330ustar00rootroot00000000000000========================================================== Async I/O Selectors - ``kombu.utils.eventio`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.eventio .. automodule:: kombu.utils.eventio :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.functional.rst000066400000000000000000000004761477772317200227260ustar00rootroot00000000000000========================================================== Functional-style Utilities - ``kombu.utils.functional`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.functional .. automodule:: kombu.utils.functional :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.imports.rst000066400000000000000000000004651477772317200222570ustar00rootroot00000000000000========================================================== Module Importing Utilities - ``kombu.utils.imports`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.imports .. automodule:: kombu.utils.imports :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.json.rst000066400000000000000000000004401477772317200215240ustar00rootroot00000000000000========================================================== JSON Utilities - ``kombu.utils.json`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.json .. automodule:: kombu.utils.json :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.limits.rst000066400000000000000000000004451477772317200220610ustar00rootroot00000000000000========================================================== Rate limiting - ``kombu.utils.limits`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.limits .. automodule:: kombu.utils.limits :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.objects.rst000066400000000000000000000004641477772317200222120ustar00rootroot00000000000000========================================================== Object/Property Utilities - ``kombu.utils.objects`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.objects .. automodule:: kombu.utils.objects :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.scheduling.rst000066400000000000000000000004451477772317200227050ustar00rootroot00000000000000================================================= Consumer Scheduling - ``kombu.utils.scheduling`` ================================================= .. contents:: :local: .. currentmodule:: kombu.utils.scheduling .. automodule:: kombu.utils.scheduling :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.text.rst000066400000000000000000000004411477772317200215400ustar00rootroot00000000000000========================================================== Text utilitites - ``kombu.utils.text`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.text .. 
automodule:: kombu.utils.text :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.time.rst000066400000000000000000000004401477772317200215110ustar00rootroot00000000000000========================================================== Time Utilities - ``kombu.utils.time`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.time .. automodule:: kombu.utils.time :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.url.rst000066400000000000000000000004041477772317200213550ustar00rootroot00000000000000============================================== URL Utilities - ``kombu.utils.url`` ============================================== .. contents:: :local: .. currentmodule:: kombu.utils.url .. automodule:: kombu.utils.url :members: :undoc-members: kombu-5.5.3/docs/reference/kombu.utils.uuid.rst000066400000000000000000000004401477772317200215210ustar00rootroot00000000000000========================================================== UUID Utilities - ``kombu.utils.uuid`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.uuid .. automodule:: kombu.utils.uuid :members: :undoc-members: kombu-5.5.3/docs/templates/000077500000000000000000000000001477772317200156105ustar00rootroot00000000000000kombu-5.5.3/docs/templates/readme.txt000066400000000000000000000023561477772317200176140ustar00rootroot00000000000000======================================== kombu - Messaging library for Python ======================================== |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| .. include:: ../includes/introduction.txt .. include:: ../includes/installation.txt .. include:: ../includes/resources.txt .. |build-status| image:: https://github.com/celery/kombu/actions/workflows/ci.yaml/badge.svg :alt: Build status :target: https://github.com/celery/kombu/actions/workflows/ci.yml .. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=main :target: https://codecov.io/github/celery/kombu?branch=main .. |license| image:: https://img.shields.io/pypi/l/kombu.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg :alt: Kombu can be installed via wheel :target: https://pypi.org/project/kombu/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg :alt: Supported Python versions. :target: https://pypi.org/project/kombu/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg :alt: Support Python implementations. :target: https://pypi.org/project/kombu/ -- kombu-5.5.3/docs/userguide/000077500000000000000000000000001477772317200156065ustar00rootroot00000000000000kombu-5.5.3/docs/userguide/connections.rst000066400000000000000000000204261477772317200206660ustar00rootroot00000000000000.. _guide-connections: ============================ Connections and transports ============================ .. _connection-basics: Basics ====== To send and receive messages you need a transport and a connection. There are several transports to choose from (amqp, librabbitmq, redis, qpid, in-memory, etc.), and you can even create your own. The default transport is amqp. Create a connection using the default transport: .. code-block:: pycon >>> from kombu import Connection >>> connection = Connection('amqp://guest:guest@localhost:5672//') The connection will not be established yet, as the connection is established when needed. 
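For example, merely constructing the object does not open a socket; the first operation that actually needs the broker (such as accessing :attr:`~kombu.Connection.default_channel`) will establish it. A small sketch of this lazy behaviour, assuming a broker is reachable on localhost:

.. code-block:: pycon

    >>> conn = Connection('amqp://guest:guest@localhost:5672//')
    >>> conn.connected                    # nothing has been established yet
    False
    >>> channel = conn.default_channel    # first real use connects to the broker
    >>> conn.connected
    True
    >>> conn.release()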
If you want to explicitly establish the connection you have to call the :meth:`~kombu.Connection.connect` method: .. code-block:: pycon >>> connection.connect() You can also check whether the connection is connected: .. code-block:: pycon >>> connection.connected True Connections must always be closed after use: .. code-block:: pycon >>> connection.close() But best practice is to release the connection instead, this will release the resource if the connection is associated with a connection pool, or close the connection if not, and makes it easier to do the transition to connection pools later: .. code-block:: pycon >>> connection.release() .. seealso:: :ref:`guide-pools` Of course, the connection can be used as a context, and you are encouraged to do so as it makes it harder to forget releasing open resources: .. code-block:: python with Connection() as connection: # work with connection .. _debug-logs: Debug Logs ========== Kombu exposes multiple environment variables that control debug logging for connection and channel logs. This is useful for situations where you want to debug Kombu or contribute to the project. If ``KOMBU_LOG_CONNECTION`` is set to 1, debug logs are enabled for connections. If ``KOMBU_LOG_CHANNEL`` is set to 1, debug logs are enabled for channels. If ``KOMBU_LOG_DEBUG`` is set to 1, debug logs are enabled for both connections and channels. .. _connection-urls: Celery with SQS =============== SQS broker url doesn't include queue_name_prefix by default. So we can use the following code snippet to make it work in celery. .. code-block:: python from celery import Celery def make_celery(app): celery = Celery( app.import_name, broker="sqs://", broker_transport_options={ "queue_name_prefix": "{SERVICE_ENV}-{SERVICE_NAME}-" }, ) task_base = celery.Task class ContextTask(task_base): abstract = True def __call__(self, *args, **kwargs): with app.app_context(): return task_base.__call__(self, *args, **kwargs) celery.Task = ContextTask return celery URLs ==== Connection parameters can be provided as a URL in the format: .. code-block:: text transport://userid:password@hostname:port/virtual_host All of these are valid URLs: .. code-block:: text # Specifies using the amqp transport only, default values # are taken from the keyword arguments. amqp:// # Using Redis redis://localhost:6379/ # Using Redis over a Unix socket redis+socket:///tmp/redis.sock # Using Redis sentinel sentinel://sentinel1:26379;sentinel://sentinel2:26379 # Using Qpid qpid://localhost/ # Using virtual host '/foo' amqp://localhost//foo # Using virtual host 'foo' amqp://localhost/foo # Using Pyro with name server running on 'localhost' pyro://localhost/kombu.broker The query part of the URL can also be used to set options, e.g.: .. code-block:: text amqp://localhost/myvhost?ssl=1 See :ref:`connection-options` for a list of supported options. A connection without options will use the default connection settings, which is using the localhost host, default port, user name `guest`, password `guest` and virtual host "/". A connection without arguments is the same as: .. code-block:: pycon >>> Connection('amqp://guest:guest@localhost:5672//') The default port is transport specific, for AMQP this is 5672. Other fields may also have different meaning depending on the transport used. For example, the Redis transport uses the `virtual_host` argument as the redis database number. .. 
_connection-options: Keyword arguments ================= The :class:`~kombu.Connection` class supports additional keyword arguments, these are: :hostname: Default host name if not provided in the URL. :userid: Default user name if not provided in the URL. :password: Default password if not provided in the URL. :virtual_host: Default virtual host if not provided in the URL. :port: Default port if not provided in the URL. :transport: Default transport if not provided in the URL. Can be a string specifying the path to the class. (e.g. ``kombu.transport.pyamqp:Transport``), or one of the aliases: ``pyamqp``, ``librabbitmq``, ``redis``, ``qpid``, ``memory``, and so on. :ssl: Use SSL to connect to the server. Default is ``False``. Only supported by the amqp and qpid transports. :insist: Insist on connecting to a server. *No longer supported, relic from AMQP 0.8* :connect_timeout: Timeout in seconds for connecting to the server. May not be supported by the specified transport. :transport_options: A dict of additional connection arguments to pass to alternate kombu channel implementations. Consult the transport documentation for available options. AMQP Transports =============== There are 4 transports available for AMQP use. 1. ``pyamqp`` uses the pure Python library ``amqp``, automatically installed with Kombu. 2. ``librabbitmq`` uses the high performance transport written in C. This requires the ``librabbitmq`` Python package to be installed, which automatically compiles the C library. 3. ``amqp`` tries to use ``librabbitmq`` but falls back to ``pyamqp``. 4. ``qpid`` uses the pure Python library ``qpid.messaging``, automatically installed with Kombu. The Qpid library uses AMQP, but uses custom extensions specifically supported by the Apache Qpid Broker. For the highest performance, you should install the ``librabbitmq`` package. To ensure librabbitmq is used, you can explicitly specify it in the transport URL, or use ``amqp`` to have the fallback. Transport Comparison ==================== +---------------+----------+------------+------------+---------------+--------------+ | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | **Priority** | +---------------+----------+------------+------------+---------------+--------------+ | *amqp* | Native | Yes | Yes | Yes | Yes [#f3]_ | +---------------+----------+------------+------------+---------------+--------------+ | *qpid* | Native | Yes | Yes | Yes | No | +---------------+----------+------------+------------+---------------+--------------+ | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | Yes | +---------------+----------+------------+------------+---------------+--------------+ | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | No | +---------------+----------+------------+------------+---------------+--------------+ | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | Yes | +---------------+----------+------------+------------+---------------+--------------+ | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+ | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+ .. [#f1] Declarations only kept in memory, so exchanges/queues must be declared by all clients that needs them. .. [#f2] Fanout supported via storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. 
[#f3] AMQP Message priority support depends on broker implementation. Transport Options ================= py-amqp ~~~~~~~ :read_timeout: Timeout for reading data from RabbitMQ. :write_timeout: Timeout for writing data to RabbitMQ. kombu-5.5.3/docs/userguide/consumers.rst000066400000000000000000000162021477772317200203570ustar00rootroot00000000000000.. _guide-consumers: =========== Consumers =========== .. _consumer-basics: Basics ====== The :class:`~kombu.messaging.Consumer` takes a connection (or channel) and a list of queues to consume from. Several consumers can be mixed to consume from different channels, as they all bind to the same connection, and ``drain_events`` will drain events from all channels on that connection. .. note:: Kombu since 3.0 will only accept json/binary or text messages by default, to allow deserialization of other formats you have to specify them in the ``accept`` argument (in addition to setting the right content type for your messages): .. code-block:: python >>> Consumer(conn, accept=['json', 'pickle', 'msgpack', 'yaml']) You can create a consumer using a Connection. This consumer is consuming from a single queue with name `'queue'`: .. code-block:: python >>> queue = Queue('queue', routing_key='queue') >>> consumer = connection.Consumer(queue) You can also instantiate Consumer directly, it takes a channel or a connection as an argument. This consumer also consumes from single queue with name `'queue'`: .. code-block:: python >>> queue = Queue('queue', routing_key='queue') >>> with Connection('amqp://') as conn: ... with conn.channel() as channel: ... consumer = Consumer(channel, queue) A consumer needs to specify a handler for received data. This handler is specified in the form of a callback. The callback function is called by kombu every time a new message is received. The callback is called with two parameters: ``body``, containing deserialized data sent by a producer, and a :class:`~kombu.message.Message` instance ``message``. The user is responsible for acknowledging messages when manual acknowledgement is set. .. code-block:: python >>> def callback(body, message): ... print(body) ... message.ack() >>> consumer.register_callback(callback) Draining events from a single consumer -------------------------------------- The method ``drain_events`` blocks indefinitely by default. This example sets the timeout to 1 second: .. code-block:: python >>> with consumer: ... connection.drain_events(timeout=1) Draining events from several consumers -------------------------------------- Each consumer has its own list of queues. Each consumer accepts data in `'json'` format: .. code-block:: python >>> from kombu.utils.compat import nested >>> queues1 = [Queue('queue11', routing_key='queue11'), Queue('queue12', routing_key='queue12')] >>> queues2 = [Queue('queue21', routing_key='queue21'), Queue('queue22', routing_key='queue22')] >>> with connection.channel(), connection.channel() as (channel1, channel2): ... with nested(Consumer(channel1, queues1, accept=['json']), ... Consumer(channel2, queues2, accept=['json'])): ... connection.drain_events(timeout=1) The full example will look as follows: .. 
code-block:: python from kombu import Connection, Consumer, Queue def callback(body, message): print('RECEIVED MESSAGE: {0!r}'.format(body)) message.ack() queue1 = Queue('queue1', routing_key='queue1') queue2 = Queue('queue2', routing_key='queue2') with Connection('amqp://') as conn: with conn.channel() as channel: consumer = Consumer(conn, [queue1, queue2], accept=['json']) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) Consumer mixin classes ====================== Kombu provides predefined mixin classes in module :py:mod:`~kombu.mixins`. It contains two classes: :class:`~kombu.mixins.ConsumerMixin` for creating consumers and :class:`~kombu.mixins.ConsumerProducerMixin` for creating consumers supporting also publishing messages. Consumers can be created just by subclassing mixin class and overriding some of the methods: .. code-block:: python from kombu.mixins import ConsumerMixin class C(ConsumerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [ Consumer(channel, callbacks=[self.on_message], accept=['json']), ] def on_message(self, body, message): print('RECEIVED MESSAGE: {0!r}'.format(body)) message.ack() C(connection).run() and with multiple channels again: .. code-block:: python from kombu import Consumer from kombu.mixins import ConsumerMixin class C(ConsumerMixin): channel2 = None def __init__(self, connection): self.connection = connection def get_consumers(self, _, default_channel): self.channel2 = default_channel.connection.channel() return [Consumer(default_channel, queues1, callbacks=[self.on_message], accept=['json']), Consumer(self.channel2, queues2, callbacks=[self.on_special_message], accept=['json'])] def on_consume_end(self, connection, default_channel): if self.channel2: self.channel2.close() C(connection).run() The main use of :class:`~kombu.mixins.ConsumerProducerMixin` is to create consumers that need to also publish messages on a separate connection (e.g. sending rpc replies, streaming results): .. code-block:: python from kombu import Producer, Queue from kombu.mixins import ConsumerProducerMixin rpc_queue = Queue('rpc_queue') class Worker(ConsumerProducerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer( queues=[rpc_queue], on_message=self.on_request, accept={'application/json'}, prefetch_count=1, )] def on_request(self, message): n = message.payload['n'] print(' [.] fib({0})'.format(n)) result = fib(n) self.producer.publish( {'result': result}, exchange='', routing_key=message.properties['reply_to'], correlation_id=message.properties['correlation_id'], serializer='json', retry=True, ) message.ack() .. seealso:: :file:`examples/rpc-tut6/` in the Github repository. Advanced Topics =============== RabbitMQ -------- Consumer Priorities ~~~~~~~~~~~~~~~~~~~ RabbitMQ defines a consumer priority extension to the amqp protocol, that can be enabled by setting the ``x-priority`` argument to ``basic.consume``. In kombu you can specify this argument on the :class:`~kombu.Queue`, like this: .. code-block:: python queue = Queue('name', Exchange('exchange_name', type='direct'), consumer_arguments={'x-priority': 10}) Read more about consumer priorities here: https://www.rabbitmq.com/consumer-priority.html Reference ========= .. autoclass:: kombu.Consumer :noindex: :members: kombu-5.5.3/docs/userguide/examples.rst000066400000000000000000000025561477772317200201660ustar00rootroot00000000000000.. 
_examples: ======================== Examples ======================== .. _hello-world-example: Hello World Example =================== The example below uses :ref:`guide-simple` to send a hello world message through the message broker (RabbitMQ) and print the received message. :file:`hello_publisher.py`: .. literalinclude:: ../../examples/hello_publisher.py :language: python :file:`hello_consumer.py`: .. literalinclude:: ../../examples/hello_consumer.py :language: python .. _task-queue-example: Task Queue Example ================== Very simple task queue using pickle, with primitive support for priorities using different queues. :file:`queues.py`: .. literalinclude:: ../../examples/simple_task_queue/queues.py :language: python :file:`worker.py`: .. literalinclude:: ../../examples/simple_task_queue/worker.py :language: python :file:`tasks.py`: .. literalinclude:: ../../examples/simple_task_queue/tasks.py :language: python :file:`client.py`: .. literalinclude:: ../../examples/simple_task_queue/client.py .. _native-delayed-delivery-example: Native Delayed Delivery ======================= This example demonstrates how to declare native delayed delivery queues and exchanges and publish a message using the native delayed delivery mechanism. :file:`delayed_infra.py`: .. literalinclude:: ../../examples/delayed_infra.py :language: python kombu-5.5.3/docs/userguide/failover.rst000066400000000000000000000151061477772317200201520ustar00rootroot00000000000000.. _guide-failover: ==================== Automatic Failover ==================== Automatic failover is functionality for connecting to a clustered broker. An application using automatic failover should be able to automatically connect to a healthy node and react to an unexpected failure of a node in the cluster. Connection failover =================== The :class:`~kombu.Connection` accepts multiple URLs pointing to several brokers. When connecting, kombu automatically picks a healthy node from the list. In the example below, kombu ends up using the healthy.example.com broker: .. code-block:: python >>> conn = Connection( ... 'amqp://guest:guest@broken.example.com;guest:guest@healthy.example.com' ... ) >>> conn.connect() >>> conn <Connection: amqp://guest:**@healthy.example.com:5672// at 0x...> :class:`~kombu.Connection` also accepts a failover_strategy parameter which defines the strategy used when trying the nodes: .. code-block:: python >>> Connection( ... 'amqp://broker1.example.com;amqp://broker2.example.com', ... failover_strategy='round-robin' ... ) The current list of available failover strategies is defined in the kombu.connection module: .. code-block:: python >>> import kombu >>> kombu.connection.failover_strategies {'round-robin': <...>, 'shuffle': <...>} Connection failover only covers failures that happen while calling the :meth:`~kombu.Connection.connect` method of :class:`~kombu.Connection`. Operation failover ================== Connection failover using multiple connection strings in :class:`~kombu.Connection` solves the problem of a broker being unavailable while a new connection is created. But in the real world these connections are long lived, and it is therefore possible that a broker fails during the lifetime of a connection. For this scenario, retrying of operations executed against the broker is needed. Retrying ensures that a failed operation triggers a new connection to a healthy broker and a re-execution of the failed operation. This is implemented in the :meth:`~kombu.Connection.ensure` method, which tries to execute the given function and, when contacting the broker fails, reconnects the underlying connection and executes the function again.
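If all you need is to make sure that the connection itself can be (re-)established — for example at application start-up — :meth:`~kombu.Connection.ensure_connection` retries the connect step in a similar way. A minimal sketch, with illustrative retry values:

.. code-block:: pycon

    >>> def errback(exc, interval):
    ...     logger.error('Broker connection error: %r', exc, exc_info=1)
    ...     logger.info('Retry in %s seconds.', interval)

    >>> conn = Connection('amqp://broker1:5672;amqp://broker2:5672')
    >>> conn.ensure_connection(errback=errback, max_retries=3)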
The following example ensures that the :meth:`~kombu.Producer.publish` method is re-executed when an error occurs: .. code-block:: python >>> from kombu import Connection, Producer >>> conn = Connection('amqp://') >>> producer = Producer(conn) >>> def errback(exc, interval): ... logger.error('Error: %r', exc, exc_info=1) ... logger.info('Retry in %s seconds.', interval) >>> publish = conn.ensure(producer, producer.publish, ... errback=errback, max_retries=3) >>> publish({'hello': 'world'}, routing_key='dest') Some methods accept a channel as a parameter, e.g. :meth:`~kombu.Queue.declare`. Since the channel is passed in explicitly, it is not refreshed automatically during failover, and simply retrying the call would fail. In this scenario :meth:`~kombu.Connection.autoretry` needs to be used, which automatically passes the channel and refreshes it during failover: .. code-block:: python >>> import kombu >>> conn = kombu.Connection('amqp://broker1:5672;amqp://broker2:5672') >>> conn.connect() >>> q = kombu.Queue('test_queue') >>> declare = conn.autoretry(q.declare) >>> declare() Producer ======== :meth:`~kombu.Producer.publish` can have automatic failover using :meth:`~kombu.Connection.ensure` as mentioned before. Moreover, it accepts a retry parameter as a shortcut for retrying. The following example retries publishing when an error occurs: .. code-block:: python >>> from kombu import * >>> with Connection('amqp://broker1:5672;amqp://broker2:5672') as conn: ... with conn.channel() as channel: ... producer = Producer(channel) ... producer.publish( ... {'hello': 'world'}, routing_key='queue', retry=True ... ) Consumer ======== A consumer with failover functionality can be implemented using the following function: .. code-block:: python >>> def consume(): ... while True: ... try: ... conn.drain_events(timeout=1) ... except socket.timeout: ... pass This function drains events in an infinite loop, using a timeout to avoid blocking forever on an unavailable broker. The failover behaviour comes from wrapping the consume function with the :meth:`~kombu.Connection.ensure` method: .. code-block:: python >>> consume = conn.ensure(conn, consume) >>> consume() The full example implementing a consumer with failover is as follows: .. code-block:: python >>> from kombu import * >>> import socket >>> def callback(body, message): ... print(body) ... message.ack() >>> queue = Queue('queue', routing_key='queue') >>> with Connection('amqp://broker1:5672;amqp://broker2:5672') as conn: ... def consume(): ... while True: ... try: ... conn.drain_events(timeout=1) ... except socket.timeout: ... pass ... with conn.channel() as channel: ... consumer = Consumer(channel, queue) ... consumer.register_callback(callback) ... with consumer: ... while True: ... consume = conn.ensure(conn, consume) ... consume() When implementing the consumer as a :class:`~kombu.mixins.ConsumerMixin`, failover is added by wrapping the consume method with :meth:`~kombu.Connection.ensure`: .. code-block:: python >>> from kombu import * >>> from kombu.mixins import ConsumerMixin >>> class C(ConsumerMixin): ... def __init__(self, connection): ... self.connection = connection ... def get_consumers(self, Consumer, channel): ... return [ ... Consumer( ... [Queue('queue', routing_key='queue')], ... callbacks=[self.on_message], accept=['json'] ... ), ... ] ... def on_message(self, body, message): ... print('RECEIVED MESSAGE: {0!r}'.format(body)) ... message.ack() ... def consume(self, *args, **kwargs): ... 
consume = conn.ensure(conn, super().consume) ... return consume(*args, **kwargs) >>> with Connection('amqp://broker1:5672;amqp://broker2:5672') as conn: ... C(conn).run() kombu-5.5.3/docs/userguide/index.rst000066400000000000000000000003501477772317200174450ustar00rootroot00000000000000============ User Guide ============ :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 introduction connections producers consumers examples simple pools serialization failover kombu-5.5.3/docs/userguide/introduction.rst000066400000000000000000000062571477772317200210730ustar00rootroot00000000000000.. _guide-intro: ============== Introduction ============== .. _intro-messaging: What is messaging? ================== In times long ago people didn't have email. They had the postal service, which with great courage would deliver mail from hand to hand all over the globe. Soldiers deployed at wars far away could only communicate with their families through the postal service, and posting a letter would mean that the recipient wouldn't actually receive the letter until weeks or months, sometimes years later. It's hard to imagine this today when people are expected to be available for phone calls every minute of the day. So humans need to communicate with each other, this shouldn't be news to anyone, but why would applications? One example is banks. When you transfer money from one bank to another, your bank sends a message to a central clearinghouse. The clearinghouse then records and coordinates the transaction. Banks need to send and receive millions and millions of messages every day, and losing a single message would mean either losing your money (bad) or the banks money (very bad) Another example is the stock exchanges, which also have a need for very high message throughputs and have strict reliability requirements. Email is a great way for people to communicate. It is much faster than using the postal service, but still using email as a means for programs to communicate would be like the soldier above, waiting for signs of life from his girlfriend back home. .. _messaging-scenarios: Messaging Scenarios =================== * Request/Reply The request/reply pattern works like the postal service example. A message is addressed to a single recipient, with a return address printed on the back. The recipient may or may not reply to the message by sending it back to the original sender. Request-Reply is achieved using *direct* exchanges. * Broadcast In a broadcast scenario a message is sent to all parties. This could be none, one or many recipients. Broadcast is achieved using *fanout* exchanges. * Publish/Subscribe In a publish/subscribe scenario producers publish messages to topics, and consumers subscribe to the topics they are interested in. If no consumers subscribe to the topic, then the message will not be delivered to anyone. If several consumers subscribe to the topic, then the message will be delivered to all of them. Pub-sub is achieved using *topic* exchanges. .. _messaging-reliability: Reliability =========== For some applications reliability is very important. Losing a message is a critical situation that must never happen. For other applications losing a message is fine, it can maybe recover in other ways, or the message is resent anyway as periodic updates. AMQP defines two built-in delivery modes: * persistent Messages are written to disk and survives a broker restart. * transient Messages may or may not be written to disk, as the broker sees fit to optimize memory contents. 
The messages won't survive a broker restart. Transient messaging is by far the fastest way to send and receive messages, so having persistent messages comes with a price, but for some applications this is a necessary cost. kombu-5.5.3/docs/userguide/pools.rst000066400000000000000000000125211477772317200174750ustar00rootroot00000000000000.. _guide-pools: =============================== Connection and Producer Pools =============================== .. _default-pools: Default Pools ============= Kombu ships with two global pools: one connection pool, and one producer pool. These are convenient and the fact that they are global may not be an issue as connections should often be limited at the process level, rather than per thread/application and so on, but if you need custom pools per thread see :ref:`custom-pool-groups`. .. _default-connections: The connection pool group ------------------------- The connection pools are available as :attr:`kombu.pools.connections`. This is a pool group, which means you give it a connection instance, and you get a pool instance back. We have one pool per connection instance to support multiple connections in the same app. All connection instances with the same connection parameters will get the same pool: .. code-block:: pycon >>> from kombu import Connection >>> from kombu.pools import connections >>> connections[Connection('redis://localhost:6379')] >>> connections[Connection('redis://localhost:6379')] Let's acquire and release a connection: .. code-block:: python from kombu import Connection from kombu.pools import connections connection = Connection('redis://localhost:6379') with connections[connection].acquire(block=True) as conn: print('Got connection: {0!r}'.format(connection.as_uri())) .. note:: The ``block=True`` here means that the acquire call will block until a connection is available in the pool. Note that this will block forever in case there is a deadlock in your code where a connection is not released. There is a ``timeout`` argument you can use to safeguard against this (see :meth:`kombu.connection.Resource.acquire`). If blocking is disabled and there aren't any connections left in the pool an :class:`kombu.exceptions.ConnectionLimitExceeded` exception will be raised. That's about it. If you need to connect to multiple brokers at once you can do that too: .. code-block:: python from kombu import Connection from kombu.pools import connections c1 = Connection('amqp://') c2 = Connection('redis://') with connections[c1].acquire(block=True) as conn1: with connections[c2].acquire(block=True) as conn2: # .... .. _default-producers: The producer pool group ======================= This is a pool group just like the connections, except that it manages :class:`~kombu.Producer` instances used to publish messages. Here is an example using the producer pool to publish a message to the ``news`` exchange: .. code-block:: python from kombu import Connection, Exchange from kombu.pools import producers # The exchange we send our news articles to. news_exchange = Exchange('news') # The article we want to send article = {'title': 'No cellular coverage on the tube for 2012', 'ingress': 'yadda yadda yadda'} # The broker where our exchange is. connection = Connection('amqp://guest:guest@localhost:5672//') with producers[connection].acquire(block=True) as producer: producer.publish( article, exchange=news_exchange, routing_key='domestic', declare=[news_exchange], serializer='json', compression='zlib') .. 
_default-pool-limits: Pool limits ------------------- By default every connection instance has a limit of 10 connections. You can change this limit using :func:`kombu.pools.set_limit`. You are able to grow the pool at runtime, but you can't shrink it, so it is best to set the limit as early as possible after your application starts: .. code-block:: pycon >>> from kombu import pools >>> pools.set_limit(50) You can also get the current limit using :func:`kombu.pools.get_limit`: .. code-block:: pycon >>> from kombu import pools >>> pools.get_limit() 10 >>> pools.set_limit(100) 100 >>> pools.get_limit() 100 Resetting all pools ------------------- You can close all active connections and reset all pool groups by using the :func:`kombu.pools.reset` function. Note that this will not respect anything currently using these connections, so will just drag the connections away from under their feet: you should be very careful before you use this. Kombu will reset the pools if the process is forked, so that forked processes start with clean pool groups. .. _custom-pool-groups: Custom Pool Groups ================== To maintain your own pool groups you should create your own :class:`~kombu.pools.Connections` and :class:`kombu.pools.Producers` instances: .. code-block:: python from kombu import pools from kombu import Connection connections = pools.Connections(limit=100) producers = pools.Producers(limit=connections.limit) connection = Connection('amqp://guest:guest@localhost:5672//') with connections[connection].acquire(block=True): # ... If you want to use the global limit that can be set with :func:`~kombu.pools.set_limit` you can use a special value as the ``limit`` argument: .. code-block:: python from kombu import pools connections = pools.Connections(limit=pools.use_global_limit) kombu-5.5.3/docs/userguide/producers.rst000066400000000000000000000070631477772317200203540ustar00rootroot00000000000000.. _guide-producers: =========== Producers =========== .. _producer-basics: Basics ====== You can create a producer using a :class:`~kombu.Connection`: .. code-block:: pycon >>> producer = connection.Producer() You can also instantiate :class:`~kombu.Producer` directly; it takes a channel or a connection as an argument: .. code-block:: pycon >>> with Connection('amqp://') as conn: ... with conn.channel() as channel: ... producer = Producer(channel) Having a producer instance you can publish messages: .. code-block:: pycon >>> from kombu import Exchange >>> exchange = Exchange('name', type='direct') >>> producer.publish( ... {'hello': 'world'}, # message to send ... exchange=exchange, # destination exchange ... routing_key='rk', # destination routing key, ... declare=[exchange], # make sure exchange is declared, ... ) Mostly you will be getting a connection from a connection pool, and this connection can be stale, or you could lose the connection in the middle of sending the message. Using retries is a good way to handle these intermittent failures: .. code-block:: pycon >>> producer.publish({'hello': 'world'}, ..., retry=True) In addition, a retry policy can be specified, which is a dictionary of parameters supported by the :func:`~kombu.utils.functional.retry_over_time` function: .. code-block:: pycon >>> producer.publish( ... {'hello': 'world'}, ..., ... retry=True, ... retry_policy={ ... 'interval_start': 0, # First retry immediately, ... 'interval_step': 2, # then increase by 2s for every retry. ... 'interval_max': 30, # but don't exceed 30s between retries.
'max_retries': 30, # give up after 30 tries. ... }, ... ) The ``declare`` argument lets you pass a list of entities that must be declared before sending the message. This is especially important when using the ``retry`` flag, since the broker may actually restart during a retry in which case non-durable entities are removed. Say you are writing a task queue, and the workers may have not started yet so the queues aren't declared. In this case you need to define both the exchange, and the declare the queue so that the message is delivered to the queue while the workers are offline: .. code-block:: pycon >>> from kombu import Exchange, Queue >>> task_queue = Queue('tasks', Exchange('tasks'), routing_key='tasks') >>> producer.publish( ... {'hello': 'world'}, ..., ... retry=True, ... exchange=task_queue.exchange, ... routing_key=task_queue.routing_key, ... declare=[task_queue], # declares exchange, queue and binds. ... ) Bypassing routing by using the anon-exchange -------------------------------------------- You may deliver to a queue directly, bypassing the brokers routing mechanisms, by using the "anon-exchange": set the exchange parameter to the empty string, and set the routing key to be the name of the queue: .. code-block:: pycon >>> producer.publish( ... {'hello': 'world'}, ... exchange='', ... routing_key=task_queue.name, ... ) Serialization ============= Json is the default serializer when a non-string object is passed to publish, but you can also specify a different serializer: .. code-block:: pycon >>> producer.publish({'hello': 'world'}, serializer='pickle') See :ref:`guide-serialization` for more information. Reference ========= .. autoclass:: kombu.Producer :noindex: :members: kombu-5.5.3/docs/userguide/serialization.rst000066400000000000000000000155251477772317200212250ustar00rootroot00000000000000.. _guide-serialization: =============== Serialization =============== .. _serializers: Serializers =========== By default every message is encoded using `JSON`_, so sending Python data structures like dictionaries and lists works. `YAML`_, `msgpack`_ and Python's built-in `pickle` module is also supported, and if needed you can register any custom serialization scheme you want to use. By default Kombu will only load JSON messages, so if you want to use other serialization format you must explicitly enable them in your consumer by using the ``accept`` argument: .. code-block:: python Consumer(conn, [queue], accept=['json', 'pickle', 'msgpack']) The accept argument can also include MIME-types. .. _`JSON`: http://www.json.org/ .. _`YAML`: http://yaml.org/ .. _`msgpack`: https://msgpack.org/ Each option has its advantages and disadvantages. `json` -- JSON is supported in many programming languages, is a standard part of Python, and is fairly fast to decode. The primary disadvantage to `JSON` is that it limits you to the following data types: strings, Unicode, floats, boolean, dictionaries, lists, decimals, DjangoPromise, datetimes, dates, time, bytes and UUIDs. For dates, datetimes, UUIDs and bytes the serializer will generate a dict that will later instruct the deserializer how to produce the right type. Also, binary data will be transferred using Base64 encoding, which will cause the transferred data to be around 34% larger than an encoding which supports native binary types. This will only happen if the bytes object can't be decoded into utf8. 
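As a rough illustration of this scheme (the ``json`` serializer is built on :mod:`kombu.utils.json`, so the same helpers can be used directly; the exact wire format should be treated as an implementation detail):

.. code-block:: pycon

    >>> from datetime import datetime
    >>> from uuid import uuid4
    >>> from kombu.utils.json import dumps, loads

    >>> payload = {'id': uuid4(), 'created': datetime.now()}
    >>> encoded = dumps(payload)   # plain JSON text with type hints
    >>> decoded = loads(encoded)   # hints are turned back into UUID/datetime
    >>> decoded['id'] == payload['id']
    True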
However, if your data fits inside the above constraints and you need cross-language support, the default setting of `JSON` is probably your best choice. If you need support for custom types, you can write serialize/deserialize functions and register them as follows: .. code-block:: python from kombu.utils.json import register_type from django.db.models import Model from django.apps import apps # Allow serialization of django models: register_type( Model, "model", lambda o: [o._meta.label, o.pk], lambda o: apps.get_model(o[0]).objects.get(pk=o[1]), ) `pickle` -- If you have no desire to support any language other than Python, then using the `pickle` encoding will gain you the support of all built-in Python data types (except class instances), smaller messages when sending binary files, and a slight speedup over `JSON` processing. .. admonition:: Pickle and Security The pickle format is very convenient as it can serialize and deserialize almost any object, but this is also a concern for security. Carefully crafted pickle payloads can do almost anything a regular Python program can do, so if you let your consumer automatically decode pickled objects you must make sure to limit access to the broker so that untrusted parties do not have the ability to send messages! By default Kombu uses pickle protocol 4, but this can be changed using the :envvar:`PICKLE_PROTOCOL` environment variable or by changing the global :data:`kombu.serialization.pickle_protocol` flag. `yaml` -- YAML has many of the same characteristics as `json`, except that it natively supports more data types (including dates, recursive references, etc.) However, the Python libraries for YAML are a good bit slower than the libraries for JSON. If you need a more expressive set of data types and need to maintain cross-language compatibility, then `YAML` may be a better fit than the above. To instruct `Kombu` to use an alternate serialization method, use one of the following options. 1. Set the serialization option on a per-producer basis: .. code-block:: pycon >>> producer = Producer(channel, ... exchange=exchange, ... serializer='yaml') 2. Set the serialization option per message: .. code-block:: pycon >>> producer.publish(message, routing_key=rkey, ... serializer='pickle') Note that a `Consumer` do not need the serialization method specified. They can auto-detect the serialization method as the content-type is sent as a message header. .. _sending-raw-data: Sending raw data without Serialization ====================================== In some cases, you don't need your message data to be serialized. If you pass in a plain string or Unicode object as your message and a custom `content_type`, then `Kombu` will not waste cycles serializing/deserializing the data. You can optionally specify a `content_encoding` for the raw data: .. code-block:: pycon >>> with open('~/my_picture.jpg', 'rb') as fh: ... producer.publish(fh.read(), content_type='image/jpeg', content_encoding='binary', routing_key=rkey) The `Message` object returned by the `Consumer` class will have a `content_type` and `content_encoding` attribute. .. _serialization-entrypoints: Creating extensions using Setuptools entry-points ================================================= A package can also register new serializers using Setuptools entry-points. The entry-point must provide the name of the serializer along with the path to a tuple providing the rest of the args: ``encoder_function, decoder_function, content_type, content_encoding``. An example entrypoint could be: .. 
code-block:: python from setuptools import setup setup( entry_points={ 'kombu.serializers': [ 'my_serializer = my_module.serializer:register_args' ] } ) Then the module ``my_module.serializer`` would look like: .. code-block:: python register_args = (my_encoder, my_decoder, 'application/x-mimetype', 'utf-8') When this package is installed, the new 'my_serializer' serializer will be supported by Kombu. .. admonition:: Buffer Objects The decoder function of a custom serializer must support both strings and Python's old-style buffer objects. Python's pickle and json modules usually don't do this via their ``loads`` functions, but you can easily add support by making a wrapper around the ``load`` function that takes file objects instead of strings. Here's an example wrapping :func:`pickle.loads` in such a way: .. code-block:: python import pickle from io import BytesIO from kombu import serialization def loads(s): return pickle.load(BytesIO(s)) serialization.register( 'my_pickle', pickle.dumps, loads, content_type='application/x-pickle2', content_encoding='binary', ) kombu-5.5.3/docs/userguide/simple.rst000066400000000000000000000075141477772317200176400ustar00rootroot00000000000000.. _guide-simple: ================== Simple Interface ================== .. contents:: :local: :mod:`kombu.simple` is a simple interface to AMQP queueing. It is only slightly different from the :class:`~Queue.Queue` class in the Python Standard Library, which makes it excellent for users with basic messaging needs. Instead of defining exchanges and queues, the simple classes only require two arguments: a connection channel and a name. The name is used as the queue, exchange and routing key. If the need arises, you can specify a :class:`~kombu.Queue` as the name argument instead. In addition, the :class:`~kombu.Connection` comes with shortcuts to create simple queues using the current connection: .. code-block:: pycon >>> queue = connection.SimpleQueue('myqueue') >>> # ... do something with queue >>> queue.close() This is equivalent to: .. code-block:: pycon >>> from kombu.simple import SimpleQueue >>> channel = connection.channel() >>> queue = SimpleQueue(channel, 'myqueue') >>> # ... do something with queue >>> channel.close() >>> queue.close() .. _simple-send-receive: Sending and receiving messages ============================== The simple interface defines two classes: :class:`~kombu.simple.SimpleQueue`, and :class:`~kombu.simple.SimpleBuffer`. The former is used for persistent messages, and the latter is used for transient, buffer-like queues. They both have the same interface, so you can use them interchangeably. Here is an example using the :class:`~kombu.simple.SimpleQueue` class to produce and consume logging messages: .. code-block:: python import socket import datetime from time import time from kombu import Connection class Logger: def __init__(self, connection, queue_name='log_queue', serializer='json', compression=None): self.queue = connection.SimpleQueue(queue_name) self.serializer = serializer self.compression = compression def log(self, message, level='INFO', context={}): self.queue.put({'message': message, 'level': level, 'context': context, 'hostname': socket.gethostname(), 'timestamp': time()}, serializer=self.serializer, compression=self.compression) def process(self, callback, n=1, timeout=1): for _ in range(n): log_message = self.queue.get(block=True, timeout=timeout) entry = log_message.payload # deserialized data.
callback(entry) log_message.ack() # remove message from queue def close(self): self.queue.close() if __name__ == '__main__': from contextlib import closing with Connection('amqp://guest:guest@localhost:5672//') as conn: with closing(Logger(conn)) as logger: # Send message logger.log('Error happened while encoding video', level='ERROR', context={'filename': 'cutekitten.mpg'}) # Consume and process message # This is the callback called when a log message is # received. def dump_entry(entry): date = datetime.datetime.fromtimestamp(entry['timestamp']) print('[%s %s %s] %s %r' % (date, entry['hostname'], entry['level'], entry['message'], entry['context'])) # Process a single message using the callback above. logger.process(dump_entry, n=1) kombu-5.5.3/examples/000077500000000000000000000000001477772317200145005ustar00rootroot00000000000000kombu-5.5.3/examples/complete_receive.py000066400000000000000000000030161477772317200203640ustar00rootroot00000000000000""" Example of simple consumer that waits for a single message, acknowledges it and exits. """ from __future__ import annotations from pprint import pformat from kombu import Connection, Consumer, Exchange, Queue, eventloop #: By default messages sent to exchanges are persistent (delivery_mode=2), #: and queues and exchanges are durable. exchange = Exchange('kombu_demo', type='direct') queue = Queue('kombu_demo', exchange, routing_key='kombu_demo') def pretty(obj): return pformat(obj, indent=4) #: This is the callback applied when a message is received. def handle_message(body, message): print(f'Received message: {body!r}') print(f' properties:\n{pretty(message.properties)}') print(f' delivery_info:\n{pretty(message.delivery_info)}') message.ack() #: Create a connection and a channel. #: If hostname, userid, password and virtual_host is not specified #: the values below are the default, but listed here so it can #: be easily changed. with Connection('amqp://guest:guest@localhost:5672//') as connection: #: Create consumer using our callback and queue. #: Second argument can also be a list to consume from #: any number of queues. with Consumer(connection, queue, callbacks=[handle_message]): #: Each iteration waits for a single event. Note that this #: event may not be a message, or a message that is to be #: delivered to the consumers channel, but any event received #: on the connection. for _ in eventloop(connection): pass kombu-5.5.3/examples/complete_send.py000066400000000000000000000022261477772317200176750ustar00rootroot00000000000000""" Example producer that sends a single message and exits. You can use `complete_receive.py` to receive the message sent. """ from __future__ import annotations from kombu import Connection, Exchange, Producer, Queue #: By default messages sent to exchanges are persistent (delivery_mode=2), #: and queues and exchanges are durable. exchange = Exchange('kombu_demo', type='direct') queue = Queue('kombu_demo', exchange, routing_key='kombu_demo') with Connection('amqp://guest:guest@localhost:5672//') as connection: #: Producers are used to publish messages. #: a default exchange and routing key can also be specified #: as arguments the Producer, but we rather specify this explicitly #: at the publish call. producer = Producer(connection) #: Publish the message using the json serializer (which is the default), #: and zlib compression. The kombu consumer will automatically detect #: encoding, serialization and compression used and decode accordingly. 
producer.publish( {'hello': 'world'}, exchange=exchange, routing_key='kombu_demo', serializer='json', compression='zlib', ) kombu-5.5.3/examples/delayed_infra.py000066400000000000000000000017751477772317200176520ustar00rootroot00000000000000from __future__ import annotations from kombu import Connection, Exchange, Queue from kombu.transport.native_delayed_delivery import ( bind_queue_to_native_delayed_delivery_exchange, calculate_routing_key, declare_native_delayed_delivery_exchanges_and_queues, level_name) with Connection('amqp://guest:guest@localhost:5672//') as connection: declare_native_delayed_delivery_exchanges_and_queues(connection, 'quorum') channel = connection.channel() destination_exchange = Exchange('destination_exchange', type='topic') queue = Queue("destination", exchange=destination_exchange, routing_key='destination_route') queue.declare(channel=connection.channel()) bind_queue_to_native_delayed_delivery_exchange(connection, queue) with connection.Producer(channel=channel) as producer: routing_key = calculate_routing_key(30, 'destination_route') producer.publish( "delayed msg", routing_key=routing_key, exchange=level_name(27) ) kombu-5.5.3/examples/experimental/000077500000000000000000000000001477772317200171755ustar00rootroot00000000000000kombu-5.5.3/examples/experimental/async_consume.py000066400000000000000000000013521477772317200224160ustar00rootroot00000000000000#!/usr/bin/env python3 from __future__ import annotations from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.asynchronous import Hub hub = Hub() exchange = Exchange('asynt') queue = Queue('asynt', exchange, 'asynt') def send_message(conn): producer = Producer(conn) producer.publish('hello world', exchange=exchange, routing_key='asynt') print('message sent') def on_message(message): print(f'received: {message.body!r}') message.ack() hub.stop() # <-- exit after one message if __name__ == '__main__': conn = Connection('amqp://') conn.register_with_event_loop(hub) with Consumer(conn, [queue], on_message=on_message): send_message(conn) hub.run_forever() kombu-5.5.3/examples/hello_consumer.py000066400000000000000000000005011477772317200200640ustar00rootroot00000000000000from __future__ import annotations from kombu import Connection with Connection('amqp://guest:guest@localhost:5672//') as conn: simple_queue = conn.SimpleQueue('simple_queue') message = simple_queue.get(block=True, timeout=1) print(f'Received: {message.payload}') message.ack() simple_queue.close() kombu-5.5.3/examples/hello_publisher.py000066400000000000000000000005351477772317200202350ustar00rootroot00000000000000from __future__ import annotations import datetime from kombu import Connection with Connection('amqp://guest:guest@localhost:5672//') as conn: simple_queue = conn.SimpleQueue('simple_queue') message = f'helloworld, sent at {datetime.datetime.today()}' simple_queue.put(message) print(f'Sent: {message}') simple_queue.close() kombu-5.5.3/examples/memory_transport.py000066400000000000000000000013631477772317200205010ustar00rootroot00000000000000""" Example that use memory transport for message produce. 
""" from __future__ import annotations import time from kombu import Connection, Consumer, Exchange, Queue media_exchange = Exchange('media', 'direct') video_queue = Queue('video', exchange=media_exchange, routing_key='video') task_queues = [video_queue] def handle_message(body, message): print(f"{time.time()} RECEIVED MESSAGE: {body!r}") message.ack() connection = Connection("memory:///") consumer = Consumer(connection, task_queues, callbacks=[handle_message]) producer = connection.Producer(serializer='json') producer.publish( {"foo": "bar"}, exchange=media_exchange, routing_key='video', declare=task_queues, ) consumer.consume() connection.drain_events() kombu-5.5.3/examples/rpc-tut6/000077500000000000000000000000001477772317200161645ustar00rootroot00000000000000kombu-5.5.3/examples/rpc-tut6/rpc_client.py000066400000000000000000000026471477772317200206710ustar00rootroot00000000000000#!/usr/bin/env python3 from __future__ import annotations from kombu import Connection, Consumer, Producer, Queue, uuid class FibonacciRpcClient: def __init__(self, connection): self.connection = connection self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True) def on_response(self, message): if message.properties['correlation_id'] == self.correlation_id: self.response = message.payload['result'] def call(self, n): self.response = None self.correlation_id = uuid() with Producer(self.connection) as producer: producer.publish( {'n': n}, exchange='', routing_key='rpc_queue', declare=[self.callback_queue], reply_to=self.callback_queue.name, correlation_id=self.correlation_id, ) with Consumer(self.connection, on_message=self.on_response, queues=[self.callback_queue], no_ack=True): while self.response is None: self.connection.drain_events() return self.response def main(broker_url): connection = Connection(broker_url) fibonacci_rpc = FibonacciRpcClient(connection) print(' [x] Requesting fib(30)') response = fibonacci_rpc.call(30) print(f' [.] Got {response!r}') if __name__ == '__main__': main('pyamqp://') kombu-5.5.3/examples/rpc-tut6/rpc_server.py000066400000000000000000000024641477772317200207160ustar00rootroot00000000000000#!/usr/bin/env python3 from __future__ import annotations from kombu import Connection, Queue from kombu.mixins import ConsumerProducerMixin rpc_queue = Queue('rpc_queue') def fib(n): if n == 0: return 0 elif n == 1: return 1 else: return fib(n - 1) + fib(n - 2) class Worker(ConsumerProducerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer( queues=[rpc_queue], on_message=self.on_request, accept={'application/json'}, prefetch_count=1, )] def on_request(self, message): n = message.payload['n'] print(f' [.] fib({n})') result = fib(n) self.producer.publish( {'result': result}, exchange='', routing_key=message.properties['reply_to'], correlation_id=message.properties['correlation_id'], serializer='json', retry=True, ) message.ack() def start_worker(broker_url): connection = Connection(broker_url) print(' [x] Awaiting RPC requests') worker = Worker(connection) worker.run() if __name__ == '__main__': try: start_worker('pyamqp://') except KeyboardInterrupt: pass kombu-5.5.3/examples/simple_eventlet_receive.py000066400000000000000000000023111477772317200217500ustar00rootroot00000000000000""" Example that sends a single message and exits using the simple interface. You can use `simple_receive.py` (or `complete_receive.py`) to receive the message sent. 
""" from __future__ import annotations import eventlet from kombu import Connection eventlet.monkey_patch() def wait_many(timeout=1): #: Create connection #: If hostname, userid, password and virtual_host is not specified #: the values below are the default, but listed here so it can #: be easily changed. with Connection('amqp://guest:guest@localhost:5672//') as connection: #: SimpleQueue mimics the interface of the Python Queue module. #: First argument can either be a queue name or a kombu.Queue object. #: If a name, then the queue will be declared with the name as the #: queue name, exchange name and routing key. with connection.SimpleQueue('kombu_demo') as queue: while True: try: message = queue.get(block=False, timeout=timeout) except queue.Empty: break else: message.ack() print(message.payload) eventlet.spawn(wait_many).wait() kombu-5.5.3/examples/simple_eventlet_send.py000066400000000000000000000022351477772317200212640ustar00rootroot00000000000000""" Example that sends a single message and exits using the simple interface. You can use `simple_receive.py` (or `complete_receive.py`) to receive the message sent. """ from __future__ import annotations import eventlet from kombu import Connection eventlet.monkey_patch() def send_many(n): #: Create connection #: If hostname, userid, password and virtual_host is not specified #: the values below are the default, but listed here so it can #: be easily changed. with Connection('amqp://guest:guest@localhost:5672//') as connection: #: SimpleQueue mimics the interface of the Python Queue module. #: First argument can either be a queue name or a kombu.Queue object. #: If a name, then the queue will be declared with the name as the #: queue name, exchange name and routing key. with connection.SimpleQueue('kombu_demo') as queue: def send_message(i): queue.put({'hello': f'world{i}'}) pool = eventlet.GreenPool(10) for i in range(n): pool.spawn(send_message, i) pool.waitall() if __name__ == '__main__': send_many(10) kombu-5.5.3/examples/simple_receive.py000066400000000000000000000016531477772317200200520ustar00rootroot00000000000000""" Example receiving a message using the SimpleQueue interface. """ from __future__ import annotations from kombu import Connection #: Create connection #: If hostname, userid, password and virtual_host is not specified #: the values below are the default, but listed here so it can #: be easily changed. with Connection('amqp://guest:guest@localhost:5672//') as conn: #: SimpleQueue mimics the interface of the Python Queue module. #: First argument can either be a queue name or a kombu.Queue object. #: If a name, then the queue will be declared with the name as the queue #: name, exchange name and routing key. with conn.SimpleQueue('kombu_demo') as queue: message = queue.get(block=True, timeout=10) message.ack() print(message.payload) #### #: If you don't use the with statement then you must always # remember to close objects after use: # queue.close() # connection.close() kombu-5.5.3/examples/simple_send.py000066400000000000000000000017511477772317200173600ustar00rootroot00000000000000""" Example that sends a single message and exits using the simple interface. You can use `simple_receive.py` (or `complete_receive.py`) to receive the message sent. """ from __future__ import annotations from kombu import Connection #: Create connection #: If hostname, userid, password and virtual_host is not specified #: the values below are the default, but listed here so it can #: be easily changed. 
with Connection('amqp://guest:guest@localhost:5672//') as conn: #: SimpleQueue mimics the interface of the Python Queue module. #: First argument can either be a queue name or a kombu.Queue object. #: If a name, then the queue will be declared with the name as the queue #: name, exchange name and routing key. with conn.SimpleQueue('kombu_demo') as queue: queue.put({'hello': 'world'}, serializer='json', compression='zlib') ##### # If you don't use the with statement, you must always # remember to close objects. # queue.close() # connection.close() kombu-5.5.3/examples/simple_task_queue/000077500000000000000000000000001477772317200202175ustar00rootroot00000000000000kombu-5.5.3/examples/simple_task_queue/__init__.py000066400000000000000000000000001477772317200223160ustar00rootroot00000000000000kombu-5.5.3/examples/simple_task_queue/client.py000066400000000000000000000017421477772317200220530ustar00rootroot00000000000000from __future__ import annotations from kombu.pools import producers from .queues import task_exchange priority_to_routing_key = { 'high': 'hipri', 'mid': 'midpri', 'low': 'lopri', } def send_as_task(connection, fun, args=(), kwargs={}, priority='mid'): payload = {'fun': fun, 'args': args, 'kwargs': kwargs} routing_key = priority_to_routing_key[priority] with producers[connection].acquire(block=True) as producer: producer.publish(payload, serializer='pickle', compression='bzip2', exchange=task_exchange, declare=[task_exchange], routing_key=routing_key) if __name__ == '__main__': from kombu import Connection from .tasks import hello_task connection = Connection('amqp://guest:guest@localhost:5672//') send_as_task(connection, fun=hello_task, args=('Kombu',), kwargs={}, priority='high') kombu-5.5.3/examples/simple_task_queue/queues.py000066400000000000000000000005031477772317200220760ustar00rootroot00000000000000from __future__ import annotations from kombu import Exchange, Queue task_exchange = Exchange('tasks', type='direct') task_queues = [Queue('hipri', task_exchange, routing_key='hipri'), Queue('midpri', task_exchange, routing_key='midpri'), Queue('lopri', task_exchange, routing_key='lopri')] kombu-5.5.3/examples/simple_task_queue/tasks.py000066400000000000000000000001341477772317200217140ustar00rootroot00000000000000from __future__ import annotations def hello_task(who='world'): print(f'Hello {who}') kombu-5.5.3/examples/simple_task_queue/worker.py000066400000000000000000000023741477772317200221100ustar00rootroot00000000000000from __future__ import annotations from kombu.log import get_logger from kombu.mixins import ConsumerMixin from kombu.utils.functional import reprcall from .queues import task_queues logger = get_logger(__name__) class Worker(ConsumerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer(queues=task_queues, accept=['pickle', 'json'], callbacks=[self.process_task])] def process_task(self, body, message): fun = body['fun'] args = body['args'] kwargs = body['kwargs'] logger.info('Got task: %s', reprcall(fun.__name__, args, kwargs)) try: fun(*args, **kwargs) except Exception as exc: logger.error('task raised exception: %r', exc) message.ack() if __name__ == '__main__': from kombu import Connection from kombu.utils.debug import setup_logging # setup root logger setup_logging(loglevel='INFO', loggers=['']) with Connection('amqp://guest:guest@localhost:5672//') as conn: try: worker = Worker(conn) worker.run() except KeyboardInterrupt: print('bye bye') 
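The three modules above are designed to be used together: ``queues.py``
declares the exchange and the three priority queues, ``client.py`` publishes
a callable as a pickled task message, and ``worker.py`` consumes and executes
it. A minimal end-to-end sketch (assuming a broker at
amqp://guest:guest@localhost:5672// and that it is run from the examples
directory so that ``simple_task_queue`` is importable):

    from kombu import Connection

    from simple_task_queue.client import send_as_task
    from simple_task_queue.tasks import hello_task
    from simple_task_queue.worker import Worker

    with Connection('amqp://guest:guest@localhost:5672//') as conn:
        # Publish one task; the priority name selects the routing key,
        # so 'high' lands on the 'hipri' queue declared in queues.py.
        send_as_task(conn, fun=hello_task, args=('Kombu',),
                     kwargs={}, priority='high')
        # Block and consume from all three priority queues until interrupted.
        Worker(conn).run()

In practice the producer and the worker run in separate processes, as the
``__main__`` blocks of ``client.py`` and ``worker.py`` do.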
kombu-5.5.3/kombu/000077500000000000000000000000001477772317200137775ustar00rootroot00000000000000kombu-5.5.3/kombu/__init__.py000066400000000000000000000074731477772317200161230ustar00rootroot00000000000000"""Messaging library for Python.""" from __future__ import annotations import os import re import sys from collections import namedtuple from typing import Any, cast __version__ = '5.5.3' __author__ = 'Ask Solem' __contact__ = 'auvipy@gmail.com' __homepage__ = 'https://kombu.readthedocs.io' __docformat__ = 'restructuredtext en' # -eof meta- version_info_t = namedtuple('version_info_t', ( 'major', 'minor', 'micro', 'releaselevel', 'serial', )) # bumpversion can only search for {current_version} # so we have to parse the version here. _temp = cast(re.Match, re.match( r'(\d+)\.(\d+).(\d+)(.+)?', __version__)).groups() VERSION = version_info = version_info_t( int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '') del _temp del re STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False if STATICA_HACK: # pragma: no cover # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. from kombu.common import eventloop, uuid # noqa from kombu.connection import BrokerConnection, Connection # noqa from kombu.entity import Exchange, Queue, binding # noqa from kombu.message import Message # noqa from kombu.messaging import Consumer, Producer # noqa from kombu.pools import connections, producers # noqa from kombu.serialization import disable_insecure_serializers # noqa from kombu.serialization import enable_insecure_serializers # noqa from kombu.utils.url import parse_url # noqa # Lazy loading. # - See werkzeug/__init__.py for the rationale behind this. 
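# Roughly, the effect of the machinery below is (illustration only, nothing
# here is executed):
#
#     import kombu
#     kombu.Connection   # module __getattr__ imports kombu.connection once,
#                        # copies its exported names onto this module and
#                        # returns the requested attribute.
#
# so importing the top-level package stays cheap until a symbol is first used.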
from types import ModuleType # noqa all_by_module = { 'kombu.connection': ['Connection', 'BrokerConnection'], 'kombu.entity': ['Exchange', 'Queue', 'binding'], 'kombu.message': ['Message'], 'kombu.messaging': ['Consumer', 'Producer'], 'kombu.pools': ['connections', 'producers'], 'kombu.utils.url': ['parse_url'], 'kombu.common': ['eventloop', 'uuid'], 'kombu.serialization': [ 'enable_insecure_serializers', 'disable_insecure_serializers', ], } object_origins = {} for _module, items in all_by_module.items(): for item in items: object_origins[item] = _module class module(ModuleType): """Customized Python module.""" def __getattr__(self, name: str) -> Any: if name in object_origins: module = __import__(object_origins[name], None, None, [name]) for extra_name in all_by_module[module.__name__]: setattr(self, extra_name, getattr(module, extra_name)) return getattr(module, name) return ModuleType.__getattribute__(self, name) def __dir__(self) -> list[str]: result = list(new_module.__all__) result.extend(('__file__', '__path__', '__doc__', '__all__', '__docformat__', '__name__', '__path__', 'VERSION', '__package__', '__version__', '__author__', '__contact__', '__homepage__', '__docformat__')) return result # keep a reference to this module so that it's not garbage collected old_module = sys.modules[__name__] new_module = sys.modules[__name__] = module(__name__) new_module.__dict__.update({ '__file__': __file__, '__path__': __path__, '__doc__': __doc__, '__all__': tuple(object_origins), '__version__': __version__, '__author__': __author__, '__contact__': __contact__, '__homepage__': __homepage__, '__docformat__': __docformat__, '__package__': __package__, 'version_info_t': version_info_t, 'version_info': version_info, 'VERSION': VERSION }) if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1') from .utils import debug debug.setup_logging() kombu-5.5.3/kombu/abstract.py000066400000000000000000000105121477772317200161530ustar00rootroot00000000000000"""Object utilities.""" from __future__ import annotations from copy import copy from typing import TYPE_CHECKING, Any, Callable, TypeVar from .connection import maybe_channel from .exceptions import NotBoundError from .utils.functional import ChannelPromise if TYPE_CHECKING: from kombu.connection import Connection from kombu.transport.virtual import Channel __all__ = ('Object', 'MaybeChannelBound') _T = TypeVar("_T") _ObjectType = TypeVar("_ObjectType", bound="Object") _MaybeChannelBoundType = TypeVar( "_MaybeChannelBoundType", bound="MaybeChannelBound" ) def unpickle_dict( cls: type[_ObjectType], kwargs: dict[str, Any] ) -> _ObjectType: return cls(**kwargs) def _any(v: _T) -> _T: return v class Object: """Common base class. Supports automatic kwargs->attributes handling, and cloning. """ attrs: tuple[tuple[str, Any], ...] 
= () def __init__(self, *args: Any, **kwargs: Any) -> None: for name, type_ in self.attrs: value = kwargs.get(name) if value is not None: setattr(self, name, (type_ or _any)(value)) else: try: getattr(self, name) except AttributeError: setattr(self, name, None) def as_dict(self, recurse: bool = False) -> dict[str, Any]: def f(obj: Any, type: Callable[[Any], Any] | None = None) -> Any: if recurse and isinstance(obj, Object): return obj.as_dict(recurse=True) return type(obj) if type and obj is not None else obj return { attr: f(getattr(self, attr), type) for attr, type in self.attrs } def __reduce__(self: _ObjectType) -> tuple[ Callable[[type[_ObjectType], dict[str, Any]], _ObjectType], tuple[type[_ObjectType], dict[str, Any]] ]: return unpickle_dict, (self.__class__, self.as_dict()) def __copy__(self: _ObjectType) -> _ObjectType: return self.__class__(**self.as_dict()) class MaybeChannelBound(Object): """Mixin for classes that can be bound to an AMQP channel.""" _channel: Channel | None = None _is_bound = False #: Defines whether maybe_declare can skip declaring this entity twice. can_cache_declaration = False def __call__( self: _MaybeChannelBoundType, channel: (Channel | Connection) ) -> _MaybeChannelBoundType: """`self(channel) -> self.bind(channel)`.""" return self.bind(channel) def bind( self: _MaybeChannelBoundType, channel: (Channel | Connection) ) -> _MaybeChannelBoundType: """Create copy of the instance that is bound to a channel.""" return copy(self).maybe_bind(channel) def maybe_bind( self: _MaybeChannelBoundType, channel: (Channel | Connection) ) -> _MaybeChannelBoundType: """Bind instance to channel if not already bound.""" if not self.is_bound and channel: self._channel = maybe_channel(channel) self.when_bound() self._is_bound = True return self def revive(self, channel: Channel) -> None: """Revive channel after the connection has been re-established. Used by :meth:`~kombu.Connection.ensure`. 
""" if self.is_bound: self._channel = channel self.when_bound() def when_bound(self) -> None: """Callback called when the class is bound.""" def __repr__(self) -> str: return self._repr_entity(type(self).__name__) def _repr_entity(self, item: str = '') -> str: item = item or type(self).__name__ if self.is_bound: return '<{} bound to chan:{}>'.format( item or type(self).__name__, self.channel.channel_id) return f'' @property def is_bound(self) -> bool: """Flag set if the channel is bound.""" return self._is_bound and self._channel is not None @property def channel(self) -> Channel: """Current channel if the object is bound.""" channel = self._channel if channel is None: raise NotBoundError( "Can't call method on {} not bound to a channel".format( type(self).__name__)) if isinstance(channel, ChannelPromise): channel = self._channel = channel() return channel kombu-5.5.3/kombu/asynchronous/000077500000000000000000000000001477772317200165325ustar00rootroot00000000000000kombu-5.5.3/kombu/asynchronous/__init__.py000066400000000000000000000003551477772317200206460ustar00rootroot00000000000000"""Event loop.""" from __future__ import annotations from kombu.utils.eventio import ERR, READ, WRITE from .hub import Hub, get_event_loop, set_event_loop __all__ = ('READ', 'WRITE', 'ERR', 'Hub', 'get_event_loop', 'set_event_loop') kombu-5.5.3/kombu/asynchronous/aws/000077500000000000000000000000001477772317200173245ustar00rootroot00000000000000kombu-5.5.3/kombu/asynchronous/aws/__init__.py000066400000000000000000000007331477772317200214400ustar00rootroot00000000000000from __future__ import annotations from typing import Any from kombu.asynchronous.aws.sqs.connection import AsyncSQSConnection def connect_sqs( aws_access_key_id: str | None = None, aws_secret_access_key: str | None = None, **kwargs: Any ) -> AsyncSQSConnection: """Return async connection to Amazon SQS.""" from .sqs.connection import AsyncSQSConnection return AsyncSQSConnection( aws_access_key_id, aws_secret_access_key, **kwargs ) kombu-5.5.3/kombu/asynchronous/aws/connection.py000066400000000000000000000212631477772317200220410ustar00rootroot00000000000000"""Amazon AWS Connection.""" from __future__ import annotations from email import message_from_bytes from email.mime.message import MIMEMessage from vine import promise, transform from kombu.asynchronous.aws.ext import AWSRequest, get_cert_path, get_response from kombu.asynchronous.http import Headers, Request, get_client def message_from_headers(hdr): bs = "\r\n".join("{}: {}".format(*h) for h in hdr) return message_from_bytes(bs.encode()) __all__ = ( 'AsyncHTTPSConnection', 'AsyncConnection', ) class AsyncHTTPResponse: """Async HTTP Response.""" def __init__(self, response): self.response = response self._msg = None self.version = 10 def read(self, *args, **kwargs): return self.response.body def getheader(self, name, default=None): return self.response.headers.get(name, default) def getheaders(self): return list(self.response.headers.items()) @property def msg(self): if self._msg is None: self._msg = MIMEMessage(message_from_headers(self.getheaders())) return self._msg @property def status(self): return self.response.code @property def reason(self): if self.response.error: return self.response.error.message return '' def __repr__(self): return repr(self.response) class AsyncHTTPSConnection: """Async HTTP Connection.""" Request = Request Response = AsyncHTTPResponse method = 'GET' path = '/' body = None default_ports = {'http': 80, 'https': 443} def __init__(self, strict=None, timeout=20.0, 
http_client=None): self.headers = [] self.timeout = timeout self.strict = strict self.http_client = http_client or get_client() def request(self, method, path, body=None, headers=None): self.path = path self.method = method if body is not None: try: read = body.read except AttributeError: self.body = body else: self.body = read() if headers is not None: self.headers.extend(list(headers.items())) def getrequest(self): headers = Headers(self.headers) return self.Request(self.path, method=self.method, headers=headers, body=self.body, connect_timeout=self.timeout, request_timeout=self.timeout, validate_cert=True, ca_certs=get_cert_path(True)) def getresponse(self, callback=None): request = self.getrequest() request.then(transform(self.Response, callback)) return self.http_client.add_request(request) def set_debuglevel(self, level): pass def connect(self): pass def close(self): pass def putrequest(self, method, path): self.method = method self.path = path def putheader(self, header, value): self.headers.append((header, value)) def endheaders(self): pass def send(self, data): if self.body: self.body += data else: self.body = data def __repr__(self): return f'' class AsyncConnection: """Async AWS Connection.""" def __init__(self, sqs_connection, http_client=None, **kwargs): self.sqs_connection = sqs_connection self._httpclient = http_client or get_client() def get_http_connection(self): return AsyncHTTPSConnection(http_client=self._httpclient) def _mexe(self, request, sender=None, callback=None): callback = callback or promise() conn = self.get_http_connection() if callable(sender): sender(conn, request.method, request.path, request.body, request.headers, callback) else: conn.request(request.method, request.url, request.body, request.headers) conn.getresponse(callback=callback) return callback class AsyncAWSQueryConnection(AsyncConnection): """Async AWS Query Connection.""" STATUS_CODE_OK = 200 STATUS_CODE_REQUEST_TIMEOUT = 408 STATUS_CODE_NETWORK_CONNECT_TIMEOUT_ERROR = 599 STATUS_CODE_INTERNAL_ERROR = 500 STATUS_CODE_BAD_GATEWAY = 502 STATUS_CODE_SERVICE_UNAVAILABLE_ERROR = 503 STATUS_CODE_GATEWAY_TIMEOUT = 504 STATUS_CODES_SERVER_ERRORS = ( STATUS_CODE_INTERNAL_ERROR, STATUS_CODE_BAD_GATEWAY, STATUS_CODE_SERVICE_UNAVAILABLE_ERROR ) STATUS_CODES_TIMEOUT = ( STATUS_CODE_REQUEST_TIMEOUT, STATUS_CODE_NETWORK_CONNECT_TIMEOUT_ERROR, STATUS_CODE_GATEWAY_TIMEOUT ) def __init__(self, sqs_connection, http_client=None, http_client_params=None, **kwargs): if not http_client_params: http_client_params = {} super().__init__(sqs_connection, http_client, **http_client_params) def make_request(self, operation, params_, path, verb, callback=None, protocol_params=None): params = params_.copy() params.update((protocol_params or {}).get('query', {})) if operation: params['Action'] = operation signer = self.sqs_connection._request_signer # defaults for non-get signing_type = 'standard' param_payload = {'data': params} if verb.lower() == 'get': # query-based opts signing_type = 'presign-url' param_payload = {'params': params} request = AWSRequest(method=verb, url=path, **param_payload) signer.sign(operation, request, signing_type=signing_type) prepared_request = request.prepare() return self._mexe(prepared_request, callback=callback) def get_list(self, operation, params, markers, path='/', parent=None, verb='POST', callback=None, protocol_params=None): return self.make_request( operation, params, path, verb, callback=transform( self._on_list_ready, callback, parent or self, markers, operation ), 
protocol_params=protocol_params, ) def get_object(self, operation, params, path='/', parent=None, verb='GET', callback=None, protocol_params=None): return self.make_request( operation, params, path, verb, callback=transform( self._on_obj_ready, callback, parent or self, operation ), protocol_params=protocol_params, ) def get_status(self, operation, params, path='/', parent=None, verb='GET', callback=None, protocol_params=None): return self.make_request( operation, params, path, verb, callback=transform( self._on_status_ready, callback, parent or self, operation ), protocol_params=protocol_params, ) def _on_list_ready(self, parent, markers, operation, response): service_model = self.sqs_connection.meta.service_model if response.status == self.STATUS_CODE_OK: _, parsed = get_response( service_model.operation_model(operation), response.response ) return parsed elif ( response.status in self.STATUS_CODES_TIMEOUT or response.status in self.STATUS_CODES_SERVER_ERRORS ): # When the server returns a timeout or 50X server error, # the response is interpreted as an empty list. # This prevents hanging the Celery worker. return [] else: raise self._for_status(response, response.read()) def _on_obj_ready(self, parent, operation, response): service_model = self.sqs_connection.meta.service_model if response.status == self.STATUS_CODE_OK: _, parsed = get_response( service_model.operation_model(operation), response.response ) return parsed else: raise self._for_status(response, response.read()) def _on_status_ready(self, parent, operation, response): service_model = self.sqs_connection.meta.service_model if response.status == self.STATUS_CODE_OK: httpres, _ = get_response( service_model.operation_model(operation), response.response ) return httpres.code else: raise self._for_status(response, response.read()) def _for_status(self, response, body): context = 'Empty body' if not body else 'HTTP Error' return Exception("Request {} HTTP {} {} ({})".format( context, response.status, response.reason, body )) kombu-5.5.3/kombu/asynchronous/aws/ext.py000066400000000000000000000011531477772317200204760ustar00rootroot00000000000000"""Amazon boto3 interface.""" from __future__ import annotations try: import boto3 from botocore import exceptions from botocore.awsrequest import AWSRequest from botocore.httpsession import get_cert_path from botocore.response import get_response except ImportError: boto3 = None class _void: pass class BotoCoreError(Exception): pass exceptions = _void() exceptions.BotoCoreError = BotoCoreError AWSRequest = _void() get_response = _void() get_cert_path = _void() __all__ = ( 'exceptions', 'AWSRequest', 'get_response', 'get_cert_path', ) kombu-5.5.3/kombu/asynchronous/aws/sqs/000077500000000000000000000000001477772317200201325ustar00rootroot00000000000000kombu-5.5.3/kombu/asynchronous/aws/sqs/__init__.py000066400000000000000000000000001477772317200222310ustar00rootroot00000000000000kombu-5.5.3/kombu/asynchronous/aws/sqs/connection.py000066400000000000000000000261031477772317200226450ustar00rootroot00000000000000"""Amazon SQS Connection.""" from __future__ import annotations import json from botocore.serialize import Serializer from vine import transform from kombu.asynchronous.aws.connection import AsyncAWSQueryConnection from kombu.asynchronous.aws.ext import AWSRequest from .ext import boto3 from .message import AsyncMessage from .queue import AsyncQueue __all__ = ('AsyncSQSConnection',) class AsyncSQSConnection(AsyncAWSQueryConnection): """Async SQS Connection.""" def __init__(self, 
sqs_connection, debug=0, region=None, fetch_message_attributes=None, **kwargs): if boto3 is None: raise ImportError('boto3 is not installed') super().__init__( sqs_connection, region_name=region, debug=debug, **kwargs ) self.fetch_message_attributes = ( fetch_message_attributes if fetch_message_attributes is not None else ["ApproximateReceiveCount"] ) def _create_query_request(self, operation, params, queue_url, method): params = params.copy() if operation: params['Action'] = operation # defaults for non-get param_payload = {'data': params} headers = {} if method.lower() == 'get': # query-based opts param_payload = {'params': params} if method.lower() == 'post': headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8' return AWSRequest(method=method, url=queue_url, headers=headers, **param_payload) def _create_json_request(self, operation, params, queue_url): params = params.copy() params['QueueUrl'] = queue_url service_model = self.sqs_connection.meta.service_model operation_model = service_model.operation_model(operation) url = self.sqs_connection._endpoint.host headers = {} # Content-Type json_version = operation_model.metadata['jsonVersion'] content_type = f'application/x-amz-json-{json_version}' headers['Content-Type'] = content_type # X-Amz-Target target = '{}.{}'.format( operation_model.metadata['targetPrefix'], operation_model.name, ) headers['X-Amz-Target'] = target param_payload = { 'data': json.dumps(params).encode(), 'headers': headers } method = operation_model.http.get('method', Serializer.DEFAULT_METHOD) return AWSRequest( method=method, url=url, **param_payload ) def make_request(self, operation_name, params, queue_url, verb, callback=None, protocol_params=None): """Override make_request to support different protocols. botocore has changed the default protocol of communicating with SQS backend from 'query' to 'json', so we need a special implementation of make_request for SQS. More information on this can be found in: https://github.com/celery/kombu/pull/1807. protocol_params: Optional[dict[str, dict]] of per-protocol additional parameters. Supported for the SQS query to json protocol transition. 
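
        For the expected shape of that mapping see the callers in this
        module; for example ``set_queue_attribute`` passes::

            {
                'json': {'Attributes': {attribute: value}},
                'query': {'Attribute.Name': attribute,
                          'Attribute.Value': value},
            }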
""" signer = self.sqs_connection._request_signer service_model = self.sqs_connection.meta.service_model protocol = service_model.protocol all_params = {**(params or {}), **protocol_params.get(protocol, {})} if protocol == 'query': request = self._create_query_request( operation_name, all_params, queue_url, verb) elif protocol == 'json': request = self._create_json_request( operation_name, all_params, queue_url) else: raise Exception(f'Unsupported protocol: {protocol}.') signing_type = 'presign-url' if request.method.lower() == 'get' \ else 'standard' signer.sign(operation_name, request, signing_type=signing_type) prepared_request = request.prepare() return self._mexe(prepared_request, callback=callback) def create_queue(self, queue_name, visibility_timeout=None, callback=None): params = {'QueueName': queue_name} if visibility_timeout: params['DefaultVisibilityTimeout'] = format( visibility_timeout, 'd', ) return self.get_object('CreateQueue', params, callback=callback) def delete_queue(self, queue, force_deletion=False, callback=None): return self.get_status('DeleteQueue', None, queue.id, callback=callback) def get_queue_url(self, queue): res = self.sqs_connection.get_queue_url(QueueName=queue) return res['QueueUrl'] def get_queue_attributes(self, queue, attribute='All', callback=None): return self.get_object( 'GetQueueAttributes', {'AttributeName': attribute}, queue.id, callback=callback, ) def set_queue_attribute(self, queue, attribute, value, callback=None): return self.get_status( 'SetQueueAttribute', {}, queue.id, callback=callback, protocol_params={ 'json': {'Attributes': {attribute: value}}, 'query': {'Attribute.Name': attribute, 'Attribute.Value': value}, }, ) def receive_message( self, queue, queue_url, number_messages=1, visibility_timeout=None, attributes=None, wait_time_seconds=None, callback=None ): params = {'MaxNumberOfMessages': number_messages} proto_params = {'query': {}, 'json': {}} attrs = attributes if attributes is not None else self.fetch_message_attributes if visibility_timeout: params['VisibilityTimeout'] = visibility_timeout if attrs: proto_params['json'].update({'AttributeNames': list(attrs)}) proto_params['query'].update(_query_object_encode({'AttributeName': list(attrs)})) if wait_time_seconds is not None: params['WaitTimeSeconds'] = wait_time_seconds return self.get_list( 'ReceiveMessage', params, [('Message', AsyncMessage)], queue_url, callback=callback, parent=queue, protocol_params=proto_params, ) def delete_message(self, queue, receipt_handle, callback=None): return self.delete_message_from_handle( queue, receipt_handle, callback, ) def delete_message_batch(self, queue, messages, callback=None): p_params = { 'json': { 'Entries': [{'Id': m.id, 'ReceiptHandle': m.receipt_handle} for m in messages], }, 'query': _query_object_encode({ 'DeleteMessageBatchRequestEntry': [ {'Id': m.id, 'ReceiptHandle': m.receipt_handle} for m in messages ], }), } return self.get_object( 'DeleteMessageBatch', {}, queue.id, verb='POST', callback=callback, protocol_params=p_params, ) def delete_message_from_handle(self, queue, receipt_handle, callback=None): return self.get_status( 'DeleteMessage', {'ReceiptHandle': receipt_handle}, queue, callback=callback, ) def send_message(self, queue, message_content, delay_seconds=None, callback=None): params = {'MessageBody': message_content} if delay_seconds: params['DelaySeconds'] = int(delay_seconds) return self.get_object( 'SendMessage', params, queue.id, verb='POST', callback=callback, ) def send_message_batch(self, queue, messages, 
callback=None): params = {} for i, msg in enumerate(messages): prefix = f'SendMessageBatchRequestEntry.{i + 1}' params.update({ f'{prefix}.Id': msg[0], f'{prefix}.MessageBody': msg[1], f'{prefix}.DelaySeconds': msg[2], }) return self.get_object( 'SendMessageBatch', params, queue.id, verb='POST', callback=callback, ) def change_message_visibility(self, queue, receipt_handle, visibility_timeout, callback=None): return self.get_status( 'ChangeMessageVisibility', {'ReceiptHandle': receipt_handle, 'VisibilityTimeout': visibility_timeout}, queue.id, callback=callback, ) def change_message_visibility_batch(self, queue, messages, callback=None): entries = [ {'Id': t[0].id, 'ReceiptHandle': t[0].receipt_handle, 'VisibilityTimeout': t[1]} for t in messages ] p_params = { 'json': {'Entries': entries}, 'query': _query_object_encode({'ChangeMessageVisibilityBatchRequestEntry': entries}), } return self.get_object( 'ChangeMessageVisibilityBatch', {}, queue.id, verb='POST', callback=callback, protocol_params=p_params, ) def get_all_queues(self, prefix='', callback=None): params = {} if prefix: params['QueueNamePrefix'] = prefix return self.get_list( 'ListQueues', params, [('QueueUrl', AsyncQueue)], callback=callback, ) def get_queue(self, queue_name, callback=None): # TODO Does not support owner_acct_id argument return self.get_all_queues( queue_name, transform(self._on_queue_ready, callback, queue_name), ) lookup = get_queue def _on_queue_ready(self, name, queues): return next( (q for q in queues if q.url.endswith(name)), None, ) def get_dead_letter_source_queues(self, queue, callback=None): return self.get_list( 'ListDeadLetterSourceQueues', {'QueueUrl': queue.url}, [('QueueUrl', AsyncQueue)], callback=callback, ) def add_permission(self, queue, label, aws_account_id, action_name, callback=None): return self.get_status( 'AddPermission', {'Label': label, 'AWSAccountId': aws_account_id, 'ActionName': action_name}, queue.id, callback=callback, ) def remove_permission(self, queue, label, callback=None): return self.get_status( 'RemovePermission', {'Label': label}, queue.id, callback=callback, ) def _query_object_encode(items): params = {} _query_object_encode_part(params, '', items) return {k: v for k, v in params.items()} def _query_object_encode_part(params, prefix, part): dotted = f'{prefix}.' 
if prefix else prefix if isinstance(part, (list, tuple)): for i, item in enumerate(part): _query_object_encode_part(params, f'{dotted}{i + 1}', item) elif isinstance(part, dict): for key, value in part.items(): _query_object_encode_part(params, f'{dotted}{key}', value) else: params[prefix] = str(part) kombu-5.5.3/kombu/asynchronous/aws/sqs/ext.py000066400000000000000000000002031477772317200212770ustar00rootroot00000000000000"""Amazon SQS boto3 interface.""" from __future__ import annotations try: import boto3 except ImportError: boto3 = None kombu-5.5.3/kombu/asynchronous/aws/sqs/message.py000066400000000000000000000015741477772317200221370ustar00rootroot00000000000000"""Amazon SQS message implementation.""" from __future__ import annotations import base64 from kombu.message import Message from kombu.utils.encoding import str_to_bytes class BaseAsyncMessage(Message): """Base class for messages received on async client.""" class AsyncRawMessage(BaseAsyncMessage): """Raw Message.""" class AsyncMessage(BaseAsyncMessage): """Serialized message.""" def encode(self, value): """Encode/decode the value using Base64 encoding.""" return base64.b64encode(str_to_bytes(value)).decode() def __getitem__(self, item): """Support Boto3-style access on a message.""" if item == 'ReceiptHandle': return self.receipt_handle elif item == 'Body': return self.get_body() elif item == 'queue': return self.queue else: raise KeyError(item) kombu-5.5.3/kombu/asynchronous/aws/sqs/queue.py000066400000000000000000000104621477772317200216330ustar00rootroot00000000000000"""Amazon SQS queue implementation.""" from __future__ import annotations from vine import transform from .message import AsyncMessage _all__ = ['AsyncQueue'] def list_first(rs): """Get the first item in a list, or None if list empty.""" return rs[0] if len(rs) == 1 else None class AsyncQueue: """Async SQS Queue.""" def __init__(self, connection=None, url=None, message_class=AsyncMessage): self.connection = connection self.url = url self.message_class = message_class self.visibility_timeout = None def _NA(self, *args, **kwargs): raise NotImplementedError() count_slow = dump = save_to_file = save_to_filename = save = \ save_to_s3 = load_from_s3 = load_from_file = load_from_filename = \ load = clear = _NA def get_attributes(self, attributes='All', callback=None): return self.connection.get_queue_attributes( self, attributes, callback, ) def set_attribute(self, attribute, value, callback=None): return self.connection.set_queue_attribute( self, attribute, value, callback, ) def get_timeout(self, callback=None, _attr='VisibilityTimeout'): return self.get_attributes( _attr, transform( self._coerce_field_value, callback, _attr, int, ), ) def _coerce_field_value(self, key, type, response): return type(response[key]) def set_timeout(self, visibility_timeout, callback=None): return self.set_attribute( 'VisibilityTimeout', visibility_timeout, transform( self._on_timeout_set, callback, ) ) def _on_timeout_set(self, visibility_timeout): if visibility_timeout: self.visibility_timeout = visibility_timeout return self.visibility_timeout def add_permission(self, label, aws_account_id, action_name, callback=None): return self.connection.add_permission( self, label, aws_account_id, action_name, callback, ) def remove_permission(self, label, callback=None): return self.connection.remove_permission(self, label, callback) def read(self, visibility_timeout=None, wait_time_seconds=None, callback=None): return self.get_messages( 1, visibility_timeout, 
wait_time_seconds=wait_time_seconds, callback=transform(list_first, callback), ) def write(self, message, delay_seconds=None, callback=None): return self.connection.send_message( self, message.get_body_encoded(), delay_seconds, callback=transform(self._on_message_sent, callback, message), ) def write_batch(self, messages, callback=None): return self.connection.send_message_batch( self, messages, callback=callback, ) def _on_message_sent(self, orig_message, new_message): orig_message.id = new_message.id orig_message.md5 = new_message.md5 return new_message def get_messages(self, num_messages=1, visibility_timeout=None, attributes=None, wait_time_seconds=None, callback=None): return self.connection.receive_message( self, number_messages=num_messages, visibility_timeout=visibility_timeout, attributes=attributes, wait_time_seconds=wait_time_seconds, callback=callback, ) def delete_message(self, message, callback=None): return self.connection.delete_message(self, message, callback) def delete_message_batch(self, messages, callback=None): return self.connection.delete_message_batch( self, messages, callback=callback, ) def change_message_visibility_batch(self, messages, callback=None): return self.connection.change_message_visibility_batch( self, messages, callback=callback, ) def delete(self, callback=None): return self.connection.delete_queue(self, callback=callback) def count(self, page_size=10, vtimeout=10, callback=None, _attr='ApproximateNumberOfMessages'): return self.get_attributes( _attr, callback=transform( self._coerce_field_value, callback, _attr, int, ), ) kombu-5.5.3/kombu/asynchronous/debug.py000066400000000000000000000033551477772317200202000ustar00rootroot00000000000000"""Event-loop debugging tools.""" from __future__ import annotations from kombu.utils.eventio import ERR, READ, WRITE from kombu.utils.functional import reprcall def repr_flag(flag): """Return description of event loop flag.""" return '{}{}{}'.format('R' if flag & READ else '', 'W' if flag & WRITE else '', '!' 
if flag & ERR else '') def _rcb(obj): if obj is None: return '' if isinstance(obj, str): return obj if isinstance(obj, tuple): cb, args = obj return reprcall(cb.__name__, args=args) return obj.__name__ def repr_active(h): """Return description of active readers and writers.""" return ', '.join(repr_readers(h) + repr_writers(h)) def repr_events(h, events): """Return description of events returned by poll.""" return ', '.join( '{}({})->{}'.format( _rcb(callback_for(h, fd, fl, '(GONE)')), fd, repr_flag(fl), ) for fd, fl in events ) def repr_readers(h): """Return description of pending readers.""" return [f'({fd}){_rcb(cb)}->{repr_flag(READ | ERR)}' for fd, cb in h.readers.items()] def repr_writers(h): """Return description of pending writers.""" return [f'({fd}){_rcb(cb)}->{repr_flag(WRITE)}' for fd, cb in h.writers.items()] def callback_for(h, fd, flag, *default): """Return the callback used for hub+fd+flag.""" try: if flag & READ: return h.readers[fd] if flag & WRITE: if fd in h.consolidate: return h.consolidate_callback return h.writers[fd] except KeyError: if default: return default[0] raise kombu-5.5.3/kombu/asynchronous/http/000077500000000000000000000000001477772317200175115ustar00rootroot00000000000000kombu-5.5.3/kombu/asynchronous/http/__init__.py000066400000000000000000000014341477772317200216240ustar00rootroot00000000000000from __future__ import annotations from kombu.asynchronous import get_event_loop from kombu.asynchronous.http.base import BaseClient, Headers, Request, Response from kombu.asynchronous.hub import Hub __all__ = ('Client', 'Headers', 'Response', 'Request', 'get_client') def Client(hub: Hub | None = None, **kwargs: int) -> BaseClient: """Create new HTTP client.""" from .urllib3_client import Urllib3Client return Urllib3Client(hub, **kwargs) def get_client(hub: Hub | None = None, **kwargs: int) -> BaseClient: """Get or create HTTP client bound to the current event loop.""" hub = hub or get_event_loop() try: return hub._current_http_client except AttributeError: client = hub._current_http_client = Client(hub, **kwargs) return client kombu-5.5.3/kombu/asynchronous/http/base.py000066400000000000000000000231031477772317200207740ustar00rootroot00000000000000"""Base async HTTP client implementation.""" from __future__ import annotations import sys from http.client import responses from typing import TYPE_CHECKING from vine import Thenable, maybe_promise, promise from kombu.exceptions import HttpError from kombu.utils.compat import coro from kombu.utils.encoding import bytes_to_str from kombu.utils.functional import maybe_list, memoize if TYPE_CHECKING: from types import TracebackType __all__ = ('Headers', 'Response', 'Request', 'BaseClient') PYPY = hasattr(sys, 'pypy_version_info') @memoize(maxsize=1000) def normalize_header(key): return '-'.join(p.capitalize() for p in key.split('-')) class Headers(dict): """Represents a mapping of HTTP headers.""" # TODO: This is just a regular dict and will not perform normalization # when looking up keys etc. #: Set when all of the headers have been read. complete = False #: Internal attribute used to keep track of continuation lines. _prev_key = None @Thenable.register class Request: """A HTTP Request. Arguments: --------- url (str): The URL to request. method (str): The HTTP method to use (defaults to ``GET``). Keyword Arguments: ----------------- headers (Dict, ~kombu.asynchronous.http.Headers): Optional headers for this request body (str): Optional body for this request. 
connect_timeout (float): Connection timeout in float seconds Default is 30.0. timeout (float): Time in float seconds before the request times out Default is 30.0. follow_redirects (bool): Specify if the client should follow redirects Enabled by default. max_redirects (int): Maximum number of redirects (default 6). use_gzip (bool): Allow the server to use gzip compression. Enabled by default. validate_cert (bool): Set to true if the server certificate should be verified when performing ``https://`` requests. Enabled by default. auth_username (str): Username for HTTP authentication. auth_password (str): Password for HTTP authentication. auth_mode (str): Type of HTTP authentication (``basic`` or ``digest``). user_agent (str): Custom user agent for this request. network_interface (str): Network interface to use for this request. on_ready (Callable): Callback to be called when the response has been received. Must accept single ``response`` argument. on_stream (Callable): Optional callback to be called every time body content has been read from the socket. If specified then the response body and buffer attributes will not be available. on_timeout (callable): Optional callback to be called if the request times out. on_header (Callable): Optional callback to be called for every header line received from the server. The signature is ``(headers, line)`` and note that if you want ``response.headers`` to be populated then your callback needs to also call ``client.on_header(headers, line)``. on_prepare (Callable): Optional callback that is implementation specific (e.g. curl client will pass the ``curl`` instance to this callback). proxy_host (str): Optional proxy host. Note that a ``proxy_port`` must also be provided or a :exc:`ValueError` will be raised. proxy_username (str): Optional username to use when logging in to the proxy. proxy_password (str): Optional password to use when authenticating with the proxy server. ca_certs (str): Custom CA certificates file to use. client_key (str): Optional filename for client SSL key. client_cert (str): Optional filename for client SSL certificate. """ body = user_agent = network_interface = \ auth_username = auth_password = auth_mode = \ proxy_host = proxy_port = proxy_username = proxy_password = \ ca_certs = client_key = client_cert = None connect_timeout = 30.0 request_timeout = 30.0 follow_redirects = True max_redirects = 6 use_gzip = True validate_cert = True if not PYPY: # pragma: no cover __slots__ = ('url', 'method', 'on_ready', 'on_timeout', 'on_stream', 'on_prepare', 'on_header', 'headers', '__weakref__', '__dict__') def __init__(self, url, method='GET', on_ready=None, on_timeout=None, on_stream=None, on_prepare=None, on_header=None, headers=None, **kwargs): self.url = url self.method = method or self.method self.on_ready = maybe_promise(on_ready) or promise() self.on_timeout = maybe_promise(on_timeout) self.on_stream = maybe_promise(on_stream) self.on_prepare = maybe_promise(on_prepare) self.on_header = maybe_promise(on_header) if kwargs: for k, v in kwargs.items(): setattr(self, k, v) if not isinstance(headers, Headers): headers = Headers(headers or {}) self.headers = headers def then(self, callback, errback=None): self.on_ready.then(callback, errback) def __repr__(self): return ''.format(self) class Response: """HTTP Response. Arguments --------- request (~kombu.asynchronous.http.Request): See :attr:`request`. code (int): See :attr:`code`. headers (~kombu.asynchronous.http.Headers): See :attr:`headers`. 
buffer (bytes): See :attr:`buffer` effective_url (str): See :attr:`effective_url`. status (str): See :attr:`status`. Attributes ---------- request (~kombu.asynchronous.http.Request): object used to get this response. code (int): HTTP response code (e.g. 200, 404, or 500). headers (~kombu.asynchronous.http.Headers): HTTP headers for this response. buffer (bytes): Socket read buffer. effective_url (str): The destination url for this request after following redirects. error (Exception): Error instance if the request resulted in a HTTP error code. status (str): Human equivalent of :attr:`code`, e.g. ``OK``, `Not found`, or 'Internal Server Error'. """ if not PYPY: # pragma: no cover __slots__ = ('request', 'code', 'headers', 'buffer', 'effective_url', 'error', 'status', '_body', '__weakref__') def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, status=None): self.request = request self.code = code self.headers = headers if headers is not None else Headers() self.buffer = buffer self.effective_url = effective_url or request.url self._body = None self.status = status or responses.get(self.code, 'Unknown') self.error = error if self.error is None and (self.code < 200 or self.code > 299): self.error = HttpError(self.code, self.status, self) def raise_for_error(self): """Raise if the request resulted in an HTTP error code. Raises ------ :class:`~kombu.exceptions.HttpError` """ if self.error: raise self.error @property def body(self): """The full contents of the response body. Note: ---- Accessing this property will evaluate the buffer and subsequent accesses will be cached. """ if self._body is None: if self.buffer is not None: self._body = self.buffer.getvalue() return self._body # these are for compatibility with Requests @property def status_code(self): return self.code @property def content(self): return self.body @coro def header_parser(keyt=normalize_header): while 1: (line, headers) = yield if line.startswith('HTTP/'): continue elif not line: headers.complete = True continue elif line[0].isspace(): pkey = headers._prev_key headers[pkey] = ' '.join([headers.get(pkey) or '', line.lstrip()]) else: key, value = line.split(':', 1) key = headers._prev_key = keyt(key) headers[key] = value.strip() class BaseClient: """Base class for HTTP clients. This class provides the basic structure and functionality for HTTP clients. Subclasses should implement specific HTTP client behavior. 
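
    A subclass essentially has to implement ``add_request(request)`` and, when
    the transfer completes, resolve the request by calling
    ``request.on_ready(response)`` with a :class:`Response` instance, as the
    urllib3 client in this package does.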
""" Headers = Headers Request = Request Response = Response def __init__(self, hub, **kwargs): self.hub = hub self._header_parser = header_parser() def perform(self, request, **kwargs): for req in maybe_list(request) or []: if not isinstance(req, self.Request): req = self.Request(req, **kwargs) self.add_request(req) def add_request(self, request): raise NotImplementedError('must implement add_request') def close(self): pass def on_header(self, headers, line): try: self._header_parser.send((bytes_to_str(line), headers)) except StopIteration: self._header_parser = header_parser() def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() kombu-5.5.3/kombu/asynchronous/http/urllib3_client.py000066400000000000000000000145051477772317200230020ustar00rootroot00000000000000from __future__ import annotations from collections import deque from io import BytesIO import urllib3 from kombu.asynchronous.hub import Hub, get_event_loop from kombu.exceptions import HttpError from .base import BaseClient, Request __all__ = ('Urllib3Client',) from ...utils.encoding import bytes_to_str DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; urllib3)' EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH']) def _get_pool_key_parts(request: Request) -> list[str]: _pool_key_parts = [] if request.network_interface: _pool_key_parts.append(f"interface={request.network_interface}") if request.validate_cert: _pool_key_parts.append("validate_cert=True") else: _pool_key_parts.append("validate_cert=False") if request.ca_certs: _pool_key_parts.append(f"ca_certs={request.ca_certs}") if request.client_cert: _pool_key_parts.append(f"client_cert={request.client_cert}") if request.client_key: _pool_key_parts.append(f"client_key={request.client_key}") return _pool_key_parts class Urllib3Client(BaseClient): """Urllib3 HTTP Client.""" _pools = {} def __init__(self, hub: Hub | None = None, max_clients: int = 10): hub = hub or get_event_loop() super().__init__(hub) self.max_clients = max_clients self._pending = deque() self._timeout_check_tref = self.hub.call_repeatedly( 1.0, self._timeout_check, ) def pools_close(self): for pool in self._pools.values(): pool.close() self._pools.clear() def close(self): self._timeout_check_tref.cancel() self.pools_close() def add_request(self, request): self._pending.append(request) self._process_queue() return request def get_pool(self, request: Request): _pool_key_parts = _get_pool_key_parts(request=request) _proxy_url = None proxy_headers = None if request.proxy_host: _proxy_url = urllib3.util.Url( scheme=None, host=request.proxy_host, port=request.proxy_port, ) if request.proxy_username: proxy_headers = urllib3.make_headers( proxy_basic_auth=( f"{request.proxy_username}" f":{request.proxy_password}" ) ) else: proxy_headers = None _proxy_url = _proxy_url.url _pool_key_parts.append(f"proxy={_proxy_url}") if proxy_headers: _pool_key_parts.append(f"proxy_headers={str(proxy_headers)}") _pool_key = "|".join(_pool_key_parts) if _pool_key in self._pools: return self._pools[_pool_key] # create new pool if _proxy_url: _pool = urllib3.ProxyManager( proxy_url=_proxy_url, num_pools=self.max_clients, proxy_headers=proxy_headers ) else: _pool = urllib3.PoolManager(num_pools=self.max_clients) # Network Interface if request.network_interface: _pool.connection_pool_kw['source_address'] = ( request.network_interface, 0 ) # SSL Verification if request.validate_cert: _pool.connection_pool_kw['cert_reqs'] = 
'CERT_REQUIRED' else: _pool.connection_pool_kw['cert_reqs'] = 'CERT_NONE' # CA Certificates if request.ca_certs is not None: _pool.connection_pool_kw['ca_certs'] = request.ca_certs elif request.validate_cert is True: try: from certifi import where _pool.connection_pool_kw['ca_certs'] = where() except ImportError: pass # Client Certificates if request.client_cert is not None: _pool.connection_pool_kw['cert_file'] = request.client_cert if request.client_key is not None: _pool.connection_pool_kw['key_file'] = request.client_key self._pools[_pool_key] = _pool return _pool def _timeout_check(self): self._process_pending_requests() def _process_pending_requests(self): while self._pending: request = self._pending.popleft() self._process_request(request) def _process_request(self, request: Request): # Prepare headers headers = request.headers headers.setdefault( 'User-Agent', bytes_to_str(request.user_agent or DEFAULT_USER_AGENT) ) headers.setdefault( 'Accept-Encoding', 'gzip,deflate' if request.use_gzip else 'none' ) # Authentication if request.auth_username is not None: headers.update( urllib3.util.make_headers( basic_auth=( f"{request.auth_username}" f":{request.auth_password or ''}" ) ) ) # Make the request using urllib3 try: _pool = self.get_pool(request=request) response = _pool.request( request.method, request.url, headers=headers, body=request.body, preload_content=False, redirect=request.follow_redirects, ) buffer = BytesIO(response.data) response_obj = self.Response( request=request, code=response.status, headers=response.headers, buffer=buffer, effective_url=response.geturl(), error=None ) except urllib3.exceptions.HTTPError as e: response_obj = self.Response( request=request, code=599, headers={}, buffer=None, effective_url=None, error=HttpError(599, str(e)) ) request.on_ready(response_obj) def _process_queue(self): self._process_pending_requests() def on_readable(self, fd): pass def on_writable(self, fd): pass def _setup_request(self, curl, request, buffer, headers): pass kombu-5.5.3/kombu/asynchronous/hub.py000066400000000000000000000272541477772317200176740ustar00rootroot00000000000000"""Event loop implementation.""" from __future__ import annotations import errno import threading from contextlib import contextmanager from copy import copy from queue import Empty from time import sleep from types import GeneratorType as generator from vine import Thenable, promise from kombu.log import get_logger from kombu.utils.compat import fileno from kombu.utils.eventio import ERR, READ, WRITE, poll from kombu.utils.objects import cached_property from .timer import Timer __all__ = ('Hub', 'get_event_loop', 'set_event_loop') logger = get_logger(__name__) _current_loop: Hub | None = None W_UNKNOWN_EVENT = """\ Received unknown event %r for fd %r, please contact support!\ """ class Stop(BaseException): """Stops the event loop.""" def _raise_stop_error(): raise Stop() @contextmanager def _dummy_context(*args, **kwargs): yield def get_event_loop() -> Hub | None: """Get current event loop object.""" return _current_loop def set_event_loop(loop: Hub | None) -> Hub | None: """Set the current event loop object.""" global _current_loop _current_loop = loop return loop class Hub: """Event loop object. Arguments: --------- timer (kombu.asynchronous.Timer): Specify custom timer instance. """ #: Flag set if reading from an fd will not block. READ = READ #: Flag set if writing to an fd will not block. WRITE = WRITE #: Flag set on error, and the fd should be read from asap. 
ERR = ERR #: List of callbacks to be called when the loop is exiting, #: applied with the hub instance as sole argument. on_close = None def __init__(self, timer=None): self.timer = timer if timer is not None else Timer() self.readers = {} self.writers = {} self.on_tick = set() self.on_close = set() self._ready = set() self._ready_lock = threading.Lock() self._running = False self._loop = None # The eventloop (in celery.worker.loops) # will merge fds in this set and then instead of calling # the callback for each ready fd it will call the # :attr:`consolidate_callback` with the list of ready_fds # as an argument. This API is internal and is only # used by the multiprocessing pool to find inqueues # that are ready to write. self.consolidate = set() self.consolidate_callback = None self.propagate_errors = () self._create_poller() @property def poller(self): if not self._poller: self._create_poller() return self._poller @poller.setter def poller(self, value): self._poller = value def reset(self): self.close() self._create_poller() def _create_poller(self): self._poller = poll() self._register_fd = self._poller.register self._unregister_fd = self._poller.unregister def _close_poller(self): if self._poller is not None: self._poller.close() self._poller = None self._register_fd = None self._unregister_fd = None def stop(self): self.call_soon(_raise_stop_error) def __repr__(self): return ''.format( id(self), len(self.readers), len(self.writers), ) def fire_timers(self, min_delay=1, max_delay=10, max_timers=10, propagate=()): timer = self.timer delay = None if timer and timer._queue: for i in range(max_timers): delay, entry = next(self.scheduler) if entry is None: break try: entry() except propagate: raise except (MemoryError, AssertionError): raise except OSError as exc: if exc.errno == errno.ENOMEM: raise logger.error('Error in timer: %r', exc, exc_info=1) except Exception as exc: logger.error('Error in timer: %r', exc, exc_info=1) return min(delay or min_delay, max_delay) def _remove_from_loop(self, fd): try: self._unregister(fd) finally: self._discard(fd) def add(self, fd, callback, flags, args=(), consolidate=False): fd = fileno(fd) try: self.poller.register(fd, flags) except ValueError: self._remove_from_loop(fd) raise else: dest = self.readers if flags & READ else self.writers if consolidate: self.consolidate.add(fd) dest[fd] = None else: dest[fd] = callback, args def remove(self, fd): fd = fileno(fd) self._remove_from_loop(fd) def run_forever(self): self._running = True try: while 1: try: self.run_once() except Stop: break finally: self._running = False def run_once(self): try: next(self.loop) except StopIteration: self._loop = None def call_soon(self, callback, *args): if not isinstance(callback, Thenable): callback = promise(callback, args) with self._ready_lock: self._ready.add(callback) return callback def call_later(self, delay, callback, *args): return self.timer.call_after(delay, callback, args) def call_at(self, when, callback, *args): return self.timer.call_at(when, callback, args) def call_repeatedly(self, delay, callback, *args): return self.timer.call_repeatedly(delay, callback, args) def add_reader(self, fds, callback, *args): return self.add(fds, callback, READ | ERR, args) def add_writer(self, fds, callback, *args): return self.add(fds, callback, WRITE, args) def remove_reader(self, fd): writable = fd in self.writers on_write = self.writers.get(fd) try: self._remove_from_loop(fd) finally: if writable: cb, args = on_write self.add(fd, cb, WRITE, args) def remove_writer(self, 
fd): readable = fd in self.readers on_read = self.readers.get(fd) try: self._remove_from_loop(fd) finally: if readable: cb, args = on_read self.add(fd, cb, READ | ERR, args) def _unregister(self, fd): try: self.poller.unregister(fd) except (AttributeError, KeyError, OSError): pass def _pop_ready(self): with self._ready_lock: ready = self._ready self._ready = set() return ready def close(self, *args): [self._unregister(fd) for fd in self.readers] self.readers.clear() [self._unregister(fd) for fd in self.writers] self.writers.clear() self.consolidate.clear() self._close_poller() for callback in self.on_close: callback(self) # Complete remaining todo before Hub close # Eg: Acknowledge message # To avoid infinite loop where one of the callables adds items # to self._ready (via call_soon or otherwise). # we create new list with current self._ready todos = self._pop_ready() for item in todos: item() def _discard(self, fd): fd = fileno(fd) self.readers.pop(fd, None) self.writers.pop(fd, None) self.consolidate.discard(fd) def on_callback_error(self, callback, exc): logger.error( 'Callback %r raised exception: %r', callback, exc, exc_info=1, ) def create_loop(self, generator=generator, sleep=sleep, min=min, next=next, Empty=Empty, StopIteration=StopIteration, KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR): readers, writers = self.readers, self.writers poll = self.poller.poll fire_timers = self.fire_timers hub_remove = self.remove scheduled = self.timer._queue consolidate = self.consolidate consolidate_callback = self.consolidate_callback propagate = self.propagate_errors while 1: todo = self._pop_ready() for item in todo: if item: item() poll_timeout = fire_timers(propagate=propagate) if scheduled else 1 for tick_callback in copy(self.on_tick): tick_callback() # print('[[[HUB]]]: %s' % (self.repr_active(),)) if readers or writers: to_consolidate = [] try: events = poll(poll_timeout) # print('[EVENTS]: %s' % (self.repr_events(events),)) except ValueError: # Issue celery/#882 return for fd, event in events or (): general_error = False if fd in consolidate and \ writers.get(fd) is None: to_consolidate.append(fd) continue cb = cbargs = None if event & READ: try: cb, cbargs = readers[fd] except KeyError: self.remove_reader(fd) continue elif event & WRITE: try: cb, cbargs = writers[fd] except KeyError: self.remove_writer(fd) continue elif event & ERR: general_error = True else: logger.info(W_UNKNOWN_EVENT, event, fd) general_error = True if general_error: try: cb, cbargs = (readers.get(fd) or writers.get(fd)) except TypeError: pass if cb is None: self.remove(fd) continue if isinstance(cb, generator): try: next(cb) except OSError as exc: if exc.errno != errno.EBADF: raise hub_remove(fd) except StopIteration: pass except Exception: hub_remove(fd) raise else: try: cb(*cbargs) except Empty: pass if to_consolidate: consolidate_callback(to_consolidate) else: # no sockets yet, startup is probably not done. 
sleep(min(poll_timeout, 0.1)) yield def repr_active(self): from .debug import repr_active return repr_active(self) def repr_events(self, events): from .debug import repr_events return repr_events(self, events or []) @cached_property def scheduler(self): return iter(self.timer) @property def loop(self): if self._loop is None: self._loop = self.create_loop() return self._loop kombu-5.5.3/kombu/asynchronous/semaphore.py000066400000000000000000000067511477772317200211000ustar00rootroot00000000000000"""Semaphores and concurrency primitives.""" from __future__ import annotations import sys from collections import deque from typing import TYPE_CHECKING if TYPE_CHECKING: from types import TracebackType from typing import Callable, Deque if sys.version_info < (3, 10): from typing_extensions import ParamSpec else: from typing import ParamSpec P = ParamSpec("P") __all__ = ('DummyLock', 'LaxBoundedSemaphore') class LaxBoundedSemaphore: """Asynchronous Bounded Semaphore. Lax means that the value will stay within the specified range even if released more times than it was acquired. Example: ------- >>> x = LaxBoundedSemaphore(2) >>> x.acquire(print, 'HELLO 1') HELLO 1 >>> x.acquire(print, 'HELLO 2') HELLO 2 >>> x.acquire(print, 'HELLO 3') >>> x._waiters # private, do not access directly [print, ('HELLO 3',)] >>> x.release() HELLO 3 """ def __init__(self, value: int) -> None: self.initial_value = self.value = value self._waiting: Deque[tuple] = deque() self._add_waiter = self._waiting.append self._pop_waiter = self._waiting.popleft def acquire( self, callback: Callable[P, None], *partial_args: P.args, **partial_kwargs: P.kwargs ) -> bool: """Acquire semaphore. This will immediately apply ``callback`` if the resource is available, otherwise the callback is suspended until the semaphore is released. Arguments: --------- callback (Callable): The callback to apply. *partial_args (Any): partial arguments to callback. """ value = self.value if value <= 0: self._add_waiter((callback, partial_args, partial_kwargs)) return False else: self.value = max(value - 1, 0) callback(*partial_args, **partial_kwargs) return True def release(self) -> None: """Release semaphore. Note: ---- If there are any waiters this will apply the first waiter that is waiting for the resource (FIFO order). 
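        Example:
        -------
        A small illustrative sketch of the "lax" behaviour described in the
        class docstring: releasing more often than the semaphore was acquired
        never grows the value past its initial bound::

            >>> x = LaxBoundedSemaphore(1)
            >>> x.release()
            >>> x.release()
            >>> x.value
            1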
""" try: waiter, args, kwargs = self._pop_waiter() except IndexError: self.value = min(self.value + 1, self.initial_value) else: waiter(*args, **kwargs) def grow(self, n: int = 1) -> None: """Change the size of the semaphore to accept more users.""" self.initial_value += n self.value += n for _ in range(n): self.release() def shrink(self, n: int = 1) -> None: """Change the size of the semaphore to accept less users.""" self.initial_value = max(self.initial_value - n, 0) self.value = max(self.value - n, 0) def clear(self) -> None: """Reset the semaphore, which also wipes out any waiting callbacks.""" self._waiting.clear() self.value = self.initial_value def __repr__(self) -> str: return '<{} at {:#x} value:{} waiting:{}>'.format( self.__class__.__name__, id(self), self.value, len(self._waiting), ) class DummyLock: """Pretending to be a lock.""" def __enter__(self) -> DummyLock: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: pass kombu-5.5.3/kombu/asynchronous/timer.py000066400000000000000000000154121477772317200202270ustar00rootroot00000000000000"""Timer scheduling Python callbacks.""" from __future__ import annotations import heapq import sys from collections import namedtuple from datetime import datetime from functools import total_ordering from time import monotonic from time import time as _time from typing import TYPE_CHECKING from weakref import proxy as weakrefproxy from vine.utils import wraps from kombu.log import get_logger if sys.version_info >= (3, 9): from zoneinfo import ZoneInfo else: from backports.zoneinfo import ZoneInfo if TYPE_CHECKING: from types import TracebackType __all__ = ('Entry', 'Timer', 'to_timestamp') logger = get_logger(__name__) DEFAULT_MAX_INTERVAL = 2 EPOCH = datetime.fromtimestamp(0, ZoneInfo("UTC")) IS_PYPY = hasattr(sys, 'pypy_version_info') scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry')) def to_timestamp(d, default_timezone=ZoneInfo("UTC"), time=monotonic): """Convert datetime to timestamp. If d' is already a timestamp, then that will be used. 
""" if isinstance(d, datetime): if d.tzinfo is None: d = d.replace(tzinfo=default_timezone) diff = _time() - time() return max((d - EPOCH).total_seconds() - diff, 0) return d @total_ordering class Entry: """Schedule Entry.""" if not IS_PYPY: # pragma: no cover __slots__ = ( 'fun', 'args', 'kwargs', 'tref', 'canceled', '_last_run', '__weakref__', ) def __init__(self, fun, args=None, kwargs=None): self.fun = fun self.args = args or [] self.kwargs = kwargs or {} self.tref = weakrefproxy(self) self._last_run = None self.canceled = False def __call__(self): return self.fun(*self.args, **self.kwargs) def cancel(self): try: self.tref.canceled = True except ReferenceError: # pragma: no cover pass def __repr__(self): return ' None: self.stop() def call_at(self, eta, fun, args=(), kwargs=None, priority=0): kwargs = {} if not kwargs else kwargs return self.enter_at(self.Entry(fun, args, kwargs), eta, priority) def call_after(self, secs, fun, args=(), kwargs=None, priority=0): kwargs = {} if not kwargs else kwargs return self.enter_after(secs, self.Entry(fun, args, kwargs), priority) def call_repeatedly(self, secs, fun, args=(), kwargs=None, priority=0): kwargs = {} if not kwargs else kwargs tref = self.Entry(fun, args, kwargs) @wraps(fun) def _reschedules(*args, **kwargs): last, now = tref._last_run, monotonic() lsince = (now - tref._last_run) if last else secs try: if lsince and lsince >= secs: tref._last_run = now return fun(*args, **kwargs) finally: if not tref.canceled: last = tref._last_run next = secs - (now - last) if last else secs self.enter_after(next, tref, priority) tref.fun = _reschedules tref._last_run = None return self.enter_after(secs, tref, priority) def enter_at(self, entry, eta=None, priority=0, time=monotonic): """Enter function into the scheduler. Arguments: --------- entry (~kombu.asynchronous.timer.Entry): Item to enter. eta (datetime.datetime): Scheduled time. priority (int): Unused. """ if eta is None: eta = time() if isinstance(eta, datetime): try: eta = to_timestamp(eta) except Exception as exc: if not self.handle_error(exc): raise return return self._enter(eta, priority, entry) def enter_after(self, secs, entry, priority=0, time=monotonic): return self.enter_at(entry, time() + float(secs), priority) def _enter(self, eta, priority, entry, push=heapq.heappush): push(self._queue, scheduled(eta, priority, entry)) return entry def apply_entry(self, entry): try: entry() except Exception as exc: if not self.handle_error(exc): logger.error('Error in timer: %r', exc, exc_info=True) def handle_error(self, exc_info): if self.on_error: self.on_error(exc_info) return True def stop(self): pass def __iter__(self, min=min, nowfun=monotonic, pop=heapq.heappop, push=heapq.heappush): """Iterate over schedule. This iterator yields a tuple of ``(wait_seconds, entry)``, where if entry is :const:`None` the caller should wait for ``wait_seconds`` until it polls the schedule again. """ max_interval = self.max_interval queue = self._queue while 1: if queue: eventA = queue[0] now, eta = nowfun(), eventA[0] if now < eta: yield min(eta - now, max_interval), None else: eventB = pop(queue) if eventB is eventA: entry = eventA[2] if not entry.canceled: yield None, entry continue else: push(queue, eventB) else: yield None, None def clear(self): self._queue[:] = [] # atomic, without creating a new list. 
def cancel(self, tref): tref.cancel() def __len__(self): return len(self._queue) def __nonzero__(self): return True @property def queue(self, _pop=heapq.heappop): """Snapshot of underlying datastructure.""" events = list(self._queue) return [_pop(v) for v in [events] * len(events)] @property def schedule(self): return self kombu-5.5.3/kombu/clocks.py000066400000000000000000000113311477772317200156260ustar00rootroot00000000000000"""Logical Clocks and Synchronization.""" from __future__ import annotations from itertools import islice from operator import itemgetter from threading import Lock from typing import Any __all__ = ('LamportClock', 'timetuple') R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})' class timetuple(tuple): """Tuple of event clock information. Can be used as part of a heap to keep events ordered. Arguments: --------- clock (Optional[int]): Event clock value. timestamp (float): Event UNIX timestamp value. id (str): Event host id (e.g. ``hostname:pid``). obj (Any): Optional obj to associate with this event. """ __slots__ = () def __new__( cls, clock: int | None, timestamp: float, id: str, obj: Any = None ) -> timetuple: return tuple.__new__(cls, (clock, timestamp, id, obj)) def __repr__(self) -> str: return R_CLOCK.format(*self) def __getnewargs__(self) -> tuple: return tuple(self) def __lt__(self, other: tuple) -> bool: # 0: clock 1: timestamp 3: process id try: A, B = self[0], other[0] # uses logical clock value first if A and B: # use logical clock if available if A == B: # equal clocks use lower process id return self[2] < other[2] return A < B return self[1] < other[1] # ... or use timestamp except IndexError: return NotImplemented def __gt__(self, other: tuple) -> bool: return other < self def __le__(self, other: tuple) -> bool: return not other < self def __ge__(self, other: tuple) -> bool: return not self < other clock = property(itemgetter(0)) timestamp = property(itemgetter(1)) id = property(itemgetter(2)) obj = property(itemgetter(3)) class LamportClock: """Lamport's logical clock. From Wikipedia: A Lamport logical clock is a monotonically incrementing software counter maintained in each process. It follows some simple rules: * A process increments its counter before each event in that process; * When a process sends a message, it includes its counter value with the message; * On receiving a message, the receiver process sets its counter to be greater than the maximum of its own value and the received value before it considers the message received. Conceptually, this logical clock can be thought of as a clock that only has meaning in relation to messages moving between processes. When a process receives a message, it resynchronizes its logical clock with the sender. See Also -------- * `Lamport timestamps`_ * `Lamports distributed mutex`_ .. _`Lamport Timestamps`: https://en.wikipedia.org/wiki/Lamport_timestamps .. _`Lamports distributed mutex`: https://bit.ly/p99ybE *Usage* When sending a message use :meth:`forward` to increment the clock, when receiving a message use :meth:`adjust` to sync with the time stamp of the incoming message. """ #: The clocks current value. 
value = 0 def __init__( self, initial_value: int = 0, Lock: type[Lock] = Lock ) -> None: self.value = initial_value self.mutex = Lock() def adjust(self, other: int) -> int: with self.mutex: value = self.value = max(self.value, other) + 1 return value def forward(self) -> int: with self.mutex: self.value += 1 return self.value def sort_heap(self, h: list[tuple[int, str]]) -> tuple[int, str]: """Sort heap of events. List of tuples containing at least two elements, representing an event, where the first element is the event's scalar clock value, and the second element is the id of the process (usually ``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])`` The list must already be sorted, which is why we refer to it as a heap. The tuple will not be unpacked, so more than two elements can be present. Will return the latest event. """ if h[0][0] == h[1][0]: same = [] for PN in zip(h, islice(h, 1, None)): if PN[0][0] != PN[1][0]: break # Prev and Next's clocks differ same.append(PN[0]) # return first item sorted by process id return sorted(same, key=lambda event: event[1])[0] # clock values unique, return first item return h[0] def __str__(self) -> str: return str(self.value) def __repr__(self) -> str: return f'' kombu-5.5.3/kombu/common.py000066400000000000000000000327071477772317200156520ustar00rootroot00000000000000"""Common Utilities.""" from __future__ import annotations import os import socket import threading from collections import deque from contextlib import contextmanager from functools import partial from itertools import count from uuid import NAMESPACE_OID, uuid3, uuid4, uuid5 from amqp import ChannelError, RecoverableConnectionError from .entity import Exchange, Queue from .log import get_logger from .serialization import registry as serializers from .utils.uuid import uuid __all__ = ('Broadcast', 'maybe_declare', 'uuid', 'itermessages', 'send_reply', 'collect_replies', 'insured', 'drain_consumer', 'eventloop') #: Prefetch count can't exceed short. PREFETCH_COUNT_MAX = 0xFFFF logger = get_logger(__name__) _node_id = None def get_node_id(): global _node_id if _node_id is None: _node_id = uuid4().int return _node_id def generate_oid(node_id, process_id, thread_id, instance): ent = '{:x}-{:x}-{:x}-{:x}'.format( node_id, process_id, thread_id, id(instance)) try: ret = str(uuid3(NAMESPACE_OID, ent)) except ValueError: ret = str(uuid5(NAMESPACE_OID, ent)) return ret def oid_from(instance, threads=True): return generate_oid( get_node_id(), os.getpid(), threading.get_ident() if threads else 0, instance, ) class Broadcast(Queue): """Broadcast queue. Convenience class used to define broadcast queues. Every queue instance will have a unique name, and both the queue and exchange is configured with auto deletion. Arguments: --------- name (str): This is used as the name of the exchange. queue (str): By default a unique id is used for the queue name for every consumer. You can specify a custom queue name here. unique (bool): Always create a unique queue even if a queue name is supplied. **kwargs (Any): See :class:`~kombu.Queue` for a list of additional keyword arguments supported. 
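    Example:
    -------
    A minimal illustrative sketch (the broker URL, exchange name and
    ``on_event`` callback are placeholders)::

        >>> from kombu import Connection
        >>> from kombu.common import Broadcast
        >>> with Connection('amqp://') as conn:
        ...     bcast = Broadcast('events')
        ...     with conn.Consumer([bcast], callbacks=[on_event]):
        ...         conn.drain_events(timeout=1)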
""" attrs = Queue.attrs + (('queue', None),) def __init__(self, name=None, queue=None, unique=False, auto_delete=True, exchange=None, alias=None, **kwargs): if unique: queue = '{}.{}'.format(queue or 'bcast', uuid()) else: queue = queue or f'bcast.{uuid()}' super().__init__( alias=alias or name, queue=queue, name=queue, auto_delete=auto_delete, exchange=(exchange if exchange is not None else Exchange(name, type='fanout')), **kwargs ) def declaration_cached(entity, channel): return entity in channel.connection.client.declared_entities def maybe_declare(entity, channel=None, retry=False, **retry_policy): """Declare entity (cached).""" if retry: return _imaybe_declare(entity, channel, **retry_policy) return _maybe_declare(entity, channel) def _ensure_channel_is_bound(entity, channel): """Make sure the channel is bound to the entity. :param entity: generic kombu nomenclature, generally an exchange or queue :param channel: channel to bind to the entity :return: the updated entity """ is_bound = entity.is_bound if not is_bound: if not channel: raise ChannelError( f"Cannot bind channel {channel} to entity {entity}") entity = entity.bind(channel) return entity def _maybe_declare(entity, channel): # _maybe_declare sets name on original for autogen queues orig = entity _ensure_channel_is_bound(entity, channel) if channel is None or channel.connection is None: # If this was called from the `ensure()` method then the channel could have been invalidated # and the correct channel was re-bound to the entity by calling the `entity.revive()` method. if not entity.is_bound: raise ChannelError( f"channel is None and entity {entity} not bound.") channel = entity.channel declared = ident = None if channel.connection and entity.can_cache_declaration: declared = channel.connection.client.declared_entities ident = hash(entity) if ident in declared: return False if not channel.connection: raise RecoverableConnectionError('channel disconnected') entity.declare(channel=channel) if declared is not None and ident: declared.add(ident) if orig is not None: orig.name = entity.name return True def _imaybe_declare(entity, channel, **retry_policy): entity = _ensure_channel_is_bound(entity, channel) if not entity.channel.connection: raise RecoverableConnectionError('channel disconnected') return entity.channel.connection.client.ensure( entity, _maybe_declare, **retry_policy)(entity, channel) def drain_consumer(consumer, limit=1, timeout=None, callbacks=None): """Drain messages from consumer instance.""" acc = deque() def on_message(body, message): acc.append((body, message)) consumer.callbacks = [on_message] + (callbacks or []) with consumer: for _ in eventloop(consumer.channel.connection.client, limit=limit, timeout=timeout, ignore_timeouts=True): try: yield acc.popleft() except IndexError: pass def itermessages(conn, channel, queue, limit=1, timeout=None, callbacks=None, **kwargs): """Iterator over messages.""" return drain_consumer( conn.Consumer(queues=[queue], channel=channel, **kwargs), limit=limit, timeout=timeout, callbacks=callbacks, ) def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False): """Best practice generator wrapper around ``Connection.drain_events``. Able to drain events forever, with a limit, and optionally ignoring timeout errors (a timeout of 1 is often used in environments where the socket can get "stuck", and is a best practice for Kombu consumers). ``eventloop`` is a generator. Examples -------- >>> from kombu.common import eventloop >>> def run(conn): ... 
it = eventloop(conn, timeout=1, ignore_timeouts=True) ... next(it) # one event consumed, or timed out. ... ... for _ in eventloop(conn, timeout=1, ignore_timeouts=True): ... pass # loop forever. It also takes an optional limit parameter, and timeout errors are propagated by default:: for _ in eventloop(connection, limit=1, timeout=1): pass See Also -------- :func:`itermessages`, which is an event loop bound to one or more consumers, that yields any messages received. """ for i in limit and range(limit) or count(): try: yield conn.drain_events(timeout=timeout) except socket.timeout: if timeout and not ignore_timeouts: # pragma: no cover raise def send_reply(exchange, req, msg, producer=None, retry=False, retry_policy=None, **props): """Send reply for request. Arguments: --------- exchange (kombu.Exchange, str): Reply exchange req (~kombu.Message): Original request, a message with a ``reply_to`` property. producer (kombu.Producer): Producer instance retry (bool): If true must retry according to the ``reply_policy`` argument. retry_policy (Dict): Retry settings. **props (Any): Extra properties. """ return producer.publish( msg, exchange=exchange, retry=retry, retry_policy=retry_policy, **dict({'routing_key': req.properties['reply_to'], 'correlation_id': req.properties.get('correlation_id'), 'serializer': serializers.type_to_name[req.content_type], 'content_encoding': req.content_encoding}, **props) ) def collect_replies(conn, channel, queue, *args, **kwargs): """Generator collecting replies from ``queue``.""" no_ack = kwargs.setdefault('no_ack', True) received = False try: for body, message in itermessages(conn, channel, queue, *args, **kwargs): if not no_ack: message.ack() received = True yield body finally: if received: channel.after_reply_message_received(queue.name) def _ensure_errback(exc, interval): logger.error( 'Connection error: %r. Retry in %ss\n', exc, interval, exc_info=True, ) @contextmanager def _ignore_errors(conn): try: yield except conn.connection_errors + conn.channel_errors: pass def ignore_errors(conn, fun=None, *args, **kwargs): """Ignore connection and channel errors. The first argument must be a connection object, or any other object with ``connection_error`` and ``channel_error`` attributes. Can be used as a function: .. code-block:: python def example(connection): ignore_errors(connection, consumer.channel.close) or as a context manager: .. code-block:: python def example(connection): with ignore_errors(connection): consumer.channel.close() Note: ---- Connection and channel errors should be properly handled, and not ignored. Using this function is only acceptable in a cleanup phase, like when a connection is lost or at shutdown. """ if fun: with _ignore_errors(conn): return fun(*args, **kwargs) return _ignore_errors(conn) def revive_connection(connection, channel, on_revive=None): if on_revive: on_revive(channel) def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts): """Function wrapper to handle connection errors. Ensures function performing broker commands completes despite intermittent connection failures. """ errback = errback or _ensure_errback with pool.acquire(block=True) as conn: conn.ensure_connection(errback=errback) # we cache the channel for subsequent calls, this has to be # reset on revival. 
channel = conn.default_channel revive = partial(revive_connection, conn, on_revive=on_revive) insured = conn.autoretry(fun, channel, errback=errback, on_revive=revive, **opts) retval, _ = insured(*args, **dict(kwargs, connection=conn)) return retval class QoS: """Thread safe increment/decrement of a channels prefetch_count. Arguments: --------- callback (Callable): Function used to set new prefetch count, e.g. ``consumer.qos`` or ``channel.basic_qos``. Will be called with a single ``prefetch_count`` keyword argument. initial_value (int): Initial prefetch count value.. Example: ------- >>> from kombu import Consumer, Connection >>> connection = Connection('amqp://') >>> consumer = Consumer(connection) >>> qos = QoS(consumer.qos, initial_prefetch_count=2) >>> qos.update() # set initial >>> qos.value 2 >>> def in_some_thread(): ... qos.increment_eventually() >>> def in_some_other_thread(): ... qos.decrement_eventually() >>> while 1: ... if qos.prev != qos.value: ... qos.update() # prefetch changed so update. It can be used with any function supporting a ``prefetch_count`` keyword argument:: >>> channel = connection.channel() >>> QoS(channel.basic_qos, 10) >>> def set_qos(prefetch_count): ... print('prefetch count now: %r' % (prefetch_count,)) >>> QoS(set_qos, 10) """ prev = None def __init__(self, callback, initial_value): self.callback = callback self._mutex = threading.RLock() self.value = initial_value or 0 def increment_eventually(self, n=1): """Increment the value, but do not update the channels QoS. Note: ---- The MainThread will be responsible for calling :meth:`update` when necessary. """ with self._mutex: if self.value: self.value = self.value + max(n, 0) return self.value def decrement_eventually(self, n=1): """Decrement the value, but do not update the channels QoS. Note: ---- The MainThread will be responsible for calling :meth:`update` when necessary. """ with self._mutex: if self.value: self.value -= n if self.value < 1: self.value = 1 return self.value def set(self, pcount): """Set channel prefetch_count setting.""" if pcount != self.prev: new_value = pcount if pcount > PREFETCH_COUNT_MAX: logger.warning('QoS: Disabled: prefetch_count exceeds %r', PREFETCH_COUNT_MAX) new_value = 0 logger.debug('basic.qos: prefetch_count->%s', new_value) self.callback(prefetch_count=new_value) self.prev = pcount return pcount def update(self): """Update prefetch count with current value.""" with self._mutex: return self.set(self.value) kombu-5.5.3/kombu/compat.py000066400000000000000000000151401477772317200156350ustar00rootroot00000000000000"""Carrot compatibility interface. See https://pypi.org/project/carrot/ for documentation. """ from __future__ import annotations from itertools import count from typing import TYPE_CHECKING from . 
import messaging from .entity import Exchange, Queue if TYPE_CHECKING: from types import TracebackType __all__ = ('Publisher', 'Consumer') # XXX compat attribute entry_to_queue = Queue.from_dict def _iterconsume(connection, consumer, no_ack=False, limit=None): consumer.consume(no_ack=no_ack) for iteration in count(0): # for infinity if limit and iteration >= limit: break yield connection.drain_events() class Publisher(messaging.Producer): """Carrot compatible producer.""" exchange = '' exchange_type = 'direct' routing_key = '' durable = True auto_delete = False _closed = False def __init__(self, connection, exchange=None, routing_key=None, exchange_type=None, durable=None, auto_delete=None, channel=None, **kwargs): if channel: connection = channel self.exchange = exchange or self.exchange self.exchange_type = exchange_type or self.exchange_type self.routing_key = routing_key or self.routing_key if auto_delete is not None: self.auto_delete = auto_delete if durable is not None: self.durable = durable if not isinstance(self.exchange, Exchange): self.exchange = Exchange(name=self.exchange, type=self.exchange_type, routing_key=self.routing_key, auto_delete=self.auto_delete, durable=self.durable) super().__init__(connection, self.exchange, **kwargs) def send(self, *args, **kwargs): return self.publish(*args, **kwargs) def close(self): super().close() self._closed = True def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() @property def backend(self): return self.channel class Consumer(messaging.Consumer): """Carrot compatible consumer.""" queue = '' exchange = '' routing_key = '' exchange_type = 'direct' durable = True exclusive = False auto_delete = False _closed = False def __init__(self, connection, queue=None, exchange=None, routing_key=None, exchange_type=None, durable=None, exclusive=None, auto_delete=None, **kwargs): self.backend = connection.channel() if durable is not None: self.durable = durable if exclusive is not None: self.exclusive = exclusive if auto_delete is not None: self.auto_delete = auto_delete self.queue = queue or self.queue self.exchange = exchange or self.exchange self.exchange_type = exchange_type or self.exchange_type self.routing_key = routing_key or self.routing_key exchange = Exchange(self.exchange, type=self.exchange_type, routing_key=self.routing_key, auto_delete=self.auto_delete, durable=self.durable) queue = Queue(self.queue, exchange=exchange, routing_key=self.routing_key, durable=self.durable, exclusive=self.exclusive, auto_delete=self.auto_delete) super().__init__(self.backend, queue, **kwargs) def revive(self, channel): self.backend = channel super().revive(channel) def close(self): self.cancel() self.backend.close() self._closed = True def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() def __iter__(self): return self.iterqueue(infinite=True) def fetch(self, no_ack=None, enable_callbacks=False): if no_ack is None: no_ack = self.no_ack message = self.queues[0].get(no_ack) if message: if enable_callbacks: self.receive(message.payload, message) return message def process_next(self): raise NotImplementedError('Use fetch(enable_callbacks=True)') def discard_all(self, filterfunc=None): if filterfunc is not None: raise NotImplementedError( 'discard_all does not implement filters') return self.purge() def iterconsume(self, 
limit=None, no_ack=None): return _iterconsume(self.connection, self, no_ack, limit) def wait(self, limit=None): it = self.iterconsume(limit) return list(it) def iterqueue(self, limit=None, infinite=False): for items_since_start in count(): # for infinity item = self.fetch() if (not infinite and item is None) or \ (limit and items_since_start >= limit): break yield item class ConsumerSet(messaging.Consumer): def __init__(self, connection, from_dict=None, consumers=None, channel=None, **kwargs): if channel: self._provided_channel = True self.backend = channel else: self._provided_channel = False self.backend = connection.channel() queues = [] if consumers: for consumer in consumers: queues.extend(consumer.queues) if from_dict: for queue_name, queue_options in from_dict.items(): queues.append(Queue.from_dict(queue_name, **queue_options)) super().__init__(self.backend, queues, **kwargs) def iterconsume(self, limit=None, no_ack=False): return _iterconsume(self.connection, self, no_ack, limit) def discard_all(self): return self.purge() def add_consumer_from_dict(self, queue, **options): return self.add_queue(Queue.from_dict(queue, **options)) def add_consumer(self, consumer): for queue in consumer.queues: self.add_queue(queue) def revive(self, channel): self.backend = channel super().revive(channel) def close(self): self.cancel() if not self._provided_channel: self.channel.close() kombu-5.5.3/kombu/compression.py000066400000000000000000000056501477772317200167200ustar00rootroot00000000000000"""Compression utilities.""" from __future__ import annotations import zlib from kombu.utils.encoding import ensure_bytes _aliases = {} _encoders = {} _decoders = {} __all__ = ('register', 'encoders', 'get_encoder', 'get_decoder', 'compress', 'decompress') def register(encoder, decoder, content_type, aliases=None): """Register new compression method. Arguments: --------- encoder (Callable): Function used to compress text. decoder (Callable): Function used to decompress previously compressed text. content_type (str): The mime type this compression method identifies as. aliases (Sequence[str]): A list of names to associate with this compression method. """ _encoders[content_type] = encoder _decoders[content_type] = decoder if aliases: _aliases.update((alias, content_type) for alias in aliases) def encoders(): """Return a list of available compression methods.""" return list(_encoders) def get_encoder(t): """Get encoder by alias name.""" t = _aliases.get(t, t) return _encoders[t], t def get_decoder(t): """Get decoder by alias name.""" return _decoders[_aliases.get(t, t)] def compress(body, content_type): """Compress text. Arguments: --------- body (AnyStr): The text to compress. content_type (str): mime-type of compression method to use. """ encoder, content_type = get_encoder(content_type) return encoder(ensure_bytes(body)), content_type def decompress(body, content_type): """Decompress compressed text. Arguments: --------- body (AnyStr): Previously compressed text to uncompress. content_type (str): mime-type of compression method used. 
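    Example:
    -------
    Round-trip with :func:`compress` (the payload is a placeholder;
    ``zlib`` is registered below as an alias of ``application/x-gzip``)::

        >>> data, content_type = compress(b'my data', 'zlib')
        >>> content_type
        'application/x-gzip'
        >>> decompress(data, content_type)
        b'my data'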
""" return get_decoder(content_type)(body) register(zlib.compress, zlib.decompress, 'application/x-gzip', aliases=['gzip', 'zlib']) try: import bz2 except ImportError: # pragma: no cover pass # No bz2 support else: register(bz2.compress, bz2.decompress, 'application/x-bz2', aliases=['bzip2', 'bzip']) try: import brotli except ImportError: # pragma: no cover pass else: register(brotli.compress, brotli.decompress, 'application/x-brotli', aliases=['brotli']) try: import lzma except ImportError: # pragma: no cover pass # no lzma support else: register(lzma.compress, lzma.decompress, 'application/x-lzma', aliases=['lzma', 'xz']) try: import zstandard as zstd except ImportError: # pragma: no cover pass else: def zstd_compress(body): c = zstd.ZstdCompressor() return c.compress(body) def zstd_decompress(body): d = zstd.ZstdDecompressor() return d.decompress(body) register(zstd_compress, zstd_decompress, 'application/zstd', aliases=['zstd', 'zstandard']) kombu-5.5.3/kombu/connection.py000066400000000000000000001210001477772317200165020ustar00rootroot00000000000000"""Client (Connection).""" from __future__ import annotations import os import socket import sys from contextlib import contextmanager from itertools import count, cycle from operator import itemgetter from typing import TYPE_CHECKING, Any try: from ssl import CERT_NONE ssl_available = True except ImportError: # pragma: no cover CERT_NONE = None ssl_available = False # jython breaks on relative import for .exceptions for some reason # (Issue #112) from kombu import exceptions from .log import get_logger from .resource import Resource from .transport import get_transport_cls, supports_librabbitmq from .utils.collections import HashedSeq from .utils.functional import dictfilter, lazy, retry_over_time, shufflecycle from .utils.objects import cached_property from .utils.url import as_url, maybe_sanitize_url, parse_url, quote, urlparse if TYPE_CHECKING: from kombu.transport.virtual import Channel if sys.version_info < (3, 10): from typing_extensions import TypeGuard else: from typing import TypeGuard from types import TracebackType __all__ = ('Connection', 'ConnectionPool', 'ChannelPool') logger = get_logger(__name__) roundrobin_failover = cycle resolve_aliases = { 'pyamqp': 'amqp', 'librabbitmq': 'amqp', } failover_strategies = { 'round-robin': roundrobin_failover, 'shuffle': shufflecycle, } _log_connection = os.environ.get('KOMBU_LOG_CONNECTION', False) _log_channel = os.environ.get('KOMBU_LOG_CHANNEL', False) class Connection: """A connection to the broker. Example: ------- >>> Connection('amqp://guest:guest@localhost:5672//') >>> Connection('amqp://foo;amqp://bar', ... failover_strategy='round-robin') >>> Connection('redis://', transport_options={ ... 'visibility_timeout': 3000, ... }) >>> import ssl >>> Connection('amqp://', login_method='EXTERNAL', ssl={ ... 'ca_certs': '/etc/pki/tls/certs/something.crt', ... 'keyfile': '/etc/something/system.key', ... 'certfile': '/etc/something/system.cert', ... 'cert_reqs': ssl.CERT_REQUIRED, ... }) Note: ---- SSL currently only works with the py-amqp, qpid and redis transports. For other transports you can use stunnel. Arguments: --------- URL (str, Sequence): Broker URL, or a list of URLs. Keyword Arguments: ----------------- ssl (bool/dict): Use SSL to connect to the server. Default is ``False``. May not be supported by the specified transport. transport (Transport): Default transport if not specified in the URL. connect_timeout (float): Timeout in seconds for connecting to the server. 
May not be supported by the specified transport. transport_options (Dict): A dict of additional connection arguments to pass to alternate kombu channel implementations. Consult the transport documentation for available options. heartbeat (float): Heartbeat interval in int/float seconds. Note that if heartbeats are enabled then the :meth:`heartbeat_check` method must be called regularly, around once per second. Note: ---- The connection is established lazily when needed. If you need the connection to be established, then force it by calling :meth:`connect`:: >>> conn = Connection('amqp://') >>> conn.connect() and always remember to close the connection:: >>> conn.release() These options have been replaced by the URL argument, but are still supported for backwards compatibility: :keyword hostname: Host name/address. NOTE: You cannot specify both the URL argument and use the hostname keyword argument at the same time. :keyword userid: Default user name if not provided in the URL. :keyword password: Default password if not provided in the URL. :keyword virtual_host: Default virtual host if not provided in the URL. :keyword port: Default port if not provided in the URL. """ port = None virtual_host = '/' connect_timeout = 5 _closed = None _connection = None _default_channel = None _transport = None _logger = False uri_prefix = None #: The cache of declared entities is per connection, #: in case the server loses data. declared_entities = None #: Iterator returning the next broker URL to try in the event #: of connection failure (initialized by :attr:`failover_strategy`). cycle = None #: Additional transport specific options, #: passed on to the transport instance. transport_options = None #: Strategy used to select new hosts when reconnecting after connection #: failure. One of "round-robin", "shuffle" or any custom iterator #: constantly yielding new URLs to try. failover_strategy = 'round-robin' #: Heartbeat value, currently only supported by the py-amqp transport. heartbeat = None resolve_aliases = resolve_aliases failover_strategies = failover_strategies hostname = userid = password = ssl = login_method = None def __init__(self, hostname='localhost', userid=None, password=None, virtual_host=None, port=None, insist=False, ssl=False, transport=None, connect_timeout=5, transport_options=None, login_method=None, uri_prefix=None, heartbeat=0, failover_strategy='round-robin', alternates=None, **kwargs): alt = [] if alternates is None else alternates # have to spell the args out, just to get nice docstrings :( params = self._initial_params = { 'hostname': hostname, 'userid': userid, 'password': password, 'virtual_host': virtual_host, 'port': port, 'insist': insist, 'ssl': ssl, 'transport': transport, 'connect_timeout': connect_timeout, 'login_method': login_method, 'heartbeat': heartbeat } if hostname and not isinstance(hostname, str): alt.extend(hostname) hostname = alt[0] params.update(hostname=hostname) if hostname: if ';' in hostname: alt = hostname.split(';') + alt hostname = alt[0] params.update(hostname=hostname) if '://' in hostname and '+' in hostname[:hostname.index('://')]: # e.g. 
sqla+mysql://root:masterkey@localhost/ params['transport'], params['hostname'] = \ hostname.split('+', 1) self.uri_prefix = params['transport'] elif '://' in hostname: transport = transport or urlparse(hostname).scheme if not get_transport_cls(transport).can_parse_url: # we must parse the URL url_params = parse_url(hostname) params.update( dictfilter(url_params), hostname=url_params['hostname'], ) params['transport'] = transport self._init_params(**params) # fallback hosts self.alt = alt # keep text representation for .info # only temporary solution as this won't work when # passing a custom object (Issue celery/celery#3320). self._failover_strategy = failover_strategy or 'round-robin' self.failover_strategy = self.failover_strategies.get( self._failover_strategy) or self._failover_strategy if self.alt: self.cycle = self.failover_strategy(self.alt) next(self.cycle) # skip first entry if transport_options is None: transport_options = {} self.transport_options = transport_options if _log_connection: # pragma: no cover self._logger = True if uri_prefix: self.uri_prefix = uri_prefix self.declared_entities = set() def switch(self, conn_str): """Switch connection parameters to use a new URL or hostname. Note: ---- Does not reconnect! Arguments: --------- conn_str (str): either a hostname or URL. """ self.close() self.declared_entities.clear() self._closed = False conn_params = ( parse_url(conn_str) if "://" in conn_str else {"hostname": conn_str} ) self._init_params(**dict(self._initial_params, **conn_params)) def maybe_switch_next(self): """Switch to next URL given by the current failover strategy.""" if self.cycle: self.switch(next(self.cycle)) def _init_params(self, hostname, userid, password, virtual_host, port, insist, ssl, transport, connect_timeout, login_method, heartbeat): transport = transport or 'amqp' if transport == 'amqp' and supports_librabbitmq(): transport = 'librabbitmq' if transport == 'rediss' and ssl_available and not ssl: logger.warning( 'Secure redis scheme specified (rediss) with no ssl ' 'options, defaulting to insecure SSL behaviour.' ) ssl = {'ssl_cert_reqs': CERT_NONE} self.hostname = hostname self.userid = userid self.password = password self.login_method = login_method self.virtual_host = virtual_host or self.virtual_host self.port = port or self.port self.insist = insist self.connect_timeout = connect_timeout self.ssl = ssl self.transport_cls = transport self.heartbeat = heartbeat and float(heartbeat) def register_with_event_loop(self, loop): self.transport.register_with_event_loop(self.connection, loop) def _debug(self, msg, *args, **kwargs): if self._logger: # pragma: no cover fmt = '[Kombu connection:{id:#x}] {msg}' logger.debug(fmt.format(id=id(self), msg=str(msg)), *args, **kwargs) def connect(self): """Establish connection to server immediately.""" return self._ensure_connection( max_retries=1, reraise_as_library_errors=False ) def channel(self): """Create and return a new channel.""" self._debug('create channel') chan = self.transport.create_channel(self.connection) if _log_channel: # pragma: no cover from .utils.debug import Logwrapped return Logwrapped(chan, 'kombu.channel', '[Kombu channel:{0.channel_id}] ') return chan def heartbeat_check(self, rate=2): """Check heartbeats. Allow the transport to perform any periodic tasks required to make heartbeats work. This should be called approximately every second. If the current transport does not support heartbeats then this is a noop operation. 
Arguments: --------- rate (int): Rate is how often the tick is called compared to the actual heartbeat value. E.g. if the heartbeat is set to 3 seconds, and the tick is called every 3 / 2 seconds, then the rate is 2. This value is currently unused by any transports. """ return self.transport.heartbeat_check(self.connection, rate=rate) def drain_events(self, **kwargs): """Wait for a single event from the server. Arguments: --------- timeout (float): Timeout in seconds before we give up. Raises ------ socket.timeout: if the timeout is exceeded. """ return self.transport.drain_events(self.connection, **kwargs) def maybe_close_channel(self, channel): """Close given channel, but ignore connection and channel errors.""" try: channel.close() except (self.connection_errors + self.channel_errors): pass def _do_close_self(self): # Close only connection and channel(s), but not transport. self.declared_entities.clear() if self._default_channel: self.maybe_close_channel(self._default_channel) if self._connection: try: self.transport.close_connection(self._connection) except self.connection_errors + (AttributeError, socket.error): pass self._connection = None def _close(self): """Really close connection, even if part of a connection pool.""" self._do_close_self() self._do_close_transport() self._debug('closed') self._closed = True def _do_close_transport(self): if self._transport: self._transport.client = None self._transport = None def collect(self, socket_timeout=None): # amqp requires communication to close, we don't need that just # to clear out references, Transport._collect can also be implemented # by other transports that want fast after fork try: gc_transport = self._transport._collect except AttributeError: _timeo = socket.getdefaulttimeout() socket.setdefaulttimeout(socket_timeout) try: self._do_close_self() except socket.timeout: pass finally: socket.setdefaulttimeout(_timeo) else: gc_transport(self._connection) self._do_close_transport() self.declared_entities.clear() self._connection = None def release(self): """Close the connection (if open).""" self._close() close = release def ensure_connection(self, *args, **kwargs): """Public interface of _ensure_connection for retro-compatibility. Returns kombu.Connection instance. """ self._ensure_connection(*args, **kwargs) return self def _ensure_connection( self, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30, callback=None, reraise_as_library_errors=True, timeout=None ): """Ensure we have a connection to the server. If not retry establishing the connection with the settings specified. Arguments: --------- errback (Callable): Optional callback called each time the connection can't be established. Arguments provided are the exception raised and the interval that will be slept ``(exc, interval)``. max_retries (int): Maximum number of times to retry. If this limit is exceeded the connection error will be re-raised. interval_start (float): The number of seconds we start sleeping for. interval_step (float): How many seconds added to the interval for each retry. interval_max (float): Maximum number of seconds to sleep between each retry. callback (Callable): Optional callback that is called for every internal iteration (1 s). timeout (int): Maximum amount of time in seconds to spend attempting to connect, total over all retries. 
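        Example:
        -------
        Normally reached through the public :meth:`ensure_connection`
        wrapper above; an illustrative call (retry values are placeholders)::

            >>> conn = Connection('amqp://')
            >>> conn = conn.ensure_connection(
            ...     max_retries=3, interval_start=1, interval_step=2)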
""" if self.connected: return self._connection def on_error(exc, intervals, retries, interval=0): round = self.completes_cycle(retries) if round: interval = next(intervals) if errback: errback(exc, interval) self.maybe_switch_next() # select next host return interval if round else 0 ctx = self._reraise_as_library_errors if not reraise_as_library_errors: ctx = self._dummy_context with ctx(): return retry_over_time( self._connection_factory, self.recoverable_connection_errors, (), {}, on_error, max_retries, interval_start, interval_step, interval_max, callback, timeout=timeout ) @contextmanager def _reraise_as_library_errors( self, ConnectionError=exceptions.OperationalError, ChannelError=exceptions.OperationalError): try: yield except (ConnectionError, ChannelError): raise except self.recoverable_connection_errors as exc: raise ConnectionError(str(exc)) from exc except self.recoverable_channel_errors as exc: raise ChannelError(str(exc)) from exc @contextmanager def _dummy_context(self): yield def completes_cycle(self, retries): """Return true if the cycle is complete after number of `retries`.""" return not (retries + 1) % len(self.alt) if self.alt else True def revive(self, new_channel): """Revive connection after connection re-established.""" if self._default_channel and new_channel is not self._default_channel: self.maybe_close_channel(self._default_channel) self._default_channel = None def ensure(self, obj, fun, errback=None, max_retries=None, interval_start=1, interval_step=1, interval_max=1, on_revive=None, retry_errors=None): """Ensure operation completes. Regardless of any channel/connection errors occurring. Retries by establishing the connection, and reapplying the function. Arguments: --------- obj: The object to ensure an action on. fun (Callable): Method to apply. errback (Callable): Optional callback called each time the connection can't be established. Arguments provided are the exception raised and the interval that will be slept ``(exc, interval)``. max_retries (int): Maximum number of times to retry. If this limit is exceeded the connection error will be re-raised. interval_start (float): The number of seconds we start sleeping for. interval_step (float): How many seconds added to the interval for each retry. interval_max (float): Maximum number of seconds to sleep between each retry. on_revive (Callable): Optional callback called whenever revival completes successfully retry_errors (tuple): Optional list of errors to retry on regardless of the connection state. Examples -------- >>> from kombu import Connection, Producer >>> conn = Connection('amqp://') >>> producer = Producer(conn) >>> def errback(exc, interval): ... logger.error('Error: %r', exc, exc_info=1) ... logger.info('Retry in %s seconds.', interval) >>> publish = conn.ensure(producer, producer.publish, ... 
errback=errback, max_retries=3) >>> publish({'hello': 'world'}, routing_key='dest') """ if retry_errors is None: retry_errors = tuple() def _ensured(*args, **kwargs): got_connection = 0 conn_errors = self.recoverable_connection_errors chan_errors = self.recoverable_channel_errors has_modern_errors = hasattr( self.transport, 'recoverable_connection_errors', ) with self._reraise_as_library_errors(): for retries in count(0): # for infinity try: return fun(*args, **kwargs) except retry_errors as exc: if max_retries is not None and retries >= max_retries: raise self._debug('ensure retry policy error: %r', exc, exc_info=1) except conn_errors as exc: if got_connection and not has_modern_errors: # transport can not distinguish between # recoverable/irrecoverable errors, so we propagate # the error if it persists after a new connection # was successfully established. raise if max_retries is not None and retries >= max_retries: raise self._debug('ensure connection error: %r', exc, exc_info=1) self.collect() errback and errback(exc, 0) remaining_retries = None if max_retries is not None: remaining_retries = max(max_retries - retries, 1) self._ensure_connection( errback, remaining_retries, interval_start, interval_step, interval_max, reraise_as_library_errors=False, ) channel = self.default_channel obj.revive(channel) if on_revive: on_revive(channel) got_connection += 1 except chan_errors as exc: if max_retries is not None and retries > max_retries: raise self._debug('ensure channel error: %r', exc, exc_info=1) errback and errback(exc, 0) _ensured.__name__ = f'{fun.__name__}(ensured)' _ensured.__doc__ = fun.__doc__ _ensured.__module__ = fun.__module__ return _ensured def autoretry(self, fun, channel=None, **ensure_options): """Decorator for functions supporting a ``channel`` keyword argument. The resulting callable will retry calling the function if it raises connection or channel related errors. The return value will be a tuple of ``(retval, last_created_channel)``. If a ``channel`` is not provided, then one will be automatically acquired (remember to close it afterwards). See Also -------- :meth:`ensure` for the full list of supported keyword arguments. Example: ------- >>> channel = connection.channel() >>> try: ... ret, channel = connection.autoretry( ... publish_messages, channel) ... finally: ... 
channel.close() """ channels = [channel] class Revival: __name__ = getattr(fun, '__name__', None) __module__ = getattr(fun, '__module__', None) __doc__ = getattr(fun, '__doc__', None) def __init__(self, connection): self.connection = connection def revive(self, channel): channels[0] = channel def __call__(self, *args, **kwargs): if channels[0] is None: self.revive(self.connection.default_channel) return fun(*args, channel=channels[0], **kwargs), channels[0] revive = Revival(self) return self.ensure(revive, revive, **ensure_options) def create_transport(self): return self.get_transport_cls()(client=self) def get_transport_cls(self): """Get the currently used transport class.""" transport_cls = self.transport_cls if not transport_cls or isinstance(transport_cls, str): transport_cls = get_transport_cls(transport_cls) return transport_cls def clone(self, **kwargs): """Create a copy of the connection with same settings.""" return self.__class__(**dict(self._info(resolve=False), **kwargs)) def get_heartbeat_interval(self): return self.transport.get_heartbeat_interval(self.connection) def _info(self, resolve=True): transport_cls = self.transport_cls if resolve: transport_cls = self.resolve_aliases.get( transport_cls, transport_cls) D = self.transport.default_connection_params if not self.hostname and D.get('hostname'): logger.warning( "No hostname was supplied. " f"Reverting to default '{D.get('hostname')}'") hostname = D.get('hostname') else: hostname = self.hostname if self.uri_prefix: hostname = f'{self.uri_prefix}+{hostname}' info = ( ('hostname', hostname), ('userid', self.userid or D.get('userid')), ('password', self.password or D.get('password')), ('virtual_host', self.virtual_host or D.get('virtual_host')), ('port', self.port or D.get('port')), ('insist', self.insist), ('ssl', self.ssl), ('transport', transport_cls), ('connect_timeout', self.connect_timeout), ('transport_options', self.transport_options), ('login_method', self.login_method or D.get('login_method')), ('uri_prefix', self.uri_prefix), ('heartbeat', self.heartbeat), ('failover_strategy', self._failover_strategy), ('alternates', self.alt), ) return info def info(self): """Get connection info.""" return dict(self._info()) def __eqhash__(self): return HashedSeq(self.transport_cls, self.hostname, self.userid, self.password, self.virtual_host, self.port, repr(self.transport_options)) def as_uri(self, include_password=False, mask='**', getfields=itemgetter('port', 'userid', 'password', 'virtual_host', 'transport')) -> str: """Convert connection parameters to URL form.""" hostname = self.hostname or 'localhost' if self.transport.can_parse_url: connection_as_uri = self.hostname try: return self.transport.as_uri( connection_as_uri, include_password, mask) except NotImplementedError: pass if self.uri_prefix: connection_as_uri = f'{self.uri_prefix}+{hostname}' if not include_password: connection_as_uri = maybe_sanitize_url(connection_as_uri) return connection_as_uri if self.uri_prefix: connection_as_uri = f'{self.uri_prefix}+{hostname}' if not include_password: connection_as_uri = maybe_sanitize_url(connection_as_uri) return connection_as_uri fields = self.info() port, userid, password, vhost, transport = getfields(fields) return as_url( transport, hostname, port, userid, password, quote(vhost), sanitize=not include_password, mask=mask, ) def Pool(self, limit=None, **kwargs): """Pool of connections. See Also -------- :class:`ConnectionPool`. Arguments: --------- limit (int): Maximum number of active connections. Default is no limit. 
Example: ------- >>> connection = Connection('amqp://') >>> pool = connection.Pool(2) >>> c1 = pool.acquire() >>> c2 = pool.acquire() >>> c3 = pool.acquire() Traceback (most recent call last): File "", line 1, in File "kombu/connection.py", line 354, in acquire raise ConnectionLimitExceeded(self.limit) kombu.exceptions.ConnectionLimitExceeded: 2 >>> c1.release() >>> c3 = pool.acquire() """ return ConnectionPool(self, limit, **kwargs) def ChannelPool(self, limit=None, **kwargs): """Pool of channels. See Also -------- :class:`ChannelPool`. Arguments: --------- limit (int): Maximum number of active channels. Default is no limit. Example: ------- >>> connection = Connection('amqp://') >>> pool = connection.ChannelPool(2) >>> c1 = pool.acquire() >>> c2 = pool.acquire() >>> c3 = pool.acquire() Traceback (most recent call last): File "", line 1, in File "kombu/connection.py", line 354, in acquire raise ChannelLimitExceeded(self.limit) kombu.connection.ChannelLimitExceeded: 2 >>> c1.release() >>> c3 = pool.acquire() """ return ChannelPool(self, limit, **kwargs) def Producer(self, channel=None, *args, **kwargs): """Create new :class:`kombu.Producer` instance.""" from .messaging import Producer return Producer(channel or self, *args, **kwargs) def Consumer(self, queues=None, channel=None, *args, **kwargs): """Create new :class:`kombu.Consumer` instance.""" from .messaging import Consumer return Consumer(channel or self, queues, *args, **kwargs) def SimpleQueue(self, name, no_ack=None, queue_opts=None, queue_args=None, exchange_opts=None, channel=None, **kwargs): """Simple persistent queue API. Create new :class:`~kombu.simple.SimpleQueue`, using a channel from this connection. If ``name`` is a string, a queue and exchange will be automatically created using that name as the name of the queue and exchange, also it will be used as the default routing key. Arguments: --------- name (str, kombu.Queue): Name of the queue/or a queue. no_ack (bool): Disable acknowledgments. Default is false. queue_opts (Dict): Additional keyword arguments passed to the constructor of the automatically created :class:`~kombu.Queue`. queue_args (Dict): Additional keyword arguments passed to the constructor of the automatically created :class:`~kombu.Queue` for setting implementation extensions (e.g., in RabbitMQ). exchange_opts (Dict): Additional keyword arguments passed to the constructor of the automatically created :class:`~kombu.Exchange`. channel (ChannelT): Custom channel to use. If not specified the connection default channel is used. """ from .simple import SimpleQueue return SimpleQueue(channel or self, name, no_ack, queue_opts, queue_args, exchange_opts, **kwargs) def SimpleBuffer(self, name, no_ack=None, queue_opts=None, queue_args=None, exchange_opts=None, channel=None, **kwargs): """Simple ephemeral queue API. Create new :class:`~kombu.simple.SimpleQueue` using a channel from this connection. See Also -------- Same as :meth:`SimpleQueue`, but configured with buffering semantics. The resulting queue and exchange will not be durable, also auto delete is enabled. Messages will be transient (not persistent), and acknowledgments are disabled (``no_ack``). 
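        Example:
        -------
        A minimal sketch, assuming the default broker URL and a throwaway
        buffer name (``payload`` is the decoded message body)::

            >>> with Connection('amqp://') as conn:
            ...     buf = conn.SimpleBuffer('test_buffer')
            ...     buf.put('hello world')
            ...     message = buf.get(timeout=1)
            ...     print(message.payload)
            ...     buf.close()
            hello world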
""" from .simple import SimpleBuffer return SimpleBuffer(channel or self, name, no_ack, queue_opts, queue_args, exchange_opts, **kwargs) def _establish_connection(self): self._debug('establishing connection...') conn = self.transport.establish_connection() self._debug('connection established: %r', self) return conn def supports_exchange_type(self, exchange_type): return exchange_type in self.transport.implements.exchange_type def __repr__(self): return f'' def __copy__(self): return self.clone() def __reduce__(self): return self.__class__, tuple(self.info().values()), None def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.release() @property def qos_semantics_matches_spec(self): return self.transport.qos_semantics_matches_spec(self.connection) def _extract_failover_opts(self): conn_opts = {'timeout': self.connect_timeout} transport_opts = self.transport_options if transport_opts: if 'max_retries' in transport_opts: conn_opts['max_retries'] = transport_opts['max_retries'] if 'interval_start' in transport_opts: conn_opts['interval_start'] = transport_opts['interval_start'] if 'interval_step' in transport_opts: conn_opts['interval_step'] = transport_opts['interval_step'] if 'interval_max' in transport_opts: conn_opts['interval_max'] = transport_opts['interval_max'] if 'connect_retries_timeout' in transport_opts: conn_opts['timeout'] = \ transport_opts['connect_retries_timeout'] if 'errback' in transport_opts: conn_opts['errback'] = transport_opts['errback'] if 'callback' in transport_opts: conn_opts['callback'] = transport_opts['callback'] return conn_opts @property def connected(self): """Return true if the connection has been established.""" return (not self._closed and self._connection is not None and self.transport.verify_connection(self._connection)) @property def connection(self): """The underlying connection object. Warning: ------- This instance is transport specific, so do not depend on the interface of this object. """ if not self._closed: if not self.connected: return self._ensure_connection( max_retries=1, reraise_as_library_errors=False ) return self._connection def _connection_factory(self): self.declared_entities.clear() self._default_channel = None self._connection = self._establish_connection() self._closed = False return self._connection @property def default_channel(self) -> Channel: """Default channel. Created upon access and closed when the connection is closed. Note: ---- Can be used for automatic channel handling when you only need one channel, and also it is the channel implicitly used if a connection is passed instead of a channel, to functions that require a channel. """ # make sure we're still connected, and if not refresh. conn_opts = self._extract_failover_opts() self._ensure_connection(**conn_opts) if self._default_channel is None: self._default_channel = self.channel() return self._default_channel @property def host(self): """The host as a host name/port pair separated by colon.""" return ':'.join([self.hostname, str(self.port)]) @property def transport(self): if self._transport is None: self._transport = self.create_transport() return self._transport @cached_property def manager(self): """AMQP Management API. Experimental manager that can be used to manage/monitor the broker instance. Not available for all transports. 
""" return self.transport.manager def get_manager(self, *args, **kwargs): return self.transport.get_manager(*args, **kwargs) @cached_property def recoverable_connection_errors(self): """Recoverable connection errors. List of connection related exceptions that can be recovered from, but where the connection must be closed and re-established first. """ try: return self.get_transport_cls().recoverable_connection_errors except AttributeError: # There were no such classification before, # and all errors were assumed to be recoverable, # so this is a fallback for transports that do # not support the new recoverable/irrecoverable classes. return self.connection_errors + self.channel_errors @cached_property def recoverable_channel_errors(self): """Recoverable channel errors. List of channel related exceptions that can be automatically recovered from without re-establishing the connection. """ try: return self.get_transport_cls().recoverable_channel_errors except AttributeError: return () @cached_property def connection_errors(self): """List of exceptions that may be raised by the connection.""" return self.get_transport_cls().connection_errors @cached_property def channel_errors(self): """List of exceptions that may be raised by the channel.""" return self.get_transport_cls().channel_errors @property def supports_heartbeats(self): return self.transport.implements.heartbeats @property def is_evented(self): return self.transport.implements.asynchronous BrokerConnection = Connection class PooledConnection(Connection): """Wraps :class:`kombu.Connection`. This wrapper modifies :meth:`kombu.Connection.__exit__` to close the connection in case any exception occurred while the context was active. """ def __init__(self, pool, **kwargs): self._pool = pool super().__init__(**kwargs) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None and self._pool.limit: self._pool.replace(self) return super().__exit__(exc_type, exc_val, exc_tb) class ConnectionPool(Resource): """Pool of connections.""" LimitExceeded = exceptions.ConnectionLimitExceeded close_after_fork = True def __init__(self, connection, limit=None, **kwargs): self.connection = connection super().__init__(limit=limit) def new(self): return PooledConnection(self, **dict(self.connection._info(resolve=False))) def release_resource(self, resource): try: resource._debug('released') except AttributeError: pass def close_resource(self, resource): resource._close() def collect_resource(self, resource, socket_timeout=0.1): if not isinstance(resource, lazy): return resource.collect(socket_timeout) @contextmanager def acquire_channel(self, block=False): with self.acquire(block=block) as connection: yield connection, connection.default_channel def setup(self): if self.limit: q = self._resource.queue # Keep in mind dirty/used resources while len(q) < self.limit - len(self._dirty): self._resource.put_nowait(lazy(self.new)) def prepare(self, resource): if callable(resource): resource = resource() resource._debug('acquired') return resource class ChannelPool(Resource): """Pool of channels.""" LimitExceeded = exceptions.ChannelLimitExceeded def __init__(self, connection, limit=None, **kwargs): self.connection = connection super().__init__(limit=limit) def new(self): return lazy(self.connection.channel) def setup(self): channel = self.new() if self.limit: q = self._resource.queue # Keep in mind dirty/used resources while len(q) < self.limit - len(self._dirty): self._resource.put_nowait(lazy(channel)) def prepare(self, 
channel): if callable(channel): channel = channel() return channel def maybe_channel(channel: Channel | Connection) -> Channel: """Get channel from object. Return the default channel if argument is a connection instance, otherwise just return the channel given. """ if is_connection(channel): return channel.default_channel return channel def is_connection(obj: Any) -> TypeGuard[Connection]: return isinstance(obj, Connection) kombu-5.5.3/kombu/entity.py000066400000000000000000001006611477772317200156710ustar00rootroot00000000000000"""Exchange and Queue declarations.""" from __future__ import annotations import numbers from .abstract import MaybeChannelBound, Object from .exceptions import ContentDisallowed from .serialization import prepare_accept_content TRANSIENT_DELIVERY_MODE = 1 PERSISTENT_DELIVERY_MODE = 2 DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE, 'persistent': PERSISTENT_DELIVERY_MODE} __all__ = ('Exchange', 'Queue', 'binding', 'maybe_delivery_mode') INTERNAL_EXCHANGE_PREFIX = ('amq.',) def _reprstr(s): s = repr(s) if isinstance(s, str) and s.startswith("u'"): return s[2:-1] return s[1:-1] def pretty_bindings(bindings): return '[{}]'.format(', '.join(map(str, bindings))) def maybe_delivery_mode( v, modes=None, default=PERSISTENT_DELIVERY_MODE): """Get delivery mode by name (or none if undefined).""" modes = DELIVERY_MODES if not modes else modes if v: return v if isinstance(v, numbers.Integral) else modes[v] return default class Exchange(MaybeChannelBound): """An Exchange declaration. Arguments: --------- name (str): See :attr:`name`. type (str): See :attr:`type`. channel (kombu.Connection, ChannelT): See :attr:`channel`. durable (bool): See :attr:`durable`. auto_delete (bool): See :attr:`auto_delete`. delivery_mode (enum): See :attr:`delivery_mode`. arguments (Dict): See :attr:`arguments`. no_declare (bool): See :attr:`no_declare` Attributes ---------- name (str): Name of the exchange. Default is no name (the default exchange). type (str): *This description of AMQP exchange types was shamelessly stolen from the blog post `AMQP in 10 minutes: Part 4`_ by Rajith Attapattu. Reading this article is recommended if you're new to amqp.* "AMQP defines four default exchange types (routing algorithms) that covers most of the common messaging use cases. An AMQP broker can also define additional exchange types, so see your broker manual for more information about available exchange types. * `direct` (*default*) Direct match between the routing key in the message, and the routing criteria used when a queue is bound to this exchange. * `topic` Wildcard match between the routing key and the routing pattern specified in the exchange/queue binding. The routing key is treated as zero or more words delimited by `"."` and supports special wildcard characters. `"*"` matches a single word and `"#"` matches zero or more words. * `fanout` Queues are bound to this exchange with no arguments. Hence any message sent to this exchange will be forwarded to all queues bound to this exchange. * `headers` Queues are bound to this exchange with a table of arguments containing headers and values (optional). A special argument named "x-match" determines the matching algorithm, where `"all"` implies an `AND` (all pairs must match) and `"any"` implies `OR` (at least one pair must match). :attr:`arguments` is used to specify the arguments. .. _`AMQP in 10 minutes: Part 4`: https://bit.ly/2rcICv5 channel (ChannelT): The channel the exchange is bound to (if bound). 
durable (bool): Durable exchanges remain active when a server restarts. Non-durable exchanges (transient exchanges) are purged when a server restarts. Default is :const:`True`. auto_delete (bool): If set, the exchange is deleted when all queues have finished using it. Default is :const:`False`. delivery_mode (enum): The default delivery mode used for messages. The value is an integer, or alias string. * 1 or `"transient"` The message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent" (*default*) The message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). arguments (Dict): Additional arguments to specify when the exchange is declared. no_declare (bool): Never declare this exchange (:meth:`declare` does nothing). """ TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE name = '' type = 'direct' durable = True auto_delete = False passive = False delivery_mode = None no_declare = False attrs = ( ('name', None), ('type', None), ('arguments', None), ('durable', bool), ('passive', bool), ('auto_delete', bool), ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m), ('no_declare', bool), ) def __init__(self, name='', type='', channel=None, **kwargs): super().__init__(**kwargs) self.name = name or self.name self.type = type or self.type self.maybe_bind(channel) def __hash__(self): return hash(f'E|{self.name}') def _can_declare(self): return not self.no_declare and ( self.name and not self.name.startswith( INTERNAL_EXCHANGE_PREFIX)) def declare(self, nowait=False, passive=None, channel=None): """Declare the exchange. Creates the exchange on the broker, unless passive is set in which case it will only assert that the exchange exists. Argument: nowait (bool): If set the server will not respond, and a response will not be waited for. Default is :const:`False`. """ if self._can_declare(): passive = self.passive if passive is None else passive return (channel or self.channel).exchange_declare( exchange=self.name, type=self.type, durable=self.durable, auto_delete=self.auto_delete, arguments=self.arguments, nowait=nowait, passive=passive, ) def bind_to(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None, **kwargs): """Bind the exchange to another exchange. Arguments: --------- nowait (bool): If set the server will not respond, and the call will not block waiting for a response. Default is :const:`False`. """ if isinstance(exchange, Exchange): exchange = exchange.name return (channel or self.channel).exchange_bind( destination=self.name, source=exchange, routing_key=routing_key, nowait=nowait, arguments=arguments, ) def unbind_from(self, source='', routing_key='', nowait=False, arguments=None, channel=None): """Delete previously created exchange binding from the server.""" if isinstance(source, Exchange): source = source.name return (channel or self.channel).exchange_unbind( destination=self.name, source=source, routing_key=routing_key, nowait=nowait, arguments=arguments, ) def Message(self, body, delivery_mode=None, properties=None, **kwargs): """Create message instance to be sent with :meth:`publish`. Arguments: --------- body (Any): Message body. delivery_mode (bool): Set custom delivery mode. Defaults to :attr:`delivery_mode`. priority (int): Message priority, 0 to broker configured max priority, where higher is better. 
content_type (str): The messages content_type. If content_type is set, no serialization occurs as it is assumed this is either a binary object, or you've done your own serialization. Leave blank if using built-in serialization as our library properly sets content_type. content_encoding (str): The character set in which this object is encoded. Use "binary" if sending in raw binary objects. Leave blank if using built-in serialization as our library properly sets content_encoding. properties (Dict): Message properties. headers (Dict): Message headers. """ properties = {} if properties is None else properties properties['delivery_mode'] = maybe_delivery_mode(self.delivery_mode) if (isinstance(body, str) and properties.get('content_encoding', None)) is None: kwargs['content_encoding'] = 'utf-8' return self.channel.prepare_message( body, properties=properties, **kwargs) def publish(self, message, routing_key=None, mandatory=False, immediate=False, exchange=None): """Publish message. Arguments: --------- message (Union[kombu.Message, str, bytes]): Message to publish. routing_key (str): Message routing key. mandatory (bool): Currently not supported. immediate (bool): Currently not supported. """ if isinstance(message, str): message = self.Message(message) exchange = exchange or self.name return self.channel.basic_publish( message, exchange=exchange, routing_key=routing_key, mandatory=mandatory, immediate=immediate, ) def delete(self, if_unused=False, nowait=False): """Delete the exchange declaration on server. Arguments: --------- if_unused (bool): Delete only if the exchange has no bindings. Default is :const:`False`. nowait (bool): If set the server will not respond, and a response will not be waited for. Default is :const:`False`. """ return self.channel.exchange_delete(exchange=self.name, if_unused=if_unused, nowait=nowait) def binding(self, routing_key='', arguments=None, unbind_arguments=None): return binding(self, routing_key, arguments, unbind_arguments) def __eq__(self, other): if isinstance(other, Exchange): return (self.name == other.name and self.type == other.type and self.arguments == other.arguments and self.durable == other.durable and self.auto_delete == other.auto_delete and self.delivery_mode == other.delivery_mode) return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return self._repr_entity(self) def __str__(self): return 'Exchange {}({})'.format( _reprstr(self.name) or repr(''), self.type, ) @property def can_cache_declaration(self): return not self.auto_delete class binding(Object): """Represents a queue or exchange binding. Arguments: --------- exchange (Exchange): Exchange to bind to. routing_key (str): Routing key used as binding key. arguments (Dict): Arguments for bind operation. unbind_arguments (Dict): Arguments for unbind operation. 
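Example:
-------
A sketch of binding one queue to several exchanges; all exchange,
queue and routing-key names are placeholders:

>>> from kombu import Exchange, Queue, binding
>>> media = Exchange('media', type='direct')
>>> files = Exchange('files', type='direct')
>>> downloads = Queue('downloads', bindings=[
...     binding(media, routing_key='video'),
...     binding(files, routing_key='image'),
... ])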
""" attrs = ( ('exchange', None), ('routing_key', None), ('arguments', None), ('unbind_arguments', None) ) def __init__(self, exchange=None, routing_key='', arguments=None, unbind_arguments=None): self.exchange = exchange self.routing_key = routing_key self.arguments = arguments self.unbind_arguments = unbind_arguments def declare(self, channel, nowait=False): """Declare destination exchange.""" if self.exchange and self.exchange.name: self.exchange.declare(channel=channel, nowait=nowait) def bind(self, entity, nowait=False, channel=None): """Bind entity to this binding.""" entity.bind_to(exchange=self.exchange, routing_key=self.routing_key, arguments=self.arguments, nowait=nowait, channel=channel) def unbind(self, entity, nowait=False, channel=None): """Unbind entity from this binding.""" entity.unbind_from(self.exchange, routing_key=self.routing_key, arguments=self.unbind_arguments, nowait=nowait, channel=channel) def __repr__(self): return f'' def __str__(self): return '{}->{}'.format( _reprstr(self.exchange.name), _reprstr(self.routing_key), ) class Queue(MaybeChannelBound): """A Queue declaration. Arguments: --------- name (str): See :attr:`name`. exchange (Exchange, str): See :attr:`exchange`. routing_key (str): See :attr:`routing_key`. channel (kombu.Connection, ChannelT): See :attr:`channel`. durable (bool): See :attr:`durable`. exclusive (bool): See :attr:`exclusive`. auto_delete (bool): See :attr:`auto_delete`. queue_arguments (Dict): See :attr:`queue_arguments`. binding_arguments (Dict): See :attr:`binding_arguments`. consumer_arguments (Dict): See :attr:`consumer_arguments`. no_declare (bool): See :attr:`no_declare`. on_declared (Callable): See :attr:`on_declared`. expires (float): See :attr:`expires`. message_ttl (float): See :attr:`message_ttl`. max_length (int): See :attr:`max_length`. max_length_bytes (int): See :attr:`max_length_bytes`. max_priority (int): See :attr:`max_priority`. Attributes ---------- name (str): Name of the queue. Default is no name (default queue destination). exchange (Exchange): The :class:`Exchange` the queue binds to. routing_key (str): The routing key (if any), also called *binding key*. The interpretation of the routing key depends on the :attr:`Exchange.type`. * direct exchange Matches if the routing key property of the message and the :attr:`routing_key` attribute are identical. * fanout exchange Always matches, even if the binding does not have a key. * topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available; star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`. channel (ChannelT): The channel the Queue is bound to (if bound). durable (bool): Durable queues remain active when a server restarts. Non-durable queues (transient queues) are purged if/when a server restarts. Note that durable queues do not necessarily hold persistent messages, although it does not make sense to send persistent messages to a transient queue. Default is :const:`True`. exclusive (bool): Exclusive queues may only be consumed from by the current connection. Setting the 'exclusive' flag always implies 'auto-delete'. Default is :const:`False`. auto_delete (bool): If set, the queue is deleted when all consumers have finished using it. 
Last consumer can be canceled either explicitly or because its channel is closed. If there was no consumer ever on the queue, it won't be deleted. expires (float): Set the expiry time (in seconds) for when this queue should expire. The expiry time decides how long the queue can stay unused before it's automatically deleted. *Unused* means the queue has no consumers, the queue has not been redeclared, and ``Queue.get`` has not been invoked for a duration of at least the expiration period. See https://www.rabbitmq.com/ttl.html#queue-ttl **RabbitMQ extension**: Only available when using RabbitMQ. message_ttl (float): Message time to live in seconds. This setting controls how long messages can stay in the queue unconsumed. If the expiry time passes before a message consumer has received the message, the message is deleted and no consumer will see the message. See https://www.rabbitmq.com/ttl.html#per-queue-message-ttl **RabbitMQ extension**: Only available when using RabbitMQ. max_length (int): Set the maximum number of messages that the queue can hold. If the number of messages in the queue size exceeds this limit, new messages will be dropped (or dead-lettered if a dead letter exchange is active). See https://www.rabbitmq.com/maxlength.html **RabbitMQ extension**: Only available when using RabbitMQ. max_length_bytes (int): Set the max size (in bytes) for the total of messages in the queue. If the total size of all the messages in the queue exceeds this limit, new messages will be dropped (or dead-lettered if a dead letter exchange is active). **RabbitMQ extension**: Only available when using RabbitMQ. max_priority (int): Set the highest priority number for this queue. For example if the value is 10, then messages can delivered to this queue can have a ``priority`` value between 0 and 10, where 10 is the highest priority. RabbitMQ queues without a max priority set will ignore the priority field in the message, so if you want priorities you need to set the max priority field to declare the queue as a priority queue. **RabbitMQ extension**: Only available when using RabbitMQ. queue_arguments (Dict): Additional arguments used when declaring the queue. Can be used to to set the arguments value for RabbitMQ/AMQP's ``queue.declare``. binding_arguments (Dict): Additional arguments used when binding the queue. Can be used to to set the arguments value for RabbitMQ/AMQP's ``queue.declare``. consumer_arguments (Dict): Additional arguments used when consuming from this queue. Can be used to to set the arguments value for RabbitMQ/AMQP's ``basic.consume``. alias (str): Unused in Kombu, but applications can take advantage of this, for example to give alternate names to queues with automatically generated queue names. on_declared (Callable): Optional callback to be applied when the queue has been declared (the ``queue_declare`` operation is complete). This must be a function with a signature that accepts at least 3 positional arguments: ``(name, messages, consumers)``. no_declare (bool): Never declare this queue, nor related entities (:meth:`declare` does nothing). 
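Example:
-------
A declaration sketch; the broker URL, exchange, queue, routing key
and the ``x-max-priority`` value are placeholders:

>>> from kombu import Connection, Exchange, Queue
>>> tasks = Exchange('tasks', type='direct')
>>> hipri = Queue('hipri', exchange=tasks, routing_key='hipri',
...               queue_arguments={'x-max-priority': 10})
>>> with Connection('amqp://') as conn:
...     bound = hipri(conn.default_channel)
...     bound.declare()
'hipri'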
""" ContentDisallowed = ContentDisallowed name = '' exchange = Exchange('') routing_key = '' durable = True exclusive = False auto_delete = False no_ack = False attrs = ( ('name', None), ('exchange', None), ('routing_key', None), ('queue_arguments', None), ('binding_arguments', None), ('consumer_arguments', None), ('durable', bool), ('exclusive', bool), ('auto_delete', bool), ('no_ack', None), ('alias', None), ('bindings', list), ('no_declare', bool), ('expires', float), ('message_ttl', float), ('max_length', int), ('max_length_bytes', int), ('max_priority', int) ) def __init__(self, name='', exchange=None, routing_key='', channel=None, bindings=None, on_declared=None, **kwargs): super().__init__(**kwargs) self.name = name or self.name if isinstance(exchange, str): self.exchange = Exchange(exchange) elif isinstance(exchange, Exchange): self.exchange = exchange self.routing_key = routing_key or self.routing_key self.bindings = set(bindings or []) self.on_declared = on_declared # allows Queue('name', [binding(...), binding(...), ...]) if isinstance(exchange, (list, tuple, set)): self.bindings |= set(exchange) if self.bindings: self.exchange = None # exclusive implies auto-delete. if self.exclusive: self.auto_delete = True self.maybe_bind(channel) def bind(self, channel): on_declared = self.on_declared bound = super().bind(channel) bound.on_declared = on_declared return bound def __hash__(self): return hash(f'Q|{self.name}') def when_bound(self): if self.exchange: self.exchange = self.exchange(self.channel) def declare(self, nowait=False, channel=None): """Declare queue and exchange then binds queue to exchange.""" if not self.no_declare: # - declare main binding. self._create_exchange(nowait=nowait, channel=channel) self._create_queue(nowait=nowait, channel=channel) self._create_bindings(nowait=nowait, channel=channel) return self.name def _create_exchange(self, nowait=False, channel=None): if self.exchange: self.exchange.declare(nowait=nowait, channel=channel) def _create_queue(self, nowait=False, channel=None): self.queue_declare(nowait=nowait, passive=False, channel=channel) if self.exchange and self.exchange.name: self.queue_bind(nowait=nowait, channel=channel) def _create_bindings(self, nowait=False, channel=None): for B in self.bindings: channel = channel or self.channel B.declare(channel) B.bind(self, nowait=nowait, channel=channel) def queue_declare(self, nowait=False, passive=False, channel=None): """Declare queue on the server. Arguments: --------- nowait (bool): Do not wait for a reply. passive (bool): If set, the server will not create the queue. The client can use this to check whether a queue exists without modifying the server state. 
""" channel = channel or self.channel queue_arguments = channel.prepare_queue_arguments( self.queue_arguments or {}, expires=self.expires, message_ttl=self.message_ttl, max_length=self.max_length, max_length_bytes=self.max_length_bytes, max_priority=self.max_priority, ) ret = channel.queue_declare( queue=self.name, passive=passive, durable=self.durable, exclusive=self.exclusive, auto_delete=self.auto_delete, arguments=queue_arguments, nowait=nowait, ) if not self.name: self.name = ret[0] if self.on_declared: self.on_declared(*ret) return ret def queue_bind(self, nowait=False, channel=None): """Create the queue binding on the server.""" return self.bind_to(self.exchange, self.routing_key, self.binding_arguments, channel=channel, nowait=nowait) def bind_to(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None): if isinstance(exchange, Exchange): exchange = exchange.name return (channel or self.channel).queue_bind( queue=self.name, exchange=exchange, routing_key=routing_key, arguments=arguments, nowait=nowait, ) def get(self, no_ack=None, accept=None): """Poll the server for a new message. This method provides direct access to the messages in a queue using a synchronous dialogue, designed for specific types of applications where synchronous functionality is more important than performance. Returns ------- ~kombu.Message: if a message was available, or :const:`None` otherwise. Arguments: --------- no_ack (bool): If enabled the broker will automatically ack messages. accept (Set[str]): Custom list of accepted content types. """ no_ack = self.no_ack if no_ack is None else no_ack message = self.channel.basic_get(queue=self.name, no_ack=no_ack) if message is not None: m2p = getattr(self.channel, 'message_to_python', None) if m2p: message = m2p(message) if message.errors: message._reraise_error() message.accept = prepare_accept_content(accept) return message def purge(self, nowait=False): """Remove all ready messages from the queue.""" return self.channel.queue_purge(queue=self.name, nowait=nowait) or 0 def consume(self, consumer_tag='', callback=None, no_ack=None, nowait=False, on_cancel=None): """Start a queue consumer. Consumers last as long as the channel they were created on, or until the client cancels them. Arguments: --------- consumer_tag (str): Unique identifier for the consumer. The consumer tag is local to a connection, so two clients can use the same consumer tags. If this field is empty the server will generate a unique tag. no_ack (bool): If enabled the broker will automatically ack messages. nowait (bool): Do not wait for a reply. callback (Callable): callback called for each delivered message. on_cancel (Callable): callback called on cancel notify received from broker. """ if no_ack is None: no_ack = self.no_ack return self.channel.basic_consume( queue=self.name, no_ack=no_ack, consumer_tag=consumer_tag or '', callback=callback, nowait=nowait, arguments=self.consumer_arguments, on_cancel=on_cancel, ) def cancel(self, consumer_tag): """Cancel a consumer by consumer tag.""" return self.channel.basic_cancel(consumer_tag) def delete(self, if_unused=False, if_empty=False, nowait=False): """Delete the queue. Arguments: --------- if_unused (bool): If set, the server will only delete the queue if it has no consumers. A channel error will be raised if the queue has consumers. if_empty (bool): If set, the server will only delete the queue if it is empty. If it is not empty a channel error will be raised. nowait (bool): Do not wait for a reply. 
""" return self.channel.queue_delete(queue=self.name, if_unused=if_unused, if_empty=if_empty, nowait=nowait) def queue_unbind(self, arguments=None, nowait=False, channel=None): return self.unbind_from(self.exchange, self.routing_key, arguments, nowait, channel) def unbind_from(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None): """Unbind queue by deleting the binding from the server.""" return (channel or self.channel).queue_unbind( queue=self.name, exchange=exchange.name, routing_key=routing_key, arguments=arguments, nowait=nowait, ) def __eq__(self, other): if isinstance(other, Queue): return (self.name == other.name and self.exchange == other.exchange and self.routing_key == other.routing_key and self.queue_arguments == other.queue_arguments and self.binding_arguments == other.binding_arguments and self.consumer_arguments == other.consumer_arguments and self.durable == other.durable and self.exclusive == other.exclusive and self.auto_delete == other.auto_delete) return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __repr__(self): if self.bindings: return self._repr_entity('Queue {name} -> {bindings}'.format( name=_reprstr(self.name), bindings=pretty_bindings(self.bindings), )) return self._repr_entity( 'Queue {name} -> {0.exchange!r} -> {routing_key}'.format( self, name=_reprstr(self.name), routing_key=_reprstr(self.routing_key), ), ) @property def can_cache_declaration(self): if self.queue_arguments: expiring_queue = "x-expires" in self.queue_arguments else: expiring_queue = False return not expiring_queue and not self.auto_delete @classmethod def from_dict(cls, queue, **options): binding_key = options.get('binding_key') or options.get('routing_key') e_durable = options.get('exchange_durable') if e_durable is None: e_durable = options.get('durable') e_auto_delete = options.get('exchange_auto_delete') if e_auto_delete is None: e_auto_delete = options.get('auto_delete') q_durable = options.get('queue_durable') if q_durable is None: q_durable = options.get('durable') q_auto_delete = options.get('queue_auto_delete') if q_auto_delete is None: q_auto_delete = options.get('auto_delete') e_arguments = options.get('exchange_arguments') q_arguments = options.get('queue_arguments') b_arguments = options.get('binding_arguments') c_arguments = options.get('consumer_arguments') bindings = options.get('bindings') exchange = Exchange(options.get('exchange'), type=options.get('exchange_type'), delivery_mode=options.get('delivery_mode'), routing_key=options.get('routing_key'), durable=e_durable, auto_delete=e_auto_delete, arguments=e_arguments) return Queue(queue, exchange=exchange, routing_key=binding_key, durable=q_durable, exclusive=options.get('exclusive'), auto_delete=q_auto_delete, no_ack=options.get('no_ack'), queue_arguments=q_arguments, binding_arguments=b_arguments, consumer_arguments=c_arguments, bindings=bindings) def as_dict(self, recurse=False): res = super().as_dict(recurse) if not recurse: return res bindings = res.get('bindings') if bindings: res['bindings'] = [b.as_dict(recurse=True) for b in bindings] return res kombu-5.5.3/kombu/exceptions.py000066400000000000000000000054261477772317200165410ustar00rootroot00000000000000"""Exceptions.""" from __future__ import annotations from socket import timeout as TimeoutError from types import TracebackType from typing import TYPE_CHECKING, TypeVar from amqp import ChannelError, ConnectionError, ResourceError if TYPE_CHECKING: from kombu.asynchronous.http import Response __all__ = ( 
'reraise', 'KombuError', 'OperationalError', 'NotBoundError', 'MessageStateError', 'TimeoutError', 'LimitExceeded', 'ConnectionLimitExceeded', 'ChannelLimitExceeded', 'ConnectionError', 'ChannelError', 'VersionMismatch', 'SerializerNotInstalled', 'ResourceError', 'SerializationError', 'EncodeError', 'DecodeError', 'HttpError', 'InconsistencyError', ) BaseExceptionType = TypeVar('BaseExceptionType', bound=BaseException) def reraise( tp: type[BaseExceptionType], value: BaseExceptionType, tb: TracebackType | None = None ) -> BaseExceptionType: """Reraise exception.""" if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value class KombuError(Exception): """Common subclass for all Kombu exceptions.""" class OperationalError(KombuError): """Recoverable message transport connection error.""" class SerializationError(KombuError): """Failed to serialize/deserialize content.""" class EncodeError(SerializationError): """Cannot encode object.""" class DecodeError(SerializationError): """Cannot decode object.""" class NotBoundError(KombuError): """Trying to call channel dependent method on unbound entity.""" class MessageStateError(KombuError): """The message has already been acknowledged.""" class LimitExceeded(KombuError): """Limit exceeded.""" class ConnectionLimitExceeded(LimitExceeded): """Maximum number of simultaneous connections exceeded.""" class ChannelLimitExceeded(LimitExceeded): """Maximum number of simultaneous channels exceeded.""" class VersionMismatch(KombuError): """Library dependency version mismatch.""" class SerializerNotInstalled(KombuError): """Support for the requested serialization type is not installed.""" class ContentDisallowed(SerializerNotInstalled): """Consumer does not allow this content-type.""" class InconsistencyError(ConnectionError): """Data or environment has been found to be inconsistent. Depending on the cause it may be possible to retry the operation. 
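Example:
-------
A retry sketch; ``broker_operation`` is a placeholder for whatever
call raised the error:

>>> from kombu.exceptions import InconsistencyError
>>> try:
...     broker_operation()
... except InconsistencyError:
...     broker_operation()  # the condition is often transient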
""" class HttpError(Exception): """HTTP Client Error.""" def __init__( self, code: int, message: str | None = None, response: Response | None = None ) -> None: self.code = code self.message = message self.response = response super().__init__(code, message, response) def __str__(self) -> str: return 'HTTP {0.code}: {0.message}'.format(self) kombu-5.5.3/kombu/log.py000066400000000000000000000101031477772317200151250ustar00rootroot00000000000000"""Logging Utilities.""" from __future__ import annotations import logging import numbers import os import sys from logging.handlers import WatchedFileHandler from typing import TYPE_CHECKING from .utils.encoding import safe_repr, safe_str from .utils.functional import maybe_evaluate from .utils.objects import cached_property if TYPE_CHECKING: from logging import Logger __all__ = ('LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging') LOG_LEVELS = dict(logging._nameToLevel) LOG_LEVELS.update(logging._levelToName) LOG_LEVELS.setdefault('FATAL', logging.FATAL) LOG_LEVELS.setdefault(logging.FATAL, 'FATAL') DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS') def get_logger(logger: str | Logger): """Get logger by name.""" if isinstance(logger, str): logger = logging.getLogger(logger) if not logger.handlers: logger.addHandler(logging.NullHandler()) return logger def get_loglevel(level): """Get loglevel by name.""" if isinstance(level, str): return LOG_LEVELS[level] return level def naive_format_parts(fmt): parts = fmt.split('%') for i, e in enumerate(parts[1:]): yield None if not e or not parts[i - 1] else e[0] def safeify_format(fmt, args, filters=None): filters = {'s': safe_str, 'r': safe_repr} if not filters else filters for index, type in enumerate(naive_format_parts(fmt)): filt = filters.get(type) yield filt(args[index]) if filt else args[index] class LogMixin: """Mixin that adds severity methods to any class.""" def debug(self, *args, **kwargs): return self.log(logging.DEBUG, *args, **kwargs) def info(self, *args, **kwargs): return self.log(logging.INFO, *args, **kwargs) def warn(self, *args, **kwargs): return self.log(logging.WARN, *args, **kwargs) def error(self, *args, **kwargs): kwargs.setdefault('exc_info', True) return self.log(logging.ERROR, *args, **kwargs) def critical(self, *args, **kwargs): kwargs.setdefault('exc_info', True) return self.log(logging.CRITICAL, *args, **kwargs) def annotate(self, text): return f'{self.logger_name} - {text}' def log(self, severity, *args, **kwargs): if DISABLE_TRACEBACKS: kwargs.pop('exc_info', None) if self.logger.isEnabledFor(severity): log = self.logger.log if len(args) > 1 and isinstance(args[0], str): expand = [maybe_evaluate(arg) for arg in args[1:]] return log(severity, self.annotate(args[0].replace('%r', '%s')), *list(safeify_format(args[0], expand)), **kwargs) else: return self.logger.log( severity, self.annotate(' '.join(map(safe_str, args))), **kwargs) def get_logger(self): return get_logger(self.logger_name) def is_enabled_for(self, level): return self.logger.isEnabledFor(self.get_loglevel(level)) def get_loglevel(self, level): if not isinstance(level, numbers.Integral): return LOG_LEVELS[level] return level @cached_property def logger(self): return self.get_logger() @property def logger_name(self): return self.__class__.__name__ class Log(LogMixin): def __init__(self, name, logger=None): self._logger_name = name self._logger = logger def get_logger(self): if self._logger: return self._logger return super().get_logger() @property def logger_name(self): return self._logger_name def 
setup_logging(loglevel=None, logfile=None): """Setup logging.""" logger = logging.getLogger() loglevel = get_loglevel(loglevel or 'ERROR') logfile = logfile if logfile else sys.__stderr__ if not logger.handlers: if hasattr(logfile, 'write'): handler = logging.StreamHandler(logfile) else: handler = WatchedFileHandler(logfile) logger.addHandler(handler) logger.setLevel(loglevel) return logger kombu-5.5.3/kombu/matcher.py000066400000000000000000000102561477772317200160000ustar00rootroot00000000000000"""Pattern matching registry.""" from __future__ import annotations from fnmatch import fnmatch from re import match as rematch from typing import Callable, cast from .utils.compat import entrypoints from .utils.encoding import bytes_to_str MatcherFunction = Callable[[str, str], bool] class MatcherNotInstalled(Exception): """Matcher not installed/found.""" class MatcherRegistry: """Pattern matching function registry.""" MatcherNotInstalled = MatcherNotInstalled matcher_pattern_first = ["pcre", ] def __init__(self) -> None: self._matchers: dict[str, MatcherFunction] = {} self._default_matcher: MatcherFunction | None = None def register(self, name: str, matcher: MatcherFunction) -> None: """Add matcher by name to the registry.""" self._matchers[name] = matcher def unregister(self, name: str) -> None: """Remove matcher by name from the registry.""" try: self._matchers.pop(name) except KeyError: raise self.MatcherNotInstalled( f'No matcher installed for {name}' ) def _set_default_matcher(self, name: str) -> None: """Set the default matching method. :param name: The name of the registered matching method. For example, `glob` (default), `pcre`, or any custom methods registered using :meth:`register`. :raises MatcherNotInstalled: If the matching method requested is not available. """ try: self._default_matcher = self._matchers[name] except KeyError: raise self.MatcherNotInstalled( f'No matcher installed for {name}' ) def match( self, data: bytes, pattern: bytes, matcher: str | None = None, matcher_kwargs: dict[str, str] | None = None ) -> bool: """Call the matcher.""" if matcher and not self._matchers.get(matcher): raise self.MatcherNotInstalled( f'No matcher installed for {matcher}' ) match_func = self._matchers[matcher or 'glob'] if matcher in self.matcher_pattern_first: first_arg = bytes_to_str(pattern) second_arg = bytes_to_str(data) else: first_arg = bytes_to_str(data) second_arg = bytes_to_str(pattern) return match_func(first_arg, second_arg, **matcher_kwargs or {}) #: Global registry of matchers. registry = MatcherRegistry() """ .. function:: match(data, pattern, matcher=default_matcher, matcher_kwargs=None): Match `data` by `pattern` using `matcher`. :param data: The data that should be matched. Must be string. :param pattern: The pattern that should be applied. Must be string. :keyword matcher: An optional string representing the matching method (for example, `glob` or `pcre`). If :const:`None` (default), then `glob` will be used. :keyword matcher_kwargs: Additional keyword arguments that will be passed to the specified `matcher`. :returns: :const:`True` if `data` matches pattern, :const:`False` otherwise. :raises MatcherNotInstalled: If the matching method requested is not available. """ match = registry.match """ .. function:: register(name, matcher): Register a new matching method. :param name: A convenient name for the matching method. :param matcher: A method that will be passed data and pattern. """ register = registry.register """ .. 
function:: unregister(name): Unregister registered matching method. :param name: Registered matching method name. """ unregister = registry.unregister def register_glob() -> None: """Register glob into default registry.""" registry.register('glob', fnmatch) def register_pcre() -> None: """Register pcre into default registry.""" registry.register('pcre', cast(MatcherFunction, rematch)) # Register the base matching methods. register_glob() register_pcre() # Default matching method is 'glob' registry._set_default_matcher('glob') # Load entrypoints from installed extensions for ep, args in entrypoints('kombu.matchers'): register(ep.name, *args) kombu-5.5.3/kombu/message.py000066400000000000000000000177271477772317200160130ustar00rootroot00000000000000"""Message class.""" from __future__ import annotations import sys from .compression import decompress from .exceptions import MessageStateError, reraise from .serialization import loads from .utils.functional import dictfilter __all__ = ('Message',) ACK_STATES = {'ACK', 'REJECTED', 'REQUEUED'} IS_PYPY = hasattr(sys, 'pypy_version_info') class Message: """Base class for received messages. Keyword Arguments: ----------------- channel (ChannelT): If message was received, this should be the channel that the message was received on. body (str): Message body. delivery_mode (bool): Set custom delivery mode. Defaults to :attr:`delivery_mode`. priority (int): Message priority, 0 to broker configured max priority, where higher is better. content_type (str): The messages content_type. If content_type is set, no serialization occurs as it is assumed this is either a binary object, or you've done your own serialization. Leave blank if using built-in serialization as our library properly sets content_type. content_encoding (str): The character set in which this object is encoded. Use "binary" if sending in raw binary objects. Leave blank if using built-in serialization as our library properly sets content_encoding. properties (Dict): Message properties. headers (Dict): Message headers. """ MessageStateError = MessageStateError errors = None if not IS_PYPY: # pragma: no cover __slots__ = ( '_state', 'channel', 'delivery_tag', 'content_type', 'content_encoding', 'delivery_info', 'headers', 'properties', 'body', '_decoded_cache', 'accept', '__dict__', ) def __init__(self, body=None, delivery_tag=None, content_type=None, content_encoding=None, delivery_info=None, properties=None, headers=None, postencode=None, accept=None, channel=None, **kwargs): delivery_info = {} if not delivery_info else delivery_info self.errors = [] if self.errors is None else self.errors self.channel = channel self.delivery_tag = delivery_tag self.content_type = content_type self.content_encoding = content_encoding self.delivery_info = delivery_info self.headers = headers or {} self.properties = properties or {} self._decoded_cache = None self._state = 'RECEIVED' self.accept = accept compression = self.headers.get('compression') if not self.errors and compression: try: body = decompress(body, compression) except Exception: self.errors.append(sys.exc_info()) if not self.errors and postencode and isinstance(body, str): try: body = body.encode(postencode) except Exception: self.errors.append(sys.exc_info()) self.body = body def _reraise_error(self, callback=None): try: reraise(*self.errors[0]) except Exception as exc: if not callback: raise callback(self, exc) def ack(self, multiple=False): """Acknowledge this message as being processed. This will remove the message from the queue. 
Raises ------ MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.channel.no_ack_consumers is not None: try: consumer_tag = self.delivery_info['consumer_tag'] except KeyError: pass else: if consumer_tag in self.channel.no_ack_consumers: return if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_ack(self.delivery_tag, multiple=multiple) self._state = 'ACK' def ack_log_error(self, logger, errors, multiple=False): try: self.ack(multiple=multiple) except BrokenPipeError as exc: logger.critical("Couldn't ack %r, reason:%r", self.delivery_tag, exc, exc_info=True) raise except errors as exc: logger.critical("Couldn't ack %r, reason:%r", self.delivery_tag, exc, exc_info=True) def reject_log_error(self, logger, errors, requeue=False): try: self.reject(requeue=requeue) except errors as exc: logger.critical("Couldn't reject %r, reason: %r", self.delivery_tag, exc, exc_info=True) def reject(self, requeue=False): """Reject this message. The message will be discarded by the server. Raises ------ MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_reject(self.delivery_tag, requeue=requeue) self._state = 'REJECTED' def requeue(self): """Reject this message and put it back on the queue. Warning: ------- You must not use this method as a means of selecting messages to process. Raises ------ MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_reject(self.delivery_tag, requeue=True) self._state = 'REQUEUED' def decode(self): """Deserialize the message body. Returning the original python structure sent by the publisher. Note: ---- The return value is memoized, use `_decode` to force re-evaluation. 
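Example:
-------
Illustrative; ``message`` is a :class:`~kombu.Message` received in
a consumer callback:

>>> payload = message.decode()  # deserialize once, memoized
>>> message.payload is payload
True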
""" if not self._decoded_cache: self._decoded_cache = self._decode() return self._decoded_cache def _decode(self): return loads(self.body, self.content_type, self.content_encoding, accept=self.accept) @property def acknowledged(self): """Set to true if the message has been acknowledged.""" return self._state in ACK_STATES @property def payload(self): """The decoded message body.""" return self._decoded_cache if self._decoded_cache else self.decode() def __repr__(self): return '<{} object at {:#x} with details {!r}>'.format( type(self).__name__, id(self), dictfilter( state=self._state, content_type=self.content_type, delivery_tag=self.delivery_tag, body_length=len(self.body) if self.body is not None else None, properties=dictfilter( correlation_id=self.properties.get('correlation_id'), type=self.properties.get('type'), ), delivery_info=dictfilter( exchange=self.delivery_info.get('exchange'), routing_key=self.delivery_info.get('routing_key'), ), ), ) kombu-5.5.3/kombu/messaging.py000066400000000000000000000610511477772317200163310ustar00rootroot00000000000000"""Sending and receiving messages.""" from __future__ import annotations from itertools import count from typing import TYPE_CHECKING from .common import maybe_declare from .compression import compress from .connection import PooledConnection, is_connection, maybe_channel from .entity import Exchange, Queue, maybe_delivery_mode from .exceptions import ContentDisallowed from .serialization import dumps, prepare_accept_content from .utils.functional import ChannelPromise, maybe_list if TYPE_CHECKING: from types import TracebackType __all__ = ('Exchange', 'Queue', 'Producer', 'Consumer') class Producer: """Message Producer. Arguments: --------- channel (kombu.Connection, ChannelT): Connection or channel. exchange (kombu.entity.Exchange, str): Optional default exchange. routing_key (str): Optional default routing key. serializer (str): Default serializer. Default is `"json"`. compression (str): Default compression method. Default is no compression. auto_declare (bool): Automatically declare the default exchange at instantiation. Default is :const:`True`. on_return (Callable): Callback to call for undeliverable messages, when the `mandatory` or `immediate` arguments to :meth:`publish` is used. This callback needs the following signature: `(exception, exchange, routing_key, message)`. Note that the producer needs to drain events to use this feature. """ #: Default exchange exchange = None #: Default routing key. routing_key = '' #: Default serializer to use. Default is JSON. serializer = None #: Default compression method. Disabled by default. compression = None #: By default, if a default exchange is set, #: that exchange will be declare when publishing a message. auto_declare = True #: Basic return callback. on_return = None #: Set if channel argument was a Connection instance (using #: default_channel). 
__connection__ = None def __init__(self, channel, exchange=None, routing_key=None, serializer=None, auto_declare=None, compression=None, on_return=None): self._channel = channel self.exchange = exchange self.routing_key = routing_key or self.routing_key self.serializer = serializer or self.serializer self.compression = compression or self.compression self.on_return = on_return or self.on_return self._channel_promise = None if self.exchange is None: self.exchange = Exchange('') if auto_declare is not None: self.auto_declare = auto_declare if self._channel: self.revive(self._channel) def __repr__(self): return f'' def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return (None, self.exchange, self.routing_key, self.serializer, self.auto_declare, self.compression) def declare(self): """Declare the exchange. Note: ---- This happens automatically at instantiation when the :attr:`auto_declare` flag is enabled. """ if self.exchange.name: self.exchange.declare() def maybe_declare(self, entity, retry=False, **retry_policy): """Declare exchange if not already declared during this session.""" if entity: return maybe_declare(entity, self.channel, retry, **retry_policy) def _delivery_details(self, exchange, delivery_mode=None, maybe_delivery_mode=maybe_delivery_mode, Exchange=Exchange): if isinstance(exchange, Exchange): return exchange.name, maybe_delivery_mode( delivery_mode or exchange.delivery_mode, ) # exchange is string, so inherit the delivery # mode of our default exchange. return exchange, maybe_delivery_mode( delivery_mode or self.exchange.delivery_mode, ) def publish(self, body, routing_key=None, delivery_mode=None, mandatory=False, immediate=False, priority=0, content_type=None, content_encoding=None, serializer=None, headers=None, compression=None, exchange=None, retry=False, retry_policy=None, declare=None, expiration=None, timeout=None, confirm_timeout=None, **properties): """Publish message to the specified exchange. Arguments: --------- body (Any): Message body. routing_key (str): Message routing key. delivery_mode (enum): See :attr:`delivery_mode`. mandatory (bool): Currently not supported. immediate (bool): Currently not supported. priority (int): Message priority. A number between 0 and 9. content_type (str): Content type. Default is auto-detect. content_encoding (str): Content encoding. Default is auto-detect. serializer (str): Serializer to use. Default is auto-detect. compression (str): Compression method to use. Default is none. headers (Dict): Mapping of arbitrary headers to pass along with the message body. exchange (kombu.entity.Exchange, str): Override the exchange. Note that this exchange must have been declared. declare (Sequence[EntityT]): Optional list of required entities that must have been declared before publishing the message. The entities will be declared using :func:`~kombu.common.maybe_declare`. retry (bool): Retry publishing, or declaring entities if the connection is lost. retry_policy (Dict): Retry configuration, this is the keywords supported by :meth:`~kombu.Connection.ensure`. expiration (float): A TTL in seconds can be specified per message. Default is no expiration. timeout (float): Set timeout to wait maximum timeout second for message to publish. confirm_timeout (float): Set confirm timeout to wait maximum timeout second for message to confirm publishing if the channel is set to confirm publish mode. **properties (Any): Additional message properties, see AMQP spec. 
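Example:
-------
A publishing sketch; the broker URL, exchange, queue and routing
key are placeholders:

>>> from kombu import Connection, Exchange, Queue
>>> task_exchange = Exchange('tasks', type='direct')
>>> task_queue = Queue('hipri', task_exchange, routing_key='hipri')
>>> with Connection('amqp://') as conn:
...     producer = conn.Producer(serializer='json')
...     producer.publish(
...         {'hello': 'world'},
...         exchange=task_exchange,
...         routing_key='hipri',
...         declare=[task_queue],
...         retry=True,
...         retry_policy={'interval_start': 0, 'max_retries': 3},
...     )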
""" _publish = self._publish declare = [] if declare is None else declare headers = {} if headers is None else headers retry_policy = {} if retry_policy is None else retry_policy routing_key = self.routing_key if routing_key is None else routing_key compression = self.compression if compression is None else compression exchange_name, properties['delivery_mode'] = self._delivery_details( exchange or self.exchange, delivery_mode, ) if expiration is not None: properties['expiration'] = str(int(expiration * 1000)) body, content_type, content_encoding = self._prepare( body, serializer, content_type, content_encoding, compression, headers) if self.auto_declare and self.exchange.name: if self.exchange not in declare: # XXX declare should be a Set. declare.append(self.exchange) if retry: self.connection.transport_options.update(retry_policy) _publish = self.connection.ensure(self, _publish, **retry_policy) return _publish( body, priority, content_type, content_encoding, headers, properties, routing_key, mandatory, immediate, exchange_name, declare, timeout, confirm_timeout, retry, retry_policy ) def _publish(self, body, priority, content_type, content_encoding, headers, properties, routing_key, mandatory, immediate, exchange, declare, timeout=None, confirm_timeout=None, retry=False, retry_policy=None): retry_policy = {} if retry_policy is None else retry_policy channel = self.channel message = channel.prepare_message( body, priority, content_type, content_encoding, headers, properties, ) if declare: maybe_declare = self.maybe_declare for entity in declare: maybe_declare(entity, retry=retry, **retry_policy) # handle autogenerated queue names for reply_to reply_to = properties.get('reply_to') if isinstance(reply_to, Queue): properties['reply_to'] = reply_to.name return channel.basic_publish( message, exchange=exchange, routing_key=routing_key, mandatory=mandatory, immediate=immediate, timeout=timeout, confirm_timeout=confirm_timeout ) def _get_channel(self): channel = self._channel if isinstance(channel, ChannelPromise): channel = self._channel = channel() self.exchange.revive(channel) if self.on_return: channel.events['basic_return'].add(self.on_return) return channel def _set_channel(self, channel): self._channel = channel channel = property(_get_channel, _set_channel) def revive(self, channel): """Revive the producer after connection loss.""" if is_connection(channel): connection = channel self.__connection__ = connection channel = ChannelPromise(lambda: connection.default_channel) if isinstance(channel, ChannelPromise): self._channel = channel self.exchange = self.exchange(channel) else: # Channel already concrete self._channel = channel if self.on_return: self._channel.events['basic_return'].add(self.on_return) self.exchange = self.exchange(channel) def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: # In case the connection is part of a pool it needs to be # replaced in case of an exception if self.__connection__ is not None and exc_type is not None: if isinstance(self.__connection__, PooledConnection): self.__connection__._pool.replace(self.__connection__) self.release() def release(self): pass close = release def _prepare(self, body, serializer=None, content_type=None, content_encoding=None, compression=None, headers=None): # No content_type? Then we're serializing the data internally. 
if not content_type: serializer = serializer or self.serializer (content_type, content_encoding, body) = dumps(body, serializer=serializer) else: # If the programmer doesn't want us to serialize, # make sure content_encoding is set. if isinstance(body, str): if not content_encoding: content_encoding = 'utf-8' body = body.encode(content_encoding) # If they passed in a string, we can't know anything # about it. So assume it's binary data. elif not content_encoding: content_encoding = 'binary' if compression: body, headers['compression'] = compress(body, compression) return body, content_type, content_encoding @property def connection(self): try: return self.__connection__ or self.channel.connection.client except AttributeError: pass class Consumer: """Message consumer. Arguments: --------- channel (kombu.Connection, ChannelT): see :attr:`channel`. queues (Sequence[kombu.Queue]): see :attr:`queues`. no_ack (bool): see :attr:`no_ack`. auto_declare (bool): see :attr:`auto_declare` callbacks (Sequence[Callable]): see :attr:`callbacks`. on_message (Callable): See :attr:`on_message` on_decode_error (Callable): see :attr:`on_decode_error`. prefetch_count (int): see :attr:`prefetch_count`. """ ContentDisallowed = ContentDisallowed #: The connection/channel to use for this consumer. channel = None #: A single :class:`~kombu.Queue`, or a list of queues to #: consume from. queues = None #: Flag for automatic message acknowledgment. #: If enabled the messages are automatically acknowledged by the #: broker. This can increase performance but means that you #: have no control of when the message is removed. #: #: Disabled by default. no_ack = None #: By default all entities will be declared at instantiation, if you #: want to handle this manually you can set this to :const:`False`. auto_declare = True #: List of callbacks called in order when a message is received. #: #: The signature of the callbacks must take two arguments: #: `(body, message)`, which is the decoded message body and #: the :class:`~kombu.Message` instance. callbacks = None #: Optional function called whenever a message is received. #: #: When defined this function will be called instead of the #: :meth:`receive` method, and :attr:`callbacks` will be disabled. #: #: So this can be used as an alternative to :attr:`callbacks` when #: you don't want the body to be automatically decoded. #: Note that the message will still be decompressed if the message #: has the ``compression`` header set. #: #: The signature of the callback must take a single argument, #: which is the :class:`~kombu.Message` object. #: #: Also note that the ``message.body`` attribute, which is the raw #: contents of the message body, may in some cases be a read-only #: :class:`buffer` object. on_message = None #: Callback called when a message can't be decoded. #: #: The signature of the callback must take two arguments: `(message, #: exc)`, which is the message that can't be decoded and the exception #: that occurred while trying to decode it. on_decode_error = None #: List of accepted content-types. #: #: An exception will be raised if the consumer receives #: a message with an untrusted content type. #: By default all content-types are accepted, but not if #: :func:`kombu.disable_untrusted_serializers` was called, #: in which case only json is allowed. accept = None #: Initial prefetch count #: #: If set, the consumer will set the prefetch_count QoS value at startup. #: Can also be changed using :meth:`qos`. prefetch_count = None #: Mapping of queues we consume from. 
_queues = None _tags = count(1) # global def __init__(self, channel, queues=None, no_ack=None, auto_declare=None, callbacks=None, on_decode_error=None, on_message=None, accept=None, prefetch_count=None, tag_prefix=None): self.channel = channel self.queues = maybe_list(queues or []) self.no_ack = self.no_ack if no_ack is None else no_ack self.callbacks = (self.callbacks or [] if callbacks is None else callbacks) self.on_message = on_message self.tag_prefix = tag_prefix self._active_tags = {} if auto_declare is not None: self.auto_declare = auto_declare if on_decode_error is not None: self.on_decode_error = on_decode_error self.accept = prepare_accept_content(accept) self.prefetch_count = prefetch_count if self.channel: self.revive(self.channel) @property def queues(self): # noqa return list(self._queues.values()) @queues.setter def queues(self, queues): self._queues = {q.name: q for q in queues} def revive(self, channel): """Revive consumer after connection loss.""" self._active_tags.clear() channel = self.channel = maybe_channel(channel) # modify dict size while iterating over it is not allowed for qname, queue in list(self._queues.items()): # name may have changed after declare self._queues.pop(qname, None) queue = self._queues[queue.name] = queue(self.channel) queue.revive(channel) if self.auto_declare: self.declare() if self.prefetch_count is not None: self.qos(prefetch_count=self.prefetch_count) def declare(self): """Declare queues, exchanges and bindings. Note: ---- This is done automatically at instantiation when :attr:`auto_declare` is set. """ for queue in self._queues.values(): queue.declare() def register_callback(self, callback): """Register a new callback to be called when a message is received. Note: ---- The signature of the callback needs to accept two arguments: `(body, message)`, which is the decoded message body and the :class:`~kombu.Message` instance. """ self.callbacks.append(callback) def __enter__(self): self.consume() return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: if self.channel and self.channel.connection: conn_errors = self.channel.connection.client.connection_errors if not isinstance(exc_val, conn_errors): try: self.cancel() except Exception: pass def add_queue(self, queue): """Add a queue to the list of queues to consume from. Note: ---- This will not start consuming from the queue, for that you will have to call :meth:`consume` after. """ queue = queue(self.channel) if self.auto_declare: queue.declare() self._queues[queue.name] = queue return queue def consume(self, no_ack=None): """Start consuming messages. Can be called multiple times, but note that while it will consume from new queues added since the last call, it will not cancel consuming from removed queues ( use :meth:`cancel_by_queue`). Arguments: --------- no_ack (bool): See :attr:`no_ack`. """ queues = list(self._queues.values()) if queues: no_ack = self.no_ack if no_ack is None else no_ack H, T = queues[:-1], queues[-1] for queue in H: self._basic_consume(queue, no_ack=no_ack, nowait=True) self._basic_consume(T, no_ack=no_ack, nowait=False) def cancel(self): """End all active queue consumers. Note: ---- This does not affect already delivered messages, but it does mean the server will not send any more messages for this consumer. 
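Example:
-------
A minimal sketch of the consume/cancel cycle this method ends;
the broker URL and queue name are hypothetical:

.. code-block:: python

    import socket

    from kombu import Connection, Consumer, Queue

    queue = Queue('tasks')

    def on_message(body, message):
        print(body)
        message.ack()

    with Connection('amqp://guest:guest@localhost//') as conn:
        with Consumer(conn, [queue], callbacks=[on_message]):
            try:
                conn.drain_events(timeout=1)
            except socket.timeout:
                pass
        # leaving the inner ``with`` block calls ``cancel()`` for us.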
""" cancel = self.channel.basic_cancel for tag in self._active_tags.values(): cancel(tag) self._active_tags.clear() close = cancel def cancel_by_queue(self, queue): """Cancel consumer by queue name.""" qname = queue.name if isinstance(queue, Queue) else queue try: tag = self._active_tags.pop(qname) except KeyError: pass else: self.channel.basic_cancel(tag) finally: self._queues.pop(qname, None) def consuming_from(self, queue): """Return :const:`True` if currently consuming from queue'.""" name = queue if isinstance(queue, Queue): name = queue.name return name in self._active_tags def purge(self): """Purge messages from all queues. Warning: ------- This will *delete all ready messages*, there is no undo operation. """ return sum(queue.purge() for queue in self._queues.values()) def flow(self, active): """Enable/disable flow from peer. This is a simple flow-control mechanism that a peer can use to avoid overflowing its queues or otherwise finding itself receiving more messages than it can process. The peer that receives a request to stop sending content will finish sending the current content (if any), and then wait until flow is reactivated. """ self.channel.flow(active) def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Specify quality of service. The client can request that messages should be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. The prefetch window is Ignored if the :attr:`no_ack` option is set. Arguments: --------- prefetch_size (int): Specify the prefetch window in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls within other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. prefetch_count (int): Specify the prefetch window in terms of whole messages. apply_global (bool): Apply new settings globally on all channels. """ return self.channel.basic_qos(prefetch_size, prefetch_count, apply_global) def recover(self, requeue=False): """Redeliver unacknowledged messages. Asks the broker to redeliver all unacknowledged messages on the specified channel. Arguments: --------- requeue (bool): By default the messages will be redelivered to the original recipient. With `requeue` set to true, the server will attempt to requeue the message, potentially then delivering it to an alternative subscriber. """ return self.channel.basic_recover(requeue=requeue) def receive(self, body, message): """Method called when a message is received. This dispatches to the registered :attr:`callbacks`. Arguments: --------- body (Any): The decoded message body. message (~kombu.Message): The message instance. Raises ------ NotImplementedError: If no consumer callbacks have been registered. 
""" callbacks = self.callbacks if not callbacks: raise NotImplementedError('Consumer does not have any callbacks') [callback(body, message) for callback in callbacks] def _basic_consume(self, queue, consumer_tag=None, no_ack=no_ack, nowait=True): tag = self._active_tags.get(queue.name) if tag is None: tag = self._add_tag(queue, consumer_tag) queue.consume(tag, self._receive_callback, no_ack=no_ack, nowait=nowait) return tag def _add_tag(self, queue, consumer_tag=None): tag = consumer_tag or '{}{}'.format( self.tag_prefix, next(self._tags)) self._active_tags[queue.name] = tag return tag def _receive_callback(self, message): accept = self.accept on_m, channel, decoded = self.on_message, self.channel, None try: m2p = getattr(channel, 'message_to_python', None) if m2p: message = m2p(message) if accept is not None: message.accept = accept if message.errors: return message._reraise_error(self.on_decode_error) decoded = None if on_m else message.decode() except Exception as exc: if not self.on_decode_error: raise self.on_decode_error(message, exc) else: return on_m(message) if on_m else self.receive(decoded, message) def __repr__(self): return f'<{type(self).__name__}: {self.queues}>' @property def connection(self): try: return self.channel.connection.client except AttributeError: pass kombu-5.5.3/kombu/mixins.py000066400000000000000000000227451477772317200156720ustar00rootroot00000000000000"""Mixins.""" from __future__ import annotations import socket from contextlib import contextmanager from functools import partial from itertools import count from time import sleep from .common import ignore_errors from .log import get_logger from .messaging import Consumer, Producer from .utils.compat import nested from .utils.encoding import safe_repr from .utils.limits import TokenBucket from .utils.objects import cached_property __all__ = ('ConsumerMixin', 'ConsumerProducerMixin') logger = get_logger(__name__) debug, info, warn, error = ( logger.debug, logger.info, logger.warning, logger.error ) W_CONN_LOST = """\ Connection to broker lost, trying to re-establish connection...\ """ W_CONN_ERROR = """\ Broker connection error, trying again in %s seconds: %r.\ """ class ConsumerMixin: """Convenience mixin for implementing consumer programs. It can be used outside of threads, with threads, or greenthreads (eventlet/gevent) too. The basic class would need a :attr:`connection` attribute which must be a :class:`~kombu.Connection` instance, and define a :meth:`get_consumers` method that returns a list of :class:`kombu.Consumer` instances to use. Supporting multiple consumers is important so that multiple channels can be used for different QoS requirements. Example: ------- .. code-block:: python class Worker(ConsumerMixin): task_queue = Queue('tasks', Exchange('tasks'), 'tasks') def __init__(self, connection): self.connection = None def get_consumers(self, Consumer, channel): return [Consumer(queues=[self.task_queue], callbacks=[self.on_task])] def on_task(self, body, message): print('Got task: {0!r}'.format(body)) message.ack() Methods ------- * :meth:`extra_context` Optional extra context manager that will be entered after the connection and consumers have been set up. Takes arguments ``(connection, channel)``. * :meth:`on_connection_error` Handler called if the connection is lost/ or is unavailable. Takes arguments ``(exc, interval)``, where interval is the time in seconds when the connection will be retried. The default handler will log the exception. 
* :meth:`on_connection_revived` Handler called as soon as the connection is re-established after connection failure. Takes no arguments. * :meth:`on_consume_ready` Handler called when the consumer is ready to accept messages. Takes arguments ``(connection, channel, consumers)``. Also keyword arguments to ``consume`` are forwarded to this handler. * :meth:`on_consume_end` Handler called after the consumers are canceled. Takes arguments ``(connection, channel)``. * :meth:`on_iteration` Handler called for every iteration while draining events. Takes no arguments. * :meth:`on_decode_error` Handler called if a consumer was unable to decode the body of a message. Takes arguments ``(message, exc)`` where message is the original message object. The default handler will log the error and acknowledge the message, so if you override make sure to call super, or perform these steps yourself. """ #: maximum number of retries trying to re-establish the connection, #: if the connection is lost/unavailable. connect_max_retries = None #: When this is set to true the consumer should stop consuming #: and return, so that it can be joined if it is the implementation #: of a thread. should_stop = False def get_consumers(self, Consumer, channel): raise NotImplementedError('Subclass responsibility') def on_connection_revived(self): pass def on_consume_ready(self, connection, channel, consumers, **kwargs): pass def on_consume_end(self, connection, channel): pass def on_iteration(self): pass def on_decode_error(self, message, exc): error("Can't decode message body: %r (type:%r encoding:%r raw:%r')", exc, message.content_type, message.content_encoding, safe_repr(message.body)) message.ack() def on_connection_error(self, exc, interval): warn(W_CONN_ERROR, interval, exc, exc_info=1) @contextmanager def extra_context(self, connection, channel): yield def run(self, _tokens=1, **kwargs): restart_limit = self.restart_limit errors = (self.connection.connection_errors + self.connection.channel_errors) while not self.should_stop: try: if restart_limit.can_consume(_tokens): # pragma: no cover for _ in self.consume(limit=None, **kwargs): pass else: sleep(restart_limit.expected_time(_tokens)) except errors: warn(W_CONN_LOST, exc_info=1) @contextmanager def consumer_context(self, **kwargs): with self.Consumer() as (connection, channel, consumers): with self.extra_context(connection, channel): self.on_consume_ready(connection, channel, consumers, **kwargs) yield connection, channel, consumers def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs): elapsed = 0 with self.consumer_context(**kwargs) as (conn, channel, consumers): for i in limit and range(limit) or count(): if self.should_stop: break self.on_iteration() try: conn.drain_events(timeout=safety_interval) except socket.timeout: conn.heartbeat_check() elapsed += safety_interval if timeout and elapsed >= timeout: raise except OSError: if not self.should_stop: raise else: yield elapsed = 0 debug('consume exiting') def maybe_conn_error(self, fun): """Use :func:`kombu.common.ignore_errors` instead.""" return ignore_errors(self, fun) def create_connection(self): return self.connection.clone() @contextmanager def establish_connection(self): with self.create_connection() as conn: conn.ensure_connection(self.on_connection_error, self.connect_max_retries) yield conn @contextmanager def Consumer(self): with self.establish_connection() as conn: self.on_connection_revived() info('Connected to %s', conn.as_uri()) channel = conn.default_channel cls = partial(Consumer, 
channel, on_decode_error=self.on_decode_error) with self._consume_from(*self.get_consumers(cls, channel)) as c: yield conn, channel, c debug('Consumers canceled') self.on_consume_end(conn, channel) debug('Connection closed') def _consume_from(self, *consumers): return nested(*consumers) @cached_property def restart_limit(self): return TokenBucket(1) @cached_property def connection_errors(self): return self.connection.connection_errors @cached_property def channel_errors(self): return self.connection.channel_errors class ConsumerProducerMixin(ConsumerMixin): """Consumer and Producer mixin. Version of ConsumerMixin having separate connection for also publishing messages. Example: ------- .. code-block:: python class Worker(ConsumerProducerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer(queues=Queue('foo'), on_message=self.handle_message, accept='application/json', prefetch_count=10)] def handle_message(self, message): self.producer.publish( {'message': 'hello to you'}, exchange='', routing_key=message.properties['reply_to'], correlation_id=message.properties['correlation_id'], retry=True, ) """ _producer_connection = None def on_consume_end(self, connection, channel): if self._producer_connection is not None: self._producer_connection.close() self._producer_connection = None @property def producer(self): return Producer(self.producer_connection) @property def producer_connection(self): if self._producer_connection is None: conn = self.connection.clone() conn.ensure_connection(self.on_connection_error, self.connect_max_retries) self._producer_connection = conn return self._producer_connection kombu-5.5.3/kombu/pidbox.py000066400000000000000000000347051477772317200156470ustar00rootroot00000000000000"""Generic process mailbox.""" from __future__ import annotations import socket import warnings from collections import defaultdict, deque from contextlib import contextmanager from copy import copy from itertools import count from time import time from . import Consumer, Exchange, Producer, Queue from .clocks import LamportClock from .common import maybe_declare, oid_from from .exceptions import InconsistencyError from .log import get_logger from .matcher import match from .utils.functional import maybe_evaluate, reprcall from .utils.objects import cached_property from .utils.uuid import uuid REPLY_QUEUE_EXPIRES = 10 W_PIDBOX_IN_USE = """\ A node named {node.hostname} is already using this process mailbox! Maybe you forgot to shutdown the other node or did not do so properly? Or if you meant to start multiple nodes on the same host please make sure you give each node a unique node name! """ __all__ = ('Node', 'Mailbox') logger = get_logger(__name__) debug, error = logger.debug, logger.error class Node: """Mailbox node.""" #: hostname of the node. hostname = None #: the :class:`Mailbox` this is a node for. mailbox = None #: map of method name/handlers. handlers = None #: current context (passed on to handlers) state = None #: current channel. 
channel = None def __init__(self, hostname, state=None, channel=None, handlers=None, mailbox=None): self.channel = channel self.mailbox = mailbox self.hostname = hostname self.state = state self.adjust_clock = self.mailbox.clock.adjust if handlers is None: handlers = {} self.handlers = handlers def Consumer(self, channel=None, no_ack=True, accept=None, **options): queue = self.mailbox.get_queue(self.hostname) def verify_exclusive(name, messages, consumers): if consumers: warnings.warn(W_PIDBOX_IN_USE.format(node=self)) queue.on_declared = verify_exclusive return Consumer( channel or self.channel, [queue], no_ack=no_ack, accept=self.mailbox.accept if accept is None else accept, **options ) def handler(self, fun): self.handlers[fun.__name__] = fun return fun def on_decode_error(self, message, exc): error('Cannot decode message: %r', exc, exc_info=1) def listen(self, channel=None, callback=None): consumer = self.Consumer(channel=channel, callbacks=[callback or self.handle_message], on_decode_error=self.on_decode_error) consumer.consume() return consumer def dispatch(self, method, arguments=None, reply_to=None, ticket=None, **kwargs): arguments = arguments or {} debug('pidbox received method %s [reply_to:%s ticket:%s]', reprcall(method, (), kwargs=arguments), reply_to, ticket) handle = reply_to and self.handle_call or self.handle_cast try: reply = handle(method, arguments) except SystemExit: raise except Exception as exc: error('pidbox command error: %r', exc, exc_info=1) reply = {'error': repr(exc)} if reply_to: self.reply({self.hostname: reply}, exchange=reply_to['exchange'], routing_key=reply_to['routing_key'], ticket=ticket) return reply def handle(self, method, arguments=None): arguments = {} if not arguments else arguments return self.handlers[method](self.state, **arguments) def handle_call(self, method, arguments): return self.handle(method, arguments) def handle_cast(self, method, arguments): return self.handle(method, arguments) def handle_message(self, body, message=None): destination = body.get('destination') pattern = body.get('pattern') matcher = body.get('matcher') if message: self.adjust_clock(message.headers.get('clock') or 0) hostname = self.hostname run_dispatch = False if destination: if hostname in destination: run_dispatch = True elif pattern and matcher: if match(hostname, pattern, matcher): run_dispatch = True else: run_dispatch = True if run_dispatch: return self.dispatch(**body) dispatch_from_message = handle_message def reply(self, data, exchange, routing_key, ticket, **kwargs): self.mailbox._publish_reply(data, exchange, routing_key, ticket, channel=self.channel, serializer=self.mailbox.serializer) class Mailbox: """Process Mailbox.""" node_cls = Node exchange_fmt = '%s.pidbox' reply_exchange_fmt = 'reply.%s.pidbox' #: Name of application. namespace = None #: Connection (if bound). connection = None #: Exchange type (usually direct, or fanout for broadcast). type = 'direct' #: mailbox exchange (init by constructor). exchange = None #: exchange to send replies to. reply_exchange = None #: Only accepts json messages by default. 
accept = ['json'] #: Message serializer serializer = None def __init__(self, namespace, type='direct', connection=None, clock=None, accept=None, serializer=None, producer_pool=None, queue_ttl=None, queue_expires=None, reply_queue_ttl=None, reply_queue_expires=10.0): self.namespace = namespace self.connection = connection self.type = type self.clock = LamportClock() if clock is None else clock self.exchange = self._get_exchange(self.namespace, self.type) self.reply_exchange = self._get_reply_exchange(self.namespace) self.unclaimed = defaultdict(deque) self.accept = self.accept if accept is None else accept self.serializer = self.serializer if serializer is None else serializer self.queue_ttl = queue_ttl self.queue_expires = queue_expires self.reply_queue_ttl = reply_queue_ttl self.reply_queue_expires = reply_queue_expires self._producer_pool = producer_pool def __call__(self, connection): bound = copy(self) bound.connection = connection return bound def Node(self, hostname=None, state=None, channel=None, handlers=None): hostname = hostname or socket.gethostname() return self.node_cls(hostname, state, channel, handlers, mailbox=self) def call(self, destination, command, kwargs=None, timeout=None, callback=None, channel=None): kwargs = {} if not kwargs else kwargs return self._broadcast(command, kwargs, destination, reply=True, timeout=timeout, callback=callback, channel=channel) def cast(self, destination, command, kwargs=None): kwargs = {} if not kwargs else kwargs return self._broadcast(command, kwargs, destination, reply=False) def abcast(self, command, kwargs=None): kwargs = {} if not kwargs else kwargs return self._broadcast(command, kwargs, reply=False) def multi_call(self, command, kwargs=None, timeout=1, limit=None, callback=None, channel=None): kwargs = {} if not kwargs else kwargs return self._broadcast(command, kwargs, reply=True, timeout=timeout, limit=limit, callback=callback, channel=channel) def get_reply_queue(self): oid = self.oid return Queue( f'{oid}.{self.reply_exchange.name}', exchange=self.reply_exchange, routing_key=oid, durable=False, auto_delete=True, expires=self.reply_queue_expires, message_ttl=self.reply_queue_ttl, ) @cached_property def reply_queue(self): return self.get_reply_queue() def get_queue(self, hostname): return Queue( f'{hostname}.{self.namespace}.pidbox', exchange=self.exchange, durable=False, auto_delete=True, expires=self.queue_expires, message_ttl=self.queue_ttl, ) @contextmanager def producer_or_acquire(self, producer=None, channel=None): if producer: yield producer elif self.producer_pool: with self.producer_pool.acquire() as producer: yield producer else: yield Producer(channel, auto_declare=False) def _publish_reply(self, reply, exchange, routing_key, ticket, channel=None, producer=None, **opts): chan = channel or self.connection.default_channel exchange = Exchange(exchange, exchange_type='direct', delivery_mode='transient', durable=False) with self.producer_or_acquire(producer, chan) as producer: try: producer.publish( reply, exchange=exchange, routing_key=routing_key, declare=[exchange], headers={ 'ticket': ticket, 'clock': self.clock.forward(), }, retry=True, **opts ) except InconsistencyError: # queue probably deleted and no one is expecting a reply. 
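# Replying is best-effort: the reply queue is declared auto-delete
# with a short ``expires``, so when the caller has already stopped
# waiting we simply drop the reply instead of failing.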
pass def _publish(self, type, arguments, destination=None, reply_ticket=None, channel=None, timeout=None, serializer=None, producer=None, pattern=None, matcher=None): message = {'method': type, 'arguments': arguments, 'destination': destination, 'pattern': pattern, 'matcher': matcher} chan = channel or self.connection.default_channel exchange = self.exchange if reply_ticket: maybe_declare(self.reply_queue(chan)) message.update(ticket=reply_ticket, reply_to={'exchange': self.reply_exchange.name, 'routing_key': self.oid}) serializer = serializer or self.serializer with self.producer_or_acquire(producer, chan) as producer: producer.publish( message, exchange=exchange.name, declare=[exchange], headers={'clock': self.clock.forward(), 'expires': time() + timeout if timeout else 0}, serializer=serializer, retry=True, ) def _broadcast(self, command, arguments=None, destination=None, reply=False, timeout=1, limit=None, callback=None, channel=None, serializer=None, pattern=None, matcher=None): if destination is not None and \ not isinstance(destination, (list, tuple)): raise ValueError( 'destination must be a list/tuple not {}'.format( type(destination))) if (pattern is not None and not isinstance(pattern, str) and matcher is not None and not isinstance(matcher, str)): raise ValueError( 'pattern and matcher must be ' 'strings not {}, {}'.format(type(pattern), type(matcher)) ) arguments = arguments or {} reply_ticket = reply and uuid() or None chan = channel or self.connection.default_channel # Set reply limit to number of destinations (if specified) if limit is None and destination: limit = destination and len(destination) or None serializer = serializer or self.serializer self._publish(command, arguments, destination=destination, reply_ticket=reply_ticket, channel=chan, timeout=timeout, serializer=serializer, pattern=pattern, matcher=matcher) if reply_ticket: return self._collect(reply_ticket, limit=limit, timeout=timeout, callback=callback, channel=chan) def _collect(self, ticket, limit=None, timeout=1, callback=None, channel=None, accept=None): if accept is None: accept = self.accept chan = channel or self.connection.default_channel queue = self.reply_queue consumer = Consumer(chan, [queue], accept=accept, no_ack=True) responses = [] unclaimed = self.unclaimed adjust_clock = self.clock.adjust try: return unclaimed.pop(ticket) except KeyError: pass def on_message(body, message): # ticket header added in kombu 2.5 header = message.headers.get adjust_clock(header('clock') or 0) expires = header('expires') if expires and time() > expires: return this_id = header('ticket', ticket) if this_id == ticket: if callback: callback(body) responses.append(body) else: unclaimed[this_id].append(body) consumer.register_callback(on_message) try: with consumer: for i in limit and range(limit) or count(): try: self.connection.drain_events(timeout=timeout) except socket.timeout: break return responses finally: chan.after_reply_message_received(queue.name) def _get_exchange(self, namespace, type): return Exchange(self.exchange_fmt % namespace, type=type, durable=False, delivery_mode='transient') def _get_reply_exchange(self, namespace): return Exchange(self.reply_exchange_fmt % namespace, type='direct', durable=False, delivery_mode='transient') @property def oid(self): return oid_from(self) @cached_property def producer_pool(self): return maybe_evaluate(self._producer_pool) kombu-5.5.3/kombu/pools.py000066400000000000000000000076071477772317200155170ustar00rootroot00000000000000"""Public resource pools.""" from 
__future__ import annotations import os from itertools import chain from .connection import Resource from .messaging import Producer from .utils.collections import EqualityDict from .utils.compat import register_after_fork from .utils.functional import lazy __all__ = ('ProducerPool', 'PoolGroup', 'register_group', 'connections', 'producers', 'get_limit', 'set_limit', 'reset') _limit = [10] _groups = [] use_global_limit = object() disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION') def _after_fork_cleanup_group(group): group.clear() class ProducerPool(Resource): """Pool of :class:`kombu.Producer` instances.""" Producer = Producer close_after_fork = True def __init__(self, connections, *args, **kwargs): self.connections = connections self.Producer = kwargs.pop('Producer', None) or self.Producer super().__init__(*args, **kwargs) def _acquire_connection(self): return self.connections.acquire(block=True) def create_producer(self): conn = self._acquire_connection() try: return self.Producer(conn) except BaseException: conn.release() raise def new(self): return lazy(self.create_producer) def setup(self): if self.limit: for _ in range(self.limit): self._resource.put_nowait(self.new()) def close_resource(self, resource): pass def prepare(self, p): if callable(p): p = p() if p._channel is None: conn = self._acquire_connection() try: p.revive(conn) except BaseException: conn.release() raise return p def release(self, resource): if resource.__connection__: resource.__connection__.release() resource.channel = None super().release(resource) class PoolGroup(EqualityDict): """Collection of resource pools.""" def __init__(self, limit=None, close_after_fork=True): self.limit = limit self.close_after_fork = close_after_fork if self.close_after_fork and register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_group) def create(self, resource, limit): raise NotImplementedError('PoolGroups must define ``create``') def __missing__(self, resource): limit = self.limit if limit is use_global_limit: limit = get_limit() k = self[resource] = self.create(resource, limit) return k def register_group(group): """Register group (can be used as decorator).""" _groups.append(group) return group class Connections(PoolGroup): """Collection of connection pools.""" def create(self, connection, limit): return connection.Pool(limit=limit) connections = register_group(Connections(limit=use_global_limit)) class Producers(PoolGroup): """Collection of producer pools.""" def create(self, connection, limit): return ProducerPool(connections[connection], limit=limit) producers = register_group(Producers(limit=use_global_limit)) def _all_pools(): return chain(*((g.values() if g else iter([])) for g in _groups)) def get_limit(): """Get current connection pool limit.""" return _limit[0] def set_limit(limit, force=False, reset_after=False, ignore_errors=False): """Set new connection pool limit.""" limit = limit or 0 glimit = _limit[0] or 0 if limit != glimit: _limit[0] = limit for pool in _all_pools(): pool.resize(limit) return limit def reset(*args, **kwargs): """Reset all pools by closing open resources.""" for pool in _all_pools(): try: pool.force_close_all() except Exception: pass for group in _groups: group.clear() kombu-5.5.3/kombu/resource.py000066400000000000000000000176261477772317200162140ustar00rootroot00000000000000"""Generic resource pool implementation.""" from __future__ import annotations import os from collections import deque from queue import Empty from queue import LifoQueue as 
_LifoQueue from typing import TYPE_CHECKING from . import exceptions from .utils.compat import register_after_fork from .utils.functional import lazy if TYPE_CHECKING: from types import TracebackType def _after_fork_cleanup_resource(resource): try: resource.force_close_all() except Exception: pass class LifoQueue(_LifoQueue): """Last in first out version of Queue.""" def _init(self, maxsize): self.queue = deque() class Resource: """Pool of resources.""" LimitExceeded = exceptions.LimitExceeded close_after_fork = False def __init__(self, limit=None, preload=None, close_after_fork=None): self._limit = limit self.preload = preload or 0 self._closed = False self.close_after_fork = ( close_after_fork if close_after_fork is not None else self.close_after_fork ) self._resource = LifoQueue() self._dirty = set() if self.close_after_fork and register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_resource) self.setup() def setup(self): raise NotImplementedError('subclass responsibility') def _add_when_empty(self): if self.limit and len(self._dirty) >= self.limit: raise self.LimitExceeded(self.limit) # All taken, put new on the queue and # try get again, this way the first in line # will get the resource. self._resource.put_nowait(self.new()) def acquire(self, block=False, timeout=None): """Acquire resource. Arguments: --------- block (bool): If the limit is exceeded, then block until there is an available item. timeout (float): Timeout to wait if ``block`` is true. Default is :const:`None` (forever). Raises ------ LimitExceeded: if block is false and the limit has been exceeded. """ if self._closed: raise RuntimeError('Acquire on closed pool') if self.limit: while 1: try: R = self._resource.get(block=block, timeout=timeout) except Empty: self._add_when_empty() else: try: R = self.prepare(R) except BaseException: if isinstance(R, lazy): # not evaluated yet, just put it back self._resource.put_nowait(R) else: # evaluated so must try to release/close first. self.release(R) raise self._dirty.add(R) break else: R = self.prepare(self.new()) def release(): """Release resource so it can be used by another thread. Warnings: -------- The caller is responsible for discarding the object, and to never use the resource again. A new resource must be acquired if so needed. """ self.release(R) R.release = release return R def prepare(self, resource): return resource def close_resource(self, resource): resource.close() def release_resource(self, resource): pass def replace(self, resource): """Replace existing resource with a new instance. This can be used in case of defective resources. """ if self.limit: self._dirty.discard(resource) self.close_resource(resource) def release(self, resource): if self.limit: self._dirty.discard(resource) self._resource.put_nowait(resource) self.release_resource(resource) else: self.close_resource(resource) def collect_resource(self, resource): pass def force_close_all(self, close_pool=True): """Close and remove all resources in the pool (also those in use). Used to close resources from parent processes after fork (e.g. sockets/connections). Arguments: --------- close_pool (bool): If True (default) then the pool is marked as closed. In case of False the pool can be reused. 
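Example:
-------
A sketch (the broker URL is hypothetical) using the higher level
pool groups from :mod:`kombu.pools`; their ``reset()`` helper ends
up calling this method on every registered pool:

.. code-block:: python

    from kombu import Connection
    from kombu.pools import connections, producers, reset

    conn = Connection('amqp://guest:guest@localhost//')

    with connections[conn].acquire(block=True) as connection:
        connection.ensure_connection()

    with producers[conn].acquire(block=True) as producer:
        producer.publish({'hello': 'world'},
                         exchange='', routing_key='hello')

    reset()  # force-closes everything, including resources still in use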
""" if self._closed: return self._closed = close_pool dirty = self._dirty resource = self._resource while 1: # - acquired try: dres = dirty.pop() except KeyError: break try: self.collect_resource(dres) except AttributeError: # Issue #78 pass while 1: # - available # deque supports '.clear', but lists do not, so for that # reason we use pop here, so that the underlying object can # be any object supporting '.pop' and '.append'. try: res = resource.queue.pop() except IndexError: break try: self.collect_resource(res) except AttributeError: pass # Issue #78 def resize(self, limit, force=False, ignore_errors=False, reset=False): prev_limit = self._limit if (self._dirty and 0 < limit < self._limit) and not ignore_errors: if not force: raise RuntimeError( "Can't shrink pool when in use: was={} now={}".format( self._limit, limit)) reset = True self._limit = limit if reset: try: self.force_close_all(close_pool=False) except Exception: pass self.setup() if limit < prev_limit: self._shrink_down(collect=limit > 0) def _shrink_down(self, collect=True): class Noop: def __enter__(self): pass def __exit__( self, exc_type: type, exc_val: Exception, exc_tb: TracebackType ) -> None: pass resource = self._resource # Items to the left are last recently used, so we remove those first. with getattr(resource, 'mutex', Noop()): # keep in mind the dirty resources are not shrinking while len(resource.queue) and \ (len(resource.queue) + len(self._dirty)) > self.limit: R = resource.queue.popleft() if collect: self.collect_resource(R) @property def limit(self): return self._limit @limit.setter def limit(self, limit): self.resize(limit) if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover _orig_acquire = acquire _orig_release = release _next_resource_id = 0 def acquire(self, *args, **kwargs): import traceback id = self._next_resource_id = self._next_resource_id + 1 print(f'+{id} ACQUIRE {self.__class__.__name__}') r = self._orig_acquire(*args, **kwargs) r._resource_id = id print(f'-{id} ACQUIRE {self.__class__.__name__}') if not hasattr(r, 'acquired_by'): r.acquired_by = [] r.acquired_by.append(traceback.format_stack()) return r def release(self, resource): id = resource._resource_id print(f'+{id} RELEASE {self.__class__.__name__}') r = self._orig_release(resource) print(f'-{id} RELEASE {self.__class__.__name__}') self._next_resource_id -= 1 return r kombu-5.5.3/kombu/serialization.py000066400000000000000000000361261477772317200172360ustar00rootroot00000000000000"""Serialization utilities.""" from __future__ import annotations import codecs import os import pickle import sys from collections import namedtuple from contextlib import contextmanager from io import BytesIO from .exceptions import (ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled, reraise) from .utils.compat import entrypoints from .utils.encoding import bytes_to_str, str_to_bytes __all__ = ('pickle', 'loads', 'dumps', 'register', 'unregister') SKIP_DECODE = frozenset(['binary', 'ascii-8bit']) TRUSTED_CONTENT = frozenset(['application/data', 'application/text']) if sys.platform.startswith('java'): # pragma: no cover def _decode(t, coding): return codecs.getdecoder(coding)(t)[0] else: _decode = codecs.decode pickle_load = pickle.load #: We have to use protocol 4 until we drop support for Python 3.6 and 3.7. 
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 4)) codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder')) @contextmanager def _reraise_errors(wrapper, include=(Exception,), exclude=(SerializerNotInstalled,)): try: yield except exclude: raise except include as exc: reraise(wrapper, wrapper(exc), sys.exc_info()[2]) def pickle_loads(s, load=pickle_load): # used to support buffer objects return load(BytesIO(s)) def parenthesize_alias(first, second): return f'{first} ({second})' if first else second class SerializerRegistry: """The registry keeps track of serialization methods.""" def __init__(self): self._encoders = {} self._decoders = {} self._default_encode = None self._default_content_type = None self._default_content_encoding = None self._disabled_content_types = set() self.type_to_name = {} self.name_to_type = {} def register(self, name, encoder, decoder, content_type, content_encoding='utf-8'): """Register a new encoder/decoder. Arguments: --------- name (str): A convenience name for the serialization method. encoder (callable): A method that will be passed a python data structure and should return a string representing the serialized data. If :const:`None`, then only a decoder will be registered. Encoding will not be possible. decoder (Callable): A method that will be passed a string representing serialized data and should return a python data structure. If :const:`None`, then only an encoder will be registered. Decoding will not be possible. content_type (str): The mime-type describing the serialized structure. content_encoding (str): The content encoding (character set) that the `decoder` method will be returning. Will usually be `utf-8`, `us-ascii`, or `binary`. """ if encoder: self._encoders[name] = codec( content_type, content_encoding, encoder, ) if decoder: self._decoders[content_type] = decoder self.type_to_name[content_type] = name self.name_to_type[name] = content_type def enable(self, name): if '/' not in name: name = self.name_to_type[name] self._disabled_content_types.discard(name) def disable(self, name): if '/' not in name: name = self.name_to_type[name] self._disabled_content_types.add(name) def unregister(self, name): """Unregister registered encoder/decoder. Arguments: --------- name (str): Registered serialization method name. Raises ------ SerializerNotInstalled: If a serializer by that name cannot be found. """ try: content_type = self.name_to_type[name] self._decoders.pop(content_type, None) self._encoders.pop(name, None) self.type_to_name.pop(content_type, None) self.name_to_type.pop(name, None) except KeyError: raise SerializerNotInstalled( f'No encoder/decoder installed for {name}') def _set_default_serializer(self, name): """Set the default serialization method used by this library. Arguments: --------- name (str): The name of the registered serialization method. For example, `json` (default), `pickle`, `yaml`, `msgpack`, or any custom methods registered using :meth:`register`. Raises ------ SerializerNotInstalled: If the serialization method requested is not available. """ try: (self._default_content_type, self._default_content_encoding, self._default_encode) = self._encoders[name] except KeyError: raise SerializerNotInstalled( f'No encoder installed for {name}') def dumps(self, data, serializer=None): """Encode data. Serialize a data structure into a string suitable for sending as an AMQP message body. Arguments: --------- data (List, Dict, str): The message data to send. 
serializer (str): An optional string representing the serialization method you want the data marshalled into. (For example, `json`, `raw`, or `pickle`). If :const:`None` (default), then json will be used, unless `data` is a :class:`str` or :class:`unicode` object. In this latter case, no serialization occurs as it would be unnecessary. Note that if `serializer` is specified, then that serialization method will be used even if a :class:`str` or :class:`unicode` object is passed in. Returns ------- Tuple[str, str, str]: A three-item tuple containing the content type (e.g., `application/json`), content encoding, (e.g., `utf-8`) and a string containing the serialized data. Raises ------ SerializerNotInstalled: If the serialization method requested is not available. """ if serializer == 'raw': return raw_encode(data) if serializer and not self._encoders.get(serializer): raise SerializerNotInstalled( f'No encoder installed for {serializer}') # If a raw string was sent, assume binary encoding # (it's likely either ASCII or a raw binary file, and a character # set of 'binary' will encompass both, even if not ideal. if not serializer and isinstance(data, bytes): # In Python 3+, this would be "bytes"; allow binary data to be # sent as a message without getting encoder errors return 'application/data', 'binary', data # For Unicode objects, force it into a string if not serializer and isinstance(data, str): with _reraise_errors(EncodeError, exclude=()): payload = data.encode('utf-8') return 'text/plain', 'utf-8', payload if serializer: content_type, content_encoding, encoder = \ self._encoders[serializer] else: encoder = self._default_encode content_type = self._default_content_type content_encoding = self._default_content_encoding with _reraise_errors(EncodeError): payload = encoder(data) return content_type, content_encoding, payload def loads(self, data, content_type, content_encoding, accept=None, force=False, _trusted_content=TRUSTED_CONTENT): """Decode serialized data. Deserialize a data stream as serialized using `dumps` based on `content_type`. Arguments: --------- data (bytes, buffer, str): The message data to deserialize. content_type (str): The content-type of the data. (e.g., `application/json`). content_encoding (str): The content-encoding of the data. (e.g., `utf-8`, `binary`, or `us-ascii`). accept (Set): List of content-types to accept. Raises ------ ContentDisallowed: If the content-type is not accepted. Returns ------- Any: The unserialized data. """ content_type = (bytes_to_str(content_type) if content_type else 'application/data') if accept is not None: if content_type not in _trusted_content \ and content_type not in accept: raise self._for_untrusted_content(content_type, 'untrusted') else: if content_type in self._disabled_content_types and not force: raise self._for_untrusted_content(content_type, 'disabled') content_encoding = (content_encoding or 'utf-8').lower() if data: decode = self._decoders.get(content_type) if decode: with _reraise_errors(DecodeError): return decode(data) if content_encoding not in SKIP_DECODE and \ not isinstance(data, str): with _reraise_errors(DecodeError): return _decode(data, content_encoding) return data def _for_untrusted_content(self, ctype, why): return ContentDisallowed( 'Refusing to deserialize {} content of type {}'.format( why, parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype), ), ) #: Global registry of serializers/deserializers. 
registry = SerializerRegistry() dumps = registry.dumps loads = registry.loads register = registry.register unregister = registry.unregister def raw_encode(data): """Special case serializer.""" content_type = 'application/data' payload = data if isinstance(payload, str): content_encoding = 'utf-8' with _reraise_errors(EncodeError, exclude=()): payload = payload.encode(content_encoding) else: content_encoding = 'binary' return content_type, content_encoding, payload def register_json(): """Register a encoder/decoder for JSON serialization.""" from kombu.utils import json as _json registry.register('json', _json.dumps, _json.loads, content_type='application/json', content_encoding='utf-8') def register_yaml(): """Register a encoder/decoder for YAML serialization. It is slower than JSON, but allows for more data types to be serialized. Useful if you need to send data such as dates """ try: import yaml registry.register('yaml', yaml.safe_dump, yaml.safe_load, content_type='application/x-yaml', content_encoding='utf-8') except ImportError: def not_available(*args, **kwargs): """Raise SerializerNotInstalled. Used in case a client receives a yaml message, but yaml isn't installed. """ raise SerializerNotInstalled( 'No decoder installed for YAML. Install the PyYAML library') registry.register('yaml', None, not_available, 'application/x-yaml') def unpickle(s): return pickle_loads(str_to_bytes(s)) def register_pickle(): """Register pickle serializer. The fastest serialization method, but restricts you to python clients. """ def pickle_dumps(obj, dumper=pickle.dumps): return dumper(obj, protocol=pickle_protocol) registry.register('pickle', pickle_dumps, unpickle, content_type='application/x-python-serialize', content_encoding='binary') def register_msgpack(): """Register msgpack serializer. See Also -------- https://msgpack.org/. """ pack = unpack = None try: import msgpack if msgpack.version >= (0, 4): from msgpack import packb, unpackb def pack(s): # noqa return packb(s, use_bin_type=True) def unpack(s): # noqa return unpackb(s, raw=False) else: def version_mismatch(*args, **kwargs): raise SerializerNotInstalled( 'msgpack requires msgpack-python >= 0.4.0') pack = unpack = version_mismatch except (ImportError, ValueError): def not_available(*args, **kwargs): raise SerializerNotInstalled( 'No decoder installed for msgpack. ' 'Please install the msgpack-python library') pack = unpack = not_available registry.register( 'msgpack', pack, unpack, content_type='application/x-msgpack', content_encoding='binary', ) # Register the base serialization methods. register_json() register_pickle() register_yaml() register_msgpack() # Default serializer is 'json' registry._set_default_serializer('json') NOTSET = object() def enable_insecure_serializers(choices=NOTSET): """Enable serializers that are considered to be unsafe. Note: ---- Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, but you can also specify a list of serializers (by name or content type) to enable. """ choices = ['pickle', 'yaml', 'msgpack'] if choices is NOTSET else choices if choices is not None: for choice in choices: try: registry.enable(choice) except KeyError: pass def disable_insecure_serializers(allowed=NOTSET): """Disable untrusted serializers. Will disable all serializers except ``json`` or you can specify a list of deserializers to allow. Note: ---- Producers will still be able to serialize data in these formats, but consumers will not accept incoming data using the untrusted content types. 
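Example:
-------
A short sketch: besides the default ``json``, also accept ``yaml``
on the consuming side:

.. code-block:: python

    from kombu.serialization import disable_insecure_serializers

    disable_insecure_serializers(allowed=['json', 'yaml'])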
""" allowed = ['json'] if allowed is NOTSET else allowed for name in registry._decoders: registry.disable(name) if allowed is not None: for name in allowed: registry.enable(name) # Insecure serializers are disabled by default since v3.0 disable_insecure_serializers() # Load entrypoints from installed extensions for ep, args in entrypoints('kombu.serializers'): # pragma: no cover register(ep.name, *args) def prepare_accept_content(content_types, name_to_type=None): """Replace aliases of content_types with full names from registry. Raises ------ SerializerNotInstalled: If the serialization method requested is not available. """ name_to_type = registry.name_to_type if not name_to_type else name_to_type if content_types is not None: try: return {n if '/' in n else name_to_type[n] for n in content_types} except KeyError as e: raise SerializerNotInstalled( f'No encoder/decoder installed for {e.args[0]}') return content_types kombu-5.5.3/kombu/simple.py000066400000000000000000000122661477772317200156510ustar00rootroot00000000000000"""Simple messaging interface.""" from __future__ import annotations import socket from collections import deque from queue import Empty from time import monotonic from typing import TYPE_CHECKING from . import entity, messaging from .connection import maybe_channel if TYPE_CHECKING: from types import TracebackType __all__ = ('SimpleQueue', 'SimpleBuffer') class SimpleBase: Empty = Empty _consuming = False def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() def __init__(self, channel, producer, consumer, no_ack=False): self.channel = maybe_channel(channel) self.producer = producer self.consumer = consumer self.no_ack = no_ack self.queue = self.consumer.queues[0] self.buffer = deque() self.consumer.register_callback(self._receive) def get(self, block=True, timeout=None): if not block: return self.get_nowait() self._consume() time_start = monotonic() remaining = timeout while True: if self.buffer: return self.buffer.popleft() if remaining is not None and remaining <= 0.0: raise self.Empty() try: # The `drain_events` method will # block on the socket connection to rabbitmq. if any # application-level messages are received, it will put them # into `self.buffer`. # * The method will block for UP TO `timeout` milliseconds. # * The method may raise a socket.timeout exception; or... # * The method may return without having put anything on # `self.buffer`. This is because internal heartbeat # messages are sent over the same socket; also POSIX makes # no guarantees against socket calls returning early. 
self.channel.connection.client.drain_events(timeout=remaining) except socket.timeout: raise self.Empty() if remaining is not None: elapsed = monotonic() - time_start remaining = timeout - elapsed def get_nowait(self): m = self.queue.get(no_ack=self.no_ack, accept=self.consumer.accept) if not m: raise self.Empty() return m def put(self, message, serializer=None, headers=None, compression=None, routing_key=None, **kwargs): self.producer.publish(message, serializer=serializer, routing_key=routing_key, headers=headers, compression=compression, **kwargs) def clear(self): return self.consumer.purge() def qsize(self): _, size, _ = self.queue.queue_declare(passive=True) return size def close(self): self.consumer.cancel() def _receive(self, message_data, message): self.buffer.append(message) def _consume(self): if not self._consuming: self.consumer.consume(no_ack=self.no_ack) self._consuming = True def __len__(self): """`len(self) -> self.qsize()`.""" return self.qsize() def __bool__(self): return True __nonzero__ = __bool__ class SimpleQueue(SimpleBase): """Simple API for persistent queues.""" no_ack = False queue_opts = {} queue_args = {} exchange_opts = {'type': 'direct'} def __init__(self, channel, name, no_ack=None, queue_opts=None, queue_args=None, exchange_opts=None, serializer=None, compression=None, accept=None): queue = name queue_opts = dict(self.queue_opts, **queue_opts or {}) queue_args = dict(self.queue_args, **queue_args or {}) exchange_opts = dict(self.exchange_opts, **exchange_opts or {}) if no_ack is None: no_ack = self.no_ack if not isinstance(queue, entity.Queue): exchange = entity.Exchange(name, **exchange_opts) queue = entity.Queue(name, exchange, name, queue_arguments=queue_args, **queue_opts) routing_key = name else: exchange = queue.exchange routing_key = queue.routing_key consumer = messaging.Consumer(channel, queue, accept=accept) producer = messaging.Producer(channel, exchange, serializer=serializer, routing_key=routing_key, compression=compression) super().__init__(channel, producer, consumer, no_ack) class SimpleBuffer(SimpleQueue): """Simple API for ephemeral queues.""" no_ack = True queue_opts = {'durable': False, 'auto_delete': True} exchange_opts = {'durable': False, 'delivery_mode': 'transient', 'auto_delete': True} kombu-5.5.3/kombu/transport/000077500000000000000000000000001477772317200160335ustar00rootroot00000000000000kombu-5.5.3/kombu/transport/SLMQ.py000066400000000000000000000141071477772317200171640ustar00rootroot00000000000000"""SoftLayer Message Queue transport module for kombu. Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: No * Supports Priority: No * Supports TTL: No Connection String ================= *Unreviewed* Transport Options ================= *Unreviewed* """ from __future__ import annotations import os import socket import string from queue import Empty from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual try: from softlayer_messaging import get_client from softlayer_messaging.errors import ResponseError except ImportError: # pragma: no cover get_client = ResponseError = None # dots are replaced by dash, all other punctuation replaced by underscore. CHARS_REPLACE_TABLE = { ord(c): 0x5f for c in string.punctuation if c not in '_' } class Channel(virtual.Channel): """SLMQ Channel.""" default_visibility_timeout = 1800 # 30 minutes. 
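# The visibility timeout above is how long (in seconds) a popped
# message stays hidden from other consumers before SLMQ makes it
# deliverable again, unless it is deleted (acknowledged) first.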
domain_format = 'kombu%(vhost)s' _slmq = None _queue_cache = {} _noack_queues = set() def __init__(self, *args, **kwargs): if get_client is None: raise ImportError( 'SLMQ transport requires the softlayer_messaging library', ) super().__init__(*args, **kwargs) queues = self.slmq.queues() for queue in queues: self._queue_cache[queue] = queue def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) return super().basic_consume(queue, no_ack, *args, **kwargs) def basic_cancel(self, consumer_tag): if consumer_tag in self._consumers: queue = self._tag_to_queue[consumer_tag] self._noack_queues.discard(queue) return super().basic_cancel(consumer_tag) def entity_name(self, name, table=CHARS_REPLACE_TABLE): """Format AMQP queue name into a valid SLQS queue name.""" return str(safe_str(name)).translate(table) def _new_queue(self, queue, **kwargs): """Ensure a queue exists in SLQS.""" queue = self.entity_name(self.queue_name_prefix + queue) try: return self._queue_cache[queue] except KeyError: try: self.slmq.create_queue( queue, visibility_timeout=self.visibility_timeout) except ResponseError: pass q = self._queue_cache[queue] = self.slmq.queue(queue) return q def _delete(self, queue, *args, **kwargs): """Delete queue by name.""" queue_name = self.entity_name(queue) self._queue_cache.pop(queue_name, None) self.slmq.queue(queue_name).delete(force=True) super()._delete(queue_name) def _put(self, queue, message, **kwargs): """Put message onto queue.""" q = self._new_queue(queue) q.push(dumps(message)) def _get(self, queue): """Try to retrieve a single message off ``queue``.""" q = self._new_queue(queue) rs = q.pop(1) if rs['items']: m = rs['items'][0] payload = loads(bytes_to_str(m['body'])) if queue in self._noack_queues: q.message(m['id']).delete() else: payload['properties']['delivery_info'].update({ 'slmq_message_id': m['id'], 'slmq_queue_name': q.name}) return payload raise Empty() def basic_ack(self, delivery_tag): delivery_info = self.qos.get(delivery_tag).delivery_info try: queue = delivery_info['slmq_queue_name'] except KeyError: pass else: self.delete_message(queue, delivery_info['slmq_message_id']) super().basic_ack(delivery_tag) def _size(self, queue): """Return the number of messages in a queue.""" return self._new_queue(queue).detail()['message_count'] def _purge(self, queue): """Delete all current messages in a queue.""" q = self._new_queue(queue) n = 0 results = q.pop(10) while results['items']: for m in results['items']: self.delete_message(queue, m['id']) n += 1 results = q.pop(10) return n def delete_message(self, queue, message_id): q = self.slmq.queue(self.entity_name(queue)) return q.message(message_id).delete() @property def slmq(self): if self._slmq is None: conninfo = self.conninfo account = os.environ.get('SLMQ_ACCOUNT', conninfo.virtual_host) user = os.environ.get('SL_USERNAME', conninfo.userid) api_key = os.environ.get('SL_API_KEY', conninfo.password) host = os.environ.get('SLMQ_HOST', conninfo.hostname) port = os.environ.get('SLMQ_PORT', conninfo.port) secure = bool(os.environ.get( 'SLMQ_SECURE', self.transport_options.get('secure')) or True, ) endpoint = '{}://{}{}'.format( 'https' if secure else 'http', host, f':{port}' if port else '', ) self._slmq = get_client(account, endpoint=endpoint) self._slmq.authenticate(user, api_key) return self._slmq @property def conninfo(self): return self.connection.client @property def transport_options(self): return self.connection.client.transport_options @cached_property def 
visibility_timeout(self): return (self.transport_options.get('visibility_timeout') or self.default_visibility_timeout) @cached_property def queue_name_prefix(self): return self.transport_options.get('queue_name_prefix', '') class Transport(virtual.Transport): """SLMQ Transport.""" Channel = Channel polling_interval = 1 default_port = None connection_errors = ( virtual.Transport.connection_errors + ( ResponseError, socket.error ) ) kombu-5.5.3/kombu/transport/SQS.py000066400000000000000000001070771477772317200170670ustar00rootroot00000000000000"""Amazon SQS transport module for Kombu. This package implements an AMQP-like interface on top of Amazons SQS service, with the goal of being optimized for high performance and reliability. The default settings for this module are focused now on high performance in task queue situations where tasks are small, idempotent and run very fast. SQS Features supported by this transport ======================================== Long Polling ------------ https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html Long polling is enabled by setting the `wait_time_seconds` transport option to a number > 1. Amazon supports up to 20 seconds. This is enabled with 10 seconds by default. Batch API Actions ----------------- https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-batch-api.html The default behavior of the SQS Channel.drain_events() method is to request up to the 'prefetch_count' messages on every request to SQS. These messages are stored locally in a deque object and passed back to the Transport until the deque is empty, before triggering a new API call to Amazon. This behavior dramatically speeds up the rate that you can pull tasks from SQS when you have short-running tasks (or a large number of workers). When a Celery worker has multiple queues to monitor, it will pull down up to 'prefetch_count' messages from queueA and work on them all before moving on to queueB. If queueB is empty, it will wait up until 'polling_interval' expires before moving back and checking on queueA. Message Attributes ----------------- https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html SQS supports sending message attributes along with the message body. To use this feature, you can pass a 'message_attributes' as keyword argument to `basic_publish` method. Other Features supported by this transport ========================================== Predefined Queues ----------------- The default behavior of this transport is to use a single AWS credential pair in order to manage all SQS queues (e.g. listing queues, creating queues, polling queues, deleting messages). If it is preferable for your environment to use multiple AWS credentials, you can use the 'predefined_queues' setting inside the 'transport_options' map. This setting allows you to specify the SQS queue URL and AWS credentials for each of your queues. For example, if you have two queues which both already exist in AWS) you can tell this transport about them as follows: .. 
code-block:: python transport_options = { 'predefined_queues': { 'queue-1': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/aaa', 'access_key_id': 'a', 'secret_access_key': 'b', 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, # optional 'backoff_tasks': ['svc.tasks.tasks.task1'] # optional }, 'queue-2.fifo': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/bbb.fifo', 'access_key_id': 'c', 'secret_access_key': 'd', 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, # optional 'backoff_tasks': ['svc.tasks.tasks.task2'] # optional }, } 'sts_role_arn': 'arn:aws:iam:::role/STSTest', # optional 'sts_token_timeout': 900 # optional } Note that FIFO and standard queues must be named accordingly (the name of a FIFO queue must end with the .fifo suffix). backoff_policy & backoff_tasks are optional arguments. These arguments automatically change the message visibility timeout, in order to have different times between specific task retries. This would apply after task failure. AWS STS authentication is supported, by using sts_role_arn, and sts_token_timeout. sts_role_arn is the assumed IAM role ARN we are trying to access with. sts_token_timeout is the token timeout, defaults (and minimum) to 900 seconds. After the mentioned period, a new token will be created. If you authenticate using Okta_ (e.g. calling |gac|_), you can also specify a 'session_token' to connect to a queue. Note that those tokens have a limited lifetime and are therefore only suited for short-lived tests. .. _Okta: https://www.okta.com/ .. _gac: https://github.com/Nike-Inc/gimme-aws-creds#readme .. |gac| replace:: ``gimme-aws-creds`` Client config ------------- In some cases you may need to override the botocore config. You can do it as follows: .. code-block:: python transport_option = { 'client-config': { 'connect_timeout': 5, }, } For a complete list of settings you can adjust using this option see https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: No * Supports TTL: No """ from __future__ import annotations import base64 import socket import string import uuid from datetime import datetime from queue import Empty from botocore.client import Config from botocore.exceptions import ClientError from vine import ensure_promise, promise, transform from kombu.asynchronous import get_event_loop from kombu.asynchronous.aws.ext import boto3, exceptions from kombu.asynchronous.aws.sqs.connection import AsyncSQSConnection from kombu.asynchronous.aws.sqs.message import AsyncMessage from kombu.log import get_logger from kombu.utils import scheduling from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual logger = get_logger(__name__) # dots are replaced by dash, dash remains dash, all other punctuation # replaced by underscore. CHARS_REPLACE_TABLE = { ord(c): 0x5f for c in string.punctuation if c not in '-_.' } CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-' #: SQS bulk get supports a maximum of 10 messages at a time. 
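# Illustrative sketch (not part of the module): the batch size used by
# _get_bulk() below is capped by both this constant and the channel's
# prefetch estimate, roughly, in the notation of the methods further down:
#
#     maxcount = qos.can_consume_max_estimate()
#     count = min(SQS_MAX_MESSAGES if maxcount is None else max(maxcount, 1),
#                 SQS_MAX_MESSAGES)
#
# so a prefetch_count of 4 asks SQS for at most 4 messages per call, while
# an unlimited prefetch asks for the SQS maximum of 10 (see
# _get_message_estimate() for the exact computation).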
SQS_MAX_MESSAGES = 10 def maybe_int(x): """Try to convert x' to int, or return x' if that fails.""" try: return int(x) except ValueError: return x class UndefinedQueueException(Exception): """Predefined queues are being used and an undefined queue was used.""" class InvalidQueueException(Exception): """Predefined queues are being used and configuration is not valid.""" class AccessDeniedQueueException(Exception): """Raised when access to the AWS queue is denied. This may occur if the permissions are not correctly set or the credentials are invalid. """ class DoesNotExistQueueException(Exception): """The specified queue doesn't exist.""" class QoS(virtual.QoS): """Quality of Service guarantees implementation for SQS.""" def reject(self, delivery_tag, requeue=False): super().reject(delivery_tag, requeue=requeue) routing_key, message, backoff_tasks, backoff_policy = \ self._extract_backoff_policy_configuration_and_message( delivery_tag) if routing_key and message and backoff_tasks and backoff_policy: self.apply_backoff_policy( routing_key, delivery_tag, backoff_policy, backoff_tasks) def _extract_backoff_policy_configuration_and_message(self, delivery_tag): try: message = self._delivered[delivery_tag] routing_key = message.delivery_info['routing_key'] except KeyError: return None, None, None, None if not routing_key or not message: return None, None, None, None queue_config = self.channel.predefined_queues.get(routing_key, {}) backoff_tasks = queue_config.get('backoff_tasks') backoff_policy = queue_config.get('backoff_policy') return routing_key, message, backoff_tasks, backoff_policy def apply_backoff_policy(self, routing_key, delivery_tag, backoff_policy, backoff_tasks): queue_url = self.channel._queue_cache[routing_key] task_name, number_of_retries = \ self.extract_task_name_and_number_of_retries(delivery_tag) if not task_name or not number_of_retries: return None policy_value = backoff_policy.get(number_of_retries) if task_name in backoff_tasks and policy_value is not None: c = self.channel.sqs(routing_key) c.change_message_visibility( QueueUrl=queue_url, ReceiptHandle=delivery_tag, VisibilityTimeout=policy_value ) def extract_task_name_and_number_of_retries(self, delivery_tag): message = self._delivered[delivery_tag] message_headers = message.headers task_name = message_headers['task'] number_of_retries = int( message.properties['delivery_info']['sqs_message'] ['Attributes']['ApproximateReceiveCount']) return task_name, number_of_retries class Channel(virtual.Channel): """SQS Channel.""" default_region = 'us-east-1' default_visibility_timeout = 1800 # 30 minutes. default_wait_time_seconds = 10 # up to 20 seconds max domain_format = 'kombu%(vhost)s' _asynsqs = None _predefined_queue_async_clients = {} # A client for each predefined queue _sqs = None _predefined_queue_clients = {} # A client for each predefined queue _queue_cache = {} # SQS queue name => SQS queue URL _noack_queues = set() QoS = QoS def __init__(self, *args, **kwargs): if boto3 is None: raise ImportError('boto3 is not installed') super().__init__(*args, **kwargs) self._validate_predifined_queues() # SQS blows up if you try to create a new queue when one already # exists but with a different visibility_timeout. This prepopulates # the queue_cache to protect us from recreating # queues that are known to already exist. self._update_queue_cache(self.queue_name_prefix) self.hub = kwargs.get('hub') or get_event_loop() def _validate_predifined_queues(self): """Check that standard and FIFO queues are named properly. 
AWS requires FIFO queues to have a name that ends with the .fifo suffix. """ for queue_name, q in self.predefined_queues.items(): fifo_url = q['url'].endswith('.fifo') fifo_name = queue_name.endswith('.fifo') if fifo_url and not fifo_name: raise InvalidQueueException( "Queue with url '{}' must have a name " "ending with .fifo".format(q['url']) ) elif not fifo_url and fifo_name: raise InvalidQueueException( "Queue with name '{}' is not a FIFO queue: " "'{}'".format(queue_name, q['url']) ) def _update_queue_cache(self, queue_name_prefix): if self.predefined_queues: for queue_name, q in self.predefined_queues.items(): self._queue_cache[queue_name] = q['url'] return resp = self.sqs().list_queues(QueueNamePrefix=queue_name_prefix) for url in resp.get('QueueUrls', []): queue_name = url.split('/')[-1] self._queue_cache[queue_name] = url def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) if self.hub: self._loop1(queue) return super().basic_consume( queue, no_ack, *args, **kwargs ) def basic_cancel(self, consumer_tag): if consumer_tag in self._consumers: queue = self._tag_to_queue[consumer_tag] self._noack_queues.discard(queue) return super().basic_cancel(consumer_tag) def drain_events(self, timeout=None, callback=None, **kwargs): """Return a single payload message from one of our queues. Raises ------ Queue.Empty: if no messages available. """ # If we're not allowed to consume or have no consumers, raise Empty if not self._consumers or not self.qos.can_consume(): raise Empty() # At this point, go and get more messages from SQS self._poll(self.cycle, callback, timeout=timeout) def _reset_cycle(self): """Reset the consume cycle. Returns ------- FairCycle: object that points to our _get_bulk() method rather than the standard _get() method. This allows for multiple messages to be returned at once from SQS ( based on the prefetch limit). """ self._cycle = scheduling.FairCycle( self._get_bulk, self._active_queues, Empty, ) def entity_name(self, name, table=CHARS_REPLACE_TABLE): """Format AMQP queue name into a legal SQS queue name.""" if name.endswith('.fifo'): partial = name[:-len('.fifo')] partial = str(safe_str(partial)).translate(table) return partial + '.fifo' else: return str(safe_str(name)).translate(table) def canonical_queue_name(self, queue_name): return self.entity_name(self.queue_name_prefix + queue_name) def _resolve_queue_url(self, queue): """Try to retrieve the SQS queue URL for a given queue name.""" # Translate to SQS name for consistency with initial # _queue_cache population. sqs_qname = self.canonical_queue_name(queue) # The SQS ListQueues method only returns 1000 queues. When you have # so many queues, it's possible that the queue you are looking for is # not cached. In this case, we could update the cache with the exact # queue name first. if sqs_qname not in self._queue_cache: self._update_queue_cache(sqs_qname) try: return self._queue_cache[sqs_qname] except KeyError: if self.predefined_queues: raise UndefinedQueueException(( "Queue with name '{}' must be " "defined in 'predefined_queues'." ).format(sqs_qname)) raise DoesNotExistQueueException( f"Queue with name '{sqs_qname}' doesn't exist in SQS" ) def _new_queue(self, queue, **kwargs): """Ensure a queue with given name exists in SQS. 
Arguments: --------- queue (str): the AMQP queue name Returns str: the SQS queue URL """ try: return self._resolve_queue_url(queue) except DoesNotExistQueueException: sqs_qname = self.canonical_queue_name(queue) attributes = {'VisibilityTimeout': str(self.visibility_timeout)} if sqs_qname.endswith('.fifo'): attributes['FifoQueue'] = 'true' resp = self._create_queue(sqs_qname, attributes) self._queue_cache[sqs_qname] = resp['QueueUrl'] return resp['QueueUrl'] def _create_queue(self, queue_name, attributes): """Create an SQS queue with a given name and nominal attributes.""" # Allow specifying additional boto create_queue Attributes # via transport options if self.predefined_queues: return None attributes.update( self.transport_options.get('sqs-creation-attributes') or {}, ) return self.sqs(queue=queue_name).create_queue( QueueName=queue_name, Attributes=attributes, ) def _delete(self, queue, *args, **kwargs): """Delete queue by name.""" if self.predefined_queues: return q_url = self._resolve_queue_url(queue) self.sqs().delete_queue( QueueUrl=q_url, ) self._queue_cache.pop(queue, None) def _put(self, queue, message, **kwargs): """Put message onto queue.""" q_url = self._new_queue(queue) kwargs = {'QueueUrl': q_url} if 'properties' in message: if 'message_attributes' in message['properties']: # we don't want to want to have the attribute in the body kwargs['MessageAttributes'] = \ message['properties'].pop('message_attributes') if queue.endswith('.fifo'): if 'MessageGroupId' in message['properties']: kwargs['MessageGroupId'] = \ message['properties']['MessageGroupId'] else: kwargs['MessageGroupId'] = 'default' if 'MessageDeduplicationId' in message['properties']: kwargs['MessageDeduplicationId'] = \ message['properties']['MessageDeduplicationId'] else: kwargs['MessageDeduplicationId'] = str(uuid.uuid4()) else: if "DelaySeconds" in message['properties']: kwargs['DelaySeconds'] = \ message['properties']['DelaySeconds'] if self.sqs_base64_encoding: body = AsyncMessage().encode(dumps(message)) else: body = dumps(message) kwargs['MessageBody'] = body c = self.sqs(queue=self.canonical_queue_name(queue)) if message.get('redelivered'): c.change_message_visibility( QueueUrl=q_url, ReceiptHandle=message['properties']['delivery_tag'], VisibilityTimeout=0 ) else: c.send_message(**kwargs) @staticmethod def _optional_b64_decode(byte_string): try: data = base64.b64decode(byte_string) if base64.b64encode(data) == byte_string: return data # else the base64 module found some embedded base64 content # that should be ignored. except Exception: # pylint: disable=broad-except pass return byte_string def _message_to_python(self, message, queue_name, q_url): body = self._optional_b64_decode(message['Body'].encode()) payload = loads(bytes_to_str(body)) if queue_name in self._noack_queues: q_url = self._new_queue(queue_name) self.asynsqs(queue=queue_name).delete_message( q_url, message['ReceiptHandle'], ) else: try: properties = payload['properties'] delivery_info = payload['properties']['delivery_info'] except KeyError: # json message not sent by kombu? delivery_info = {} properties = {'delivery_info': delivery_info} payload.update({ 'body': bytes_to_str(body), 'properties': properties, }) # set delivery tag to SQS receipt handle delivery_info.update({ 'sqs_message': message, 'sqs_queue': q_url, }) properties['delivery_tag'] = message['ReceiptHandle'] return payload def _messages_to_python(self, messages, queue): """Convert a list of SQS Message objects into Payloads. 
This method handles converting SQS Message objects into Payloads, and appropriately updating the queue depending on the 'ack' settings for that queue. Arguments: --------- messages (SQSMessage): A list of SQS Message objects. queue (str): Name representing the queue they came from. Returns ------- List: A list of Payload objects """ q_url = self._new_queue(queue) return [self._message_to_python(m, queue, q_url) for m in messages] def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES, callback=None): """Try to retrieve multiple messages off ``queue``. Where :meth:`_get` returns a single Payload object, this method returns a list of Payload objects. The number of objects returned is determined by the total number of messages available in the queue and the number of messages the QoS object allows (based on the prefetch_count). Note: ---- Ignores QoS limits so caller is responsible for checking that we are allowed to consume at least one message from the queue. get_bulk will then ask QoS for an estimate of the number of extra messages that we can consume. Arguments: --------- queue (str): The queue name to pull from. Returns ------- List[Message] """ # drain_events calls `can_consume` first, consuming # a token, so we know that we are allowed to consume at least # one message. # Note: ignoring max_messages for SQS with boto3 max_count = self._get_message_estimate() if max_count: q_url = self._new_queue(queue) resp = self.sqs(queue=queue).receive_message( QueueUrl=q_url, MaxNumberOfMessages=max_count, WaitTimeSeconds=self.wait_time_seconds) if resp.get('Messages'): for m in resp['Messages']: m['Body'] = AsyncMessage(body=m['Body']).decode() for msg in self._messages_to_python(resp['Messages'], queue): self.connection._deliver(msg, queue) return raise Empty() def _get(self, queue): """Try to retrieve a single message off ``queue``.""" q_url = self._new_queue(queue) resp = self.sqs(queue=queue).receive_message( QueueUrl=q_url, MaxNumberOfMessages=1, WaitTimeSeconds=self.wait_time_seconds) if resp.get('Messages'): body = AsyncMessage(body=resp['Messages'][0]['Body']).decode() resp['Messages'][0]['Body'] = body return self._messages_to_python(resp['Messages'], queue)[0] raise Empty() def _loop1(self, queue, _=None): self.hub.call_soon(self._schedule_queue, queue) def _schedule_queue(self, queue): if queue in self._active_queues: if self.qos.can_consume(): self._get_bulk_async( queue, callback=promise(self._loop1, (queue,)), ) else: self._loop1(queue) def _get_message_estimate(self, max_if_unlimited=SQS_MAX_MESSAGES): maxcount = self.qos.can_consume_max_estimate() return min( max_if_unlimited if maxcount is None else max(maxcount, 1), max_if_unlimited, ) def _get_bulk_async(self, queue, max_if_unlimited=SQS_MAX_MESSAGES, callback=None): maxcount = self._get_message_estimate() if maxcount: return self._get_async(queue, maxcount, callback=callback) # Not allowed to consume, make sure to notify callback.. 
callback = ensure_promise(callback) callback([]) return callback def _get_async(self, queue, count=1, callback=None): q_url = self._new_queue(queue) qname = self.canonical_queue_name(queue) return self._get_from_sqs( queue_name=qname, queue_url=q_url, count=count, connection=self.asynsqs(queue=qname), callback=transform( self._on_messages_ready, callback, q_url, queue ), ) def _on_messages_ready(self, queue, qname, messages): if 'Messages' in messages and messages['Messages']: callbacks = self.connection._callbacks for msg in messages['Messages']: msg_parsed = self._message_to_python(msg, qname, queue) callbacks[qname](msg_parsed) def _get_from_sqs(self, queue_name, queue_url, connection, count=1, callback=None): """Retrieve and handle messages from SQS. Uses long polling and returns :class:`~vine.promises.promise`. """ return connection.receive_message( queue_name, queue_url, number_messages=count, wait_time_seconds=self.wait_time_seconds, callback=callback, ) def _restore(self, message, unwanted_delivery_info=('sqs_message', 'sqs_queue')): for unwanted_key in unwanted_delivery_info: # Remove objects that aren't JSON serializable (Issue #1108). message.delivery_info.pop(unwanted_key, None) return super()._restore(message) def basic_ack(self, delivery_tag, multiple=False): try: message = self.qos.get(delivery_tag).delivery_info sqs_message = message['sqs_message'] except KeyError: super().basic_ack(delivery_tag) else: queue = None if 'routing_key' in message: queue = self.canonical_queue_name(message['routing_key']) try: self.sqs(queue=queue).delete_message( QueueUrl=message['sqs_queue'], ReceiptHandle=sqs_message['ReceiptHandle'] ) except ClientError as exception: if exception.response['Error']['Code'] == 'AccessDenied': raise AccessDeniedQueueException( exception.response["Error"]["Message"] ) super().basic_reject(delivery_tag) else: super().basic_ack(delivery_tag) def _size(self, queue): """Return the number of messages in a queue.""" q_url = self._new_queue(queue) c = self.sqs(queue=self.canonical_queue_name(queue)) resp = c.get_queue_attributes( QueueUrl=q_url, AttributeNames=['ApproximateNumberOfMessages']) return int(resp['Attributes']['ApproximateNumberOfMessages']) def _purge(self, queue): """Delete all current messages in a queue.""" q_url = self._new_queue(queue) # SQS is slow at registering messages, so run for a few # iterations to ensure messages are detected and deleted. size = 0 for i in range(10): size += int(self._size(queue)) if not size: break self.sqs(queue=queue).purge_queue(QueueUrl=q_url) return size def close(self): super().close() # if self._asynsqs: # try: # self.asynsqs().close() # except AttributeError as exc: # FIXME ??? 
# if "can't set attribute" not in str(exc): # raise def new_sqs_client(self, region, access_key_id, secret_access_key, session_token=None): session = boto3.session.Session( region_name=region, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, aws_session_token=session_token, ) is_secure = self.is_secure if self.is_secure is not None else True client_kwargs = { 'use_ssl': is_secure } if self.endpoint_url is not None: client_kwargs['endpoint_url'] = self.endpoint_url client_config = self.transport_options.get('client-config') or {} config = Config(**client_config) return session.client('sqs', config=config, **client_kwargs) def sqs(self, queue=None): if queue is not None and self.predefined_queues: if queue not in self.predefined_queues: raise UndefinedQueueException( f"Queue with name '{queue}' must be defined" " in 'predefined_queues'.") q = self.predefined_queues[queue] if self.transport_options.get('sts_role_arn'): return self._handle_sts_session(queue, q) if not self.transport_options.get('sts_role_arn'): if queue in self._predefined_queue_clients: return self._predefined_queue_clients[queue] else: c = self._predefined_queue_clients[queue] = \ self.new_sqs_client( region=q.get('region', self.region), access_key_id=q.get( 'access_key_id', self.conninfo.userid), secret_access_key=q.get( 'secret_access_key', self.conninfo.password) ) return c if self._sqs is not None: return self._sqs c = self._sqs = self.new_sqs_client( region=self.region, access_key_id=self.conninfo.userid, secret_access_key=self.conninfo.password, ) return c def _handle_sts_session(self, queue, q): region = q.get('region', self.region) if not hasattr(self, 'sts_expiration'): # STS token - token init return self._new_predefined_queue_client_with_sts_session(queue, region) # STS token - refresh if expired elif self.sts_expiration.replace(tzinfo=None) < datetime.utcnow(): return self._new_predefined_queue_client_with_sts_session(queue, region) else: # STS token - ruse existing if queue not in self._predefined_queue_clients: return self._new_predefined_queue_client_with_sts_session(queue, region) return self._predefined_queue_clients[queue] def _new_predefined_queue_client_with_sts_session(self, queue, region): sts_creds = self.generate_sts_session_token( self.transport_options.get('sts_role_arn'), self.transport_options.get('sts_token_timeout', 900)) self.sts_expiration = sts_creds['Expiration'] c = self._predefined_queue_clients[queue] = self.new_sqs_client( region=region, access_key_id=sts_creds['AccessKeyId'], secret_access_key=sts_creds['SecretAccessKey'], session_token=sts_creds['SessionToken'], ) return c def generate_sts_session_token(self, role_arn, token_expiry_seconds): sts_client = boto3.client('sts') sts_policy = sts_client.assume_role( RoleArn=role_arn, RoleSessionName='Celery', DurationSeconds=token_expiry_seconds ) return sts_policy['Credentials'] def asynsqs(self, queue=None): if queue is not None and self.predefined_queues: if queue in self._predefined_queue_async_clients and \ not hasattr(self, 'sts_expiration'): return self._predefined_queue_async_clients[queue] if queue not in self.predefined_queues: raise UndefinedQueueException(( "Queue with name '{}' must be defined in " "'predefined_queues'." 
).format(queue)) q = self.predefined_queues[queue] c = self._predefined_queue_async_clients[queue] = \ AsyncSQSConnection( sqs_connection=self.sqs(queue=queue), region=q.get('region', self.region), fetch_message_attributes=self.fetch_message_attributes, ) return c if self._asynsqs is not None: return self._asynsqs c = self._asynsqs = AsyncSQSConnection( sqs_connection=self.sqs(queue=queue), region=self.region, fetch_message_attributes=self.fetch_message_attributes, ) return c @property def conninfo(self): return self.connection.client @property def transport_options(self): return self.connection.client.transport_options @cached_property def visibility_timeout(self): return (self.transport_options.get('visibility_timeout') or self.default_visibility_timeout) @cached_property def predefined_queues(self): """Map of queue_name to predefined queue settings.""" return self.transport_options.get('predefined_queues', {}) @cached_property def queue_name_prefix(self): return self.transport_options.get('queue_name_prefix', '') @cached_property def supports_fanout(self): return False @cached_property def region(self): return (self.transport_options.get('region') or boto3.Session().region_name or self.default_region) @cached_property def regioninfo(self): return self.transport_options.get('regioninfo') @cached_property def is_secure(self): return self.transport_options.get('is_secure') @cached_property def port(self): return self.transport_options.get('port') @cached_property def endpoint_url(self): if self.conninfo.hostname is not None: scheme = 'https' if self.is_secure else 'http' if self.conninfo.port is not None: port = f':{self.conninfo.port}' else: port = '' return '{}://{}{}'.format( scheme, self.conninfo.hostname, port ) @cached_property def wait_time_seconds(self): return self.transport_options.get('wait_time_seconds', self.default_wait_time_seconds) @cached_property def sqs_base64_encoding(self): return self.transport_options.get('sqs_base64_encoding', True) @cached_property def fetch_message_attributes(self): return self.transport_options.get('fetch_message_attributes') class Transport(virtual.Transport): """SQS Transport. Additional queue attributes can be supplied to SQS during queue creation by passing an ``sqs-creation-attributes`` key in transport_options. ``sqs-creation-attributes`` must be a dict whose key-value pairs correspond with Attributes in the `CreateQueue SQS API`_. For example, to have SQS queues created with server-side encryption enabled using the default Amazon Managed Customer Master Key, you can set ``KmsMasterKeyId`` Attribute. When the queue is initially created by Kombu, encryption will be enabled. .. code-block:: python from kombu.transport.SQS import Transport transport = Transport( ..., transport_options={ 'sqs-creation-attributes': { 'KmsMasterKeyId': 'alias/aws/sqs', }, } ) .. _CreateQueue SQS API: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html#API_CreateQueue_RequestParameters The ``ApproximateReceiveCount`` message attribute is fetched by this transport by default. Requested message attributes can be changed by setting ``fetch_message_attributes`` in the transport options. .. code-block:: python from kombu.transport.SQS import Transport transport = Transport( ..., transport_options={ 'fetch_message_attributes': ["All"], } ) .. 
_Message Attributes: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html#SQS-ReceiveMessage-request-AttributeNames """ # noqa: E501 Channel = Channel polling_interval = 1 wait_time_seconds = 0 default_port = None connection_errors = ( virtual.Transport.connection_errors + (exceptions.BotoCoreError, socket.error) ) channel_errors = ( virtual.Transport.channel_errors + (exceptions.BotoCoreError,) ) driver_type = 'sqs' driver_name = 'sqs' implements = virtual.Transport.implements.extend( asynchronous=True, exchange_type=frozenset(['direct']), ) @property def default_connection_params(self): return {'port': self.default_port} kombu-5.5.3/kombu/transport/__init__.py000066400000000000000000000065761477772317200201620ustar00rootroot00000000000000"""Built-in transports.""" from __future__ import annotations from kombu.utils.compat import _detect_environment from kombu.utils.imports import symbol_by_name def supports_librabbitmq() -> bool | None: """Return true if :pypi:`librabbitmq` can be used.""" if _detect_environment() == 'default': try: import librabbitmq # noqa except ImportError: # pragma: no cover pass else: # pragma: no cover return True return None TRANSPORT_ALIASES = { 'amqp': 'kombu.transport.pyamqp:Transport', 'amqps': 'kombu.transport.pyamqp:SSLTransport', 'pyamqp': 'kombu.transport.pyamqp:Transport', 'librabbitmq': 'kombu.transport.librabbitmq:Transport', 'confluentkafka': 'kombu.transport.confluentkafka:Transport', 'kafka': 'kombu.transport.confluentkafka:Transport', 'memory': 'kombu.transport.memory:Transport', 'redis': 'kombu.transport.redis:Transport', 'rediss': 'kombu.transport.redis:Transport', 'SQS': 'kombu.transport.SQS:Transport', 'sqs': 'kombu.transport.SQS:Transport', 'mongodb': 'kombu.transport.mongodb:Transport', 'zookeeper': 'kombu.transport.zookeeper:Transport', 'sqlalchemy': 'kombu.transport.sqlalchemy:Transport', 'sqla': 'kombu.transport.sqlalchemy:Transport', 'SLMQ': 'kombu.transport.SLMQ.Transport', 'slmq': 'kombu.transport.SLMQ.Transport', 'filesystem': 'kombu.transport.filesystem:Transport', 'qpid': 'kombu.transport.qpid:Transport', 'sentinel': 'kombu.transport.redis:SentinelTransport', 'consul': 'kombu.transport.consul:Transport', 'etcd': 'kombu.transport.etcd:Transport', 'azurestoragequeues': 'kombu.transport.azurestoragequeues:Transport', 'azureservicebus': 'kombu.transport.azureservicebus:Transport', 'pyro': 'kombu.transport.pyro:Transport', 'gcpubsub': 'kombu.transport.gcpubsub:Transport', } _transport_cache = {} def resolve_transport(transport: str | None = None) -> str | None: """Get transport by name. Arguments: --------- transport (Union[str, type]): This can be either an actual transport class, or the fully qualified path to a transport class, or the alias of a transport. """ if isinstance(transport, str): try: transport = TRANSPORT_ALIASES[transport] except KeyError: if '.' not in transport and ':' not in transport: from kombu.utils.text import fmatch_best alt = fmatch_best(transport, TRANSPORT_ALIASES) if alt: raise KeyError( 'No such transport: {}. Did you mean {}?'.format( transport, alt)) raise KeyError(f'No such transport: {transport}') else: if callable(transport): transport = transport() return symbol_by_name(transport) return transport def get_transport_cls(transport: str | None = None) -> str | None: """Get transport class by name. 
The transport string is the full path to a transport class, e.g.:: "kombu.transport.pyamqp:Transport" If the name does not include `"."` (is not fully qualified), the alias table will be consulted. """ if transport not in _transport_cache: _transport_cache[transport] = resolve_transport(transport) return _transport_cache[transport] kombu-5.5.3/kombu/transport/azureservicebus.py000066400000000000000000000435721477772317200216410ustar00rootroot00000000000000"""Azure Service Bus Message Queue transport module for kombu. Note that the Shared Access Policy used to connect to Azure Service Bus requires Manage, Send and Listen claims since the broker will create new queues and delete old queues as required. Notes when using with Celery if you are experiencing issues with programs not terminating properly. The Azure Service Bus SDK uses the Azure uAMQP library which in turn creates some threads. If the AzureServiceBus Channel is closed, said threads will be closed properly, but it seems there are times when Celery does not do this so these threads will be left running. As the uAMQP threads are not marked as Daemon threads, they will not be killed when the main thread exits. Setting the ``uamqp_keep_alive_interval`` transport option to 0 will prevent the keep_alive thread from starting More information about Azure Service Bus: https://azure.microsoft.com/en-us/services/service-bus/ Features ======== * Type: Virtual * Supports Direct: *Unreviewed* * Supports Topic: *Unreviewed* * Supports Fanout: *Unreviewed* * Supports Priority: *Unreviewed* * Supports TTL: *Unreviewed* Connection String ================= Connection string has the following formats: .. code-block:: azureservicebus://SAS_POLICY_NAME:SAS_KEY@SERVICE_BUSNAMESPACE azureservicebus://DefaultAzureCredential@SERVICE_BUSNAMESPACE azureservicebus://ManagedIdentityCredential@SERVICE_BUSNAMESPACE Transport Options ================= * ``queue_name_prefix`` - String prefix to prepend to queue names in a service bus namespace. * ``wait_time_seconds`` - Number of seconds to wait to receive messages. Default ``5`` * ``peek_lock_seconds`` - Number of seconds the message is visible for before it is requeued and sent to another consumer. Default ``60`` * ``uamqp_keep_alive_interval`` - Interval in seconds the Azure uAMQP library should send keepalive messages. Default ``30`` * ``retry_total`` - Azure SDK retry total. Default ``3`` * ``retry_backoff_factor`` - Azure SDK exponential backoff factor. Default ``0.8`` * ``retry_backoff_max`` - Azure SDK retry total time. Default ``120`` """ from __future__ import annotations import string from queue import Empty from typing import Any import azure.core.exceptions import azure.servicebus.exceptions import isodate from azure.servicebus import (ServiceBusClient, ServiceBusMessage, ServiceBusReceiveMode, ServiceBusReceiver, ServiceBusSender) from azure.servicebus.management import ServiceBusAdministrationClient try: from azure.identity import (DefaultAzureCredential, ManagedIdentityCredential) except ImportError: DefaultAzureCredential = None ManagedIdentityCredential = None from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual # dots are replaced by dash, all other punctuation replaced by underscore. 
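# Illustrative example (the queue names are assumptions chosen only for the
# sketch): with the table built below, '.' maps to '-' and other punctuation
# (apart from '-' and '_') maps to '_', so Channel.entity_name() turns
# 'celery.pidbox' into 'celery-pidbox' and 'priority:high' into
# 'priority_high':
#
#     >>> 'celery.pidbox'.translate(CHARS_REPLACE_TABLE)
#     'celery-pidbox'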
PUNCTUATIONS_TO_REPLACE = set(string.punctuation) - {'_', '.', '-'} CHARS_REPLACE_TABLE = { ord('.'): ord('-'), **{ord(c): ord('_') for c in PUNCTUATIONS_TO_REPLACE} } class SendReceive: """Container for Sender and Receiver.""" def __init__(self, receiver: ServiceBusReceiver | None = None, sender: ServiceBusSender | None = None): self.receiver: ServiceBusReceiver = receiver self.sender: ServiceBusSender = sender def close(self) -> None: if self.receiver: self.receiver.close() self.receiver = None if self.sender: self.sender.close() self.sender = None class Channel(virtual.Channel): """Azure Service Bus channel.""" default_wait_time_seconds: int = 5 # in seconds default_peek_lock_seconds: int = 60 # in seconds (default 60, max 300) # in seconds (is the default from service bus repo) default_uamqp_keep_alive_interval: int = 30 # number of retries (is the default from service bus repo) default_retry_total: int = 3 # exponential backoff factor (is the default from service bus repo) default_retry_backoff_factor: float = 0.8 # Max time to backoff (is the default from service bus repo) default_retry_backoff_max: int = 120 domain_format: str = 'kombu%(vhost)s' _queue_cache: dict[str, SendReceive] = {} _noack_queues: set[str] = set() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._namespace = None self._policy = None self._sas_key = None self._connection_string = None self._try_parse_connection_string() self.qos.restore_at_shutdown = False def _try_parse_connection_string(self) -> None: self._namespace, self._credential = Transport.parse_uri( self.conninfo.hostname) if ( DefaultAzureCredential is not None and isinstance(self._credential, DefaultAzureCredential) ) or ( ManagedIdentityCredential is not None and isinstance(self._credential, ManagedIdentityCredential) ): return None if ":" in self._credential: self._policy, self._sas_key = self._credential.split(':', 1) conn_dict = { 'Endpoint': 'sb://' + self._namespace, 'SharedAccessKeyName': self._policy, 'SharedAccessKey': self._sas_key, } self._connection_string = ';'.join( [key + '=' + value for key, value in conn_dict.items()]) def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) return super().basic_consume( queue, no_ack, *args, **kwargs ) def basic_cancel(self, consumer_tag): if consumer_tag in self._consumers: queue = self._tag_to_queue[consumer_tag] self._noack_queues.discard(queue) return super().basic_cancel(consumer_tag) def _add_queue_to_cache( self, name: str, receiver: ServiceBusReceiver | None = None, sender: ServiceBusSender | None = None ) -> SendReceive: if name in self._queue_cache: obj = self._queue_cache[name] obj.sender = obj.sender or sender obj.receiver = obj.receiver or receiver else: obj = SendReceive(receiver, sender) self._queue_cache[name] = obj return obj def _get_asb_sender(self, queue: str) -> SendReceive: queue_obj = self._queue_cache.get(queue, None) if queue_obj is None or queue_obj.sender is None: sender = self.queue_service.get_queue_sender( queue, keep_alive=self.uamqp_keep_alive_interval) queue_obj = self._add_queue_to_cache(queue, sender=sender) return queue_obj def _get_asb_receiver( self, queue: str, recv_mode: ServiceBusReceiveMode = ServiceBusReceiveMode.PEEK_LOCK, queue_cache_key: str | None = None) -> SendReceive: cache_key = queue_cache_key or queue queue_obj = self._queue_cache.get(cache_key, None) if queue_obj is None or queue_obj.receiver is None: receiver = self.queue_service.get_queue_receiver( queue_name=queue, 
receive_mode=recv_mode, keep_alive=self.uamqp_keep_alive_interval) queue_obj = self._add_queue_to_cache(cache_key, receiver=receiver) return queue_obj def entity_name( self, name: str, table: dict[int, int] | None = None) -> str: """Format AMQP queue name into a valid ServiceBus queue name.""" return str(safe_str(name)).translate(table or CHARS_REPLACE_TABLE) def _restore(self, message: virtual.base.Message) -> None: # Not be needed as ASB handles unacked messages # Remove 'azure_message' as its not JSON serializable # message.delivery_info.pop('azure_message', None) # super()._restore(message) pass def _new_queue(self, queue: str, **kwargs) -> SendReceive: """Ensure a queue exists in ServiceBus.""" queue = self.entity_name(self.queue_name_prefix + queue) try: return self._queue_cache[queue] except KeyError: # Converts seconds into ISO8601 duration format # ie 66seconds = P1M6S lock_duration = isodate.duration_isoformat( isodate.Duration(seconds=self.peek_lock_seconds)) try: self.queue_mgmt_service.create_queue( queue_name=queue, lock_duration=lock_duration) except azure.core.exceptions.ResourceExistsError: pass return self._add_queue_to_cache(queue) def _delete(self, queue: str, *args, **kwargs) -> None: """Delete queue by name.""" queue = self.entity_name(self.queue_name_prefix + queue) self.queue_mgmt_service.delete_queue(queue) send_receive_obj = self._queue_cache.pop(queue, None) if send_receive_obj: send_receive_obj.close() def _put(self, queue: str, message, **kwargs) -> None: """Put message onto queue.""" queue = self.entity_name(self.queue_name_prefix + queue) msg = ServiceBusMessage(dumps(message)) queue_obj = self._get_asb_sender(queue) queue_obj.sender.send_messages(msg) def _get( self, queue: str, timeout: float | int | None = None ) -> dict[str, Any]: """Try to retrieve a single message off ``queue``.""" # If we're not ack'ing for this queue, just change receive_mode recv_mode = ServiceBusReceiveMode.RECEIVE_AND_DELETE \ if queue in self._noack_queues else ServiceBusReceiveMode.PEEK_LOCK queue = self.entity_name(self.queue_name_prefix + queue) queue_obj = self._get_asb_receiver(queue, recv_mode) messages = queue_obj.receiver.receive_messages( max_message_count=1, max_wait_time=timeout or self.wait_time_seconds) if not messages: raise Empty() # message.body is either byte or generator[bytes] message = messages[0] if not isinstance(message.body, bytes): body = b''.join(message.body) else: body = message.body msg = loads(bytes_to_str(body)) msg['properties']['delivery_info']['azure_message'] = message msg['properties']['delivery_info']['azure_queue_name'] = queue return msg def basic_ack(self, delivery_tag: str, multiple: bool = False) -> None: try: delivery_info = self.qos.get(delivery_tag).delivery_info except KeyError: super().basic_ack(delivery_tag) else: queue = delivery_info['azure_queue_name'] # recv_mode is PEEK_LOCK when ack'ing messages queue_obj = self._get_asb_receiver(queue) try: queue_obj.receiver.complete_message( delivery_info['azure_message']) except azure.servicebus.exceptions.MessageAlreadySettled: super().basic_ack(delivery_tag) except Exception: super().basic_reject(delivery_tag) else: super().basic_ack(delivery_tag) def _size(self, queue: str) -> int: """Return the number of messages in a queue.""" queue = self.entity_name(self.queue_name_prefix + queue) props = self.queue_mgmt_service.get_queue_runtime_properties(queue) return props.total_message_count def _purge(self, queue) -> int: """Delete all current messages in a queue.""" # Azure doesn't provide a 
purge api yet n = 0 max_purge_count = 10 queue = self.entity_name(self.queue_name_prefix + queue) # By default all the receivers will be in PEEK_LOCK receive mode queue_obj = self._queue_cache.get(queue, None) if queue not in self._noack_queues or \ queue_obj is None or queue_obj.receiver is None: queue_obj = self._get_asb_receiver( queue, ServiceBusReceiveMode.RECEIVE_AND_DELETE, 'purge_' + queue ) while True: messages = queue_obj.receiver.receive_messages( max_message_count=max_purge_count, max_wait_time=0.2 ) n += len(messages) if len(messages) < max_purge_count: break return n def close(self) -> None: # receivers and senders spawn threads so clean them up if not self.closed: self.closed = True for queue_obj in self._queue_cache.values(): queue_obj.close() self._queue_cache.clear() if self.connection is not None: self.connection.close_channel(self) @cached_property def queue_service(self) -> ServiceBusClient: if self._connection_string: return ServiceBusClient.from_connection_string( self._connection_string, retry_total=self.retry_total, retry_backoff_factor=self.retry_backoff_factor, retry_backoff_max=self.retry_backoff_max ) return ServiceBusClient( self._namespace, self._credential, retry_total=self.retry_total, retry_backoff_factor=self.retry_backoff_factor, retry_backoff_max=self.retry_backoff_max ) @cached_property def queue_mgmt_service(self) -> ServiceBusAdministrationClient: if self._connection_string: return ServiceBusAdministrationClient.from_connection_string( self._connection_string ) return ServiceBusAdministrationClient( self._namespace, self._credential ) @property def conninfo(self): return self.connection.client @property def transport_options(self): return self.connection.client.transport_options @cached_property def queue_name_prefix(self) -> str: return self.transport_options.get('queue_name_prefix', '') @cached_property def wait_time_seconds(self) -> int: return self.transport_options.get('wait_time_seconds', self.default_wait_time_seconds) @cached_property def peek_lock_seconds(self) -> int: return min(self.transport_options.get('peek_lock_seconds', self.default_peek_lock_seconds), 300) # Limit upper bounds to 300 @cached_property def uamqp_keep_alive_interval(self) -> int: return self.transport_options.get( 'uamqp_keep_alive_interval', self.default_uamqp_keep_alive_interval ) @cached_property def retry_total(self) -> int: return self.transport_options.get( 'retry_total', self.default_retry_total) @cached_property def retry_backoff_factor(self) -> float: return self.transport_options.get( 'retry_backoff_factor', self.default_retry_backoff_factor) @cached_property def retry_backoff_max(self) -> int: return self.transport_options.get( 'retry_backoff_max', self.default_retry_backoff_max) class Transport(virtual.Transport): """Azure Service Bus transport.""" Channel = Channel polling_interval = 1 default_port = None can_parse_url = True @staticmethod def parse_uri(uri: str) -> tuple[str, str | DefaultAzureCredential | ManagedIdentityCredential]: # URL like: # azureservicebus://{SAS policy name}:{SAS key}@{ServiceBus Namespace} # urllib parse does not work as the sas key could contain a slash # e.g.: azureservicebus://rootpolicy:some/key@somenamespace # > 'rootpolicy:some/key@somenamespace' uri = uri.replace('azureservicebus://', '') # > 'rootpolicy:some/key', 'somenamespace' credential, namespace = uri.rsplit('@', 1) if not namespace.endswith('.net'): namespace += '.servicebus.windows.net' if "DefaultAzureCredential".lower() == credential.lower(): if 
DefaultAzureCredential is None: raise ImportError('Azure Service Bus transport with a ' 'DefaultAzureCredential requires the ' 'azure-identity library') credential = DefaultAzureCredential() elif "ManagedIdentityCredential".lower() == credential.lower(): if ManagedIdentityCredential is None: raise ImportError('Azure Service Bus transport with a ' 'ManagedIdentityCredential requires the ' 'azure-identity library') credential = ManagedIdentityCredential() else: # > 'rootpolicy', 'some/key' policy, sas_key = credential.split(':', 1) credential = f"{policy}:{sas_key}" # Validate ASB connection string if not all([namespace, credential]): raise ValueError( 'Need a URI like ' 'azureservicebus://{SAS policy name}:{SAS key}@{ServiceBus Namespace} ' # noqa 'or the azure Endpoint connection string' ) return namespace, credential @classmethod def as_uri(cls, uri: str, include_password=False, mask='**') -> str: namespace, credential = cls.parse_uri(uri) if isinstance(credential, str) and ":" in credential: policy, sas_key = credential.split(':', 1) return 'azureservicebus://{}:{}@{}'.format( policy, sas_key if include_password else mask, namespace ) return 'azureservicebus://{}@{}'.format( credential.__class__.__name__, namespace ) kombu-5.5.3/kombu/transport/azurestoragequeues.py000066400000000000000000000212561477772317200223560ustar00rootroot00000000000000"""Azure Storage Queues transport module for kombu. More information about Azure Storage Queues: https://azure.microsoft.com/en-us/services/storage/queues/ Features ======== * Type: Virtual * Supports Direct: *Unreviewed* * Supports Topic: *Unreviewed* * Supports Fanout: *Unreviewed* * Supports Priority: *Unreviewed* * Supports TTL: *Unreviewed* Connection String ================= Connection string has the following formats: .. code-block:: azurestoragequeues://@ azurestoragequeues://@ azurestoragequeues://DefaultAzureCredential@ azurestoragequeues://ManagedIdentityCredential@ Note that if the access key for the storage account contains a forward slash (``/``), it will have to be regenerated before it can be used in the connection URL. .. code-block:: azurestoragequeues://DefaultAzureCredential@ azurestoragequeues://ManagedIdentityCredential@ If you wish to use an `Azure Managed Identity` you may use the ``DefaultAzureCredential`` format of the connection string which will use ``DefaultAzureCredential`` class in the azure-identity package. You may want to read the `azure-identity documentation` for more information on how the ``DefaultAzureCredential`` works. .. _azure-identity documentation: https://learn.microsoft.com/en-us/python/api/overview/azure/identity-readme?view=azure-python .. _Azure Managed Identity: https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview Transport Options ================= * ``queue_name_prefix`` """ from __future__ import annotations import string from queue import Empty from typing import Any from azure.core.exceptions import ResourceExistsError from kombu.utils.encoding import safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . 
import virtual try: from azure.storage.queue import QueueServiceClient except ImportError: # pragma: no cover QueueServiceClient = None try: from azure.identity import (DefaultAzureCredential, ManagedIdentityCredential) except ImportError: DefaultAzureCredential = None ManagedIdentityCredential = None # Azure storage queues allow only alphanumeric and dashes # so, replace everything with a dash CHARS_REPLACE_TABLE = { ord(c): 0x2d for c in string.punctuation } class Channel(virtual.Channel): """Azure Storage Queues channel.""" domain_format: str = 'kombu%(vhost)s' _queue_service: QueueServiceClient | None = None _queue_name_cache: dict[Any, Any] = {} no_ack: bool = True _noack_queues: set[Any] = set() def __init__(self, *args, **kwargs): if QueueServiceClient is None: raise ImportError('Azure Storage Queues transport requires the ' 'azure-storage-queue library') super().__init__(*args, **kwargs) self._credential, self._url = Transport.parse_uri( self.conninfo.hostname ) for queue in self.queue_service.list_queues(): self._queue_name_cache[queue['name']] = queue def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) return super().basic_consume(queue, no_ack, *args, **kwargs) def entity_name(self, name, table=CHARS_REPLACE_TABLE) -> str: """Format AMQP queue name into a valid Azure Storage Queue name.""" return str(safe_str(name)).translate(table) def _ensure_queue(self, queue): """Ensure a queue exists.""" queue = self.entity_name(self.queue_name_prefix + queue) try: q = self._queue_service.get_queue_client( queue=self._queue_name_cache[queue] ) except KeyError: try: q = self.queue_service.create_queue(queue) except ResourceExistsError: q = self._queue_service.get_queue_client(queue=queue) self._queue_name_cache[queue] = q.get_queue_properties() return q def _delete(self, queue, *args, **kwargs): """Delete queue by name.""" queue_name = self.entity_name(queue) self._queue_name_cache.pop(queue_name, None) self.queue_service.delete_queue(queue_name) def _put(self, queue, message, **kwargs): """Put message onto queue.""" q = self._ensure_queue(queue) encoded_message = dumps(message) q.send_message(encoded_message) def _get(self, queue, timeout=None): """Try to retrieve a single message off ``queue``.""" q = self._ensure_queue(queue) messages = q.receive_messages(messages_per_page=1, timeout=timeout) try: message = next(messages) except StopIteration: raise Empty() content = loads(message.content) q.delete_message(message=message) return content def _size(self, queue): """Return the number of messages in a queue.""" q = self._ensure_queue(queue) return q.get_queue_properties().approximate_message_count def _purge(self, queue): """Delete all current messages in a queue.""" q = self._ensure_queue(queue) n = self._size(q.queue_name) q.clear_messages() return n @property def queue_service(self) -> QueueServiceClient: if self._queue_service is None: self._queue_service = QueueServiceClient( account_url=self._url, credential=self._credential ) return self._queue_service @property def conninfo(self): return self.connection.client @property def transport_options(self): return self.connection.client.transport_options @cached_property def queue_name_prefix(self) -> str: return self.transport_options.get('queue_name_prefix', '') class Transport(virtual.Transport): """Azure Storage Queues transport.""" Channel = Channel polling_interval: int = 1 default_port: int | None = None can_parse_url: bool = True @staticmethod def parse_uri(uri: str) -> tuple[str | dict, 
str]: # URL like: # azurestoragequeues://@ # azurestoragequeues://@ # azurestoragequeues://DefaultAzureCredential@ # azurestoragequeues://ManagedIdentityCredential@ # urllib parse does not work as the sas key could contain a slash # e.g.: azurestoragequeues://some/key@someurl try: # > 'some/key@url' uri = uri.replace('azurestoragequeues://', '') # > 'some/key', 'url' credential, url = uri.rsplit('@', 1) if "DefaultAzureCredential".lower() == credential.lower(): if DefaultAzureCredential is None: raise ImportError('Azure Storage Queues transport with a ' 'DefaultAzureCredential requires the ' 'azure-identity library') credential = DefaultAzureCredential() elif "ManagedIdentityCredential".lower() == credential.lower(): if ManagedIdentityCredential is None: raise ImportError('Azure Storage Queues transport with a ' 'ManagedIdentityCredential requires the ' 'azure-identity library') credential = ManagedIdentityCredential() elif "devstoreaccount1" in url and ".core.windows.net" not in url: # parse credential as a dict if Azurite is being used credential = { "account_name": "devstoreaccount1", "account_key": credential, } # Validate parameters assert all([credential, url]) except Exception: raise ValueError( 'Need a URI like ' 'azurestoragequeues://{SAS or access key}@{URL}, ' 'azurestoragequeues://DefaultAzureCredential@{URL}, ' ', or ' 'azurestoragequeues://ManagedIdentityCredential@{URL}' ) return credential, url @classmethod def as_uri( cls, uri: str, include_password: bool = False, mask: str = "**" ) -> str: credential, url = cls.parse_uri(uri) return "azurestoragequeues://{}@{}".format( credential if include_password else mask, url ) kombu-5.5.3/kombu/transport/base.py000066400000000000000000000170071477772317200173240ustar00rootroot00000000000000"""Base transport interface.""" # flake8: noqa from __future__ import annotations import errno import socket from typing import TYPE_CHECKING from amqp.exceptions import RecoverableConnectionError from kombu.exceptions import ChannelError, ConnectionError from kombu.message import Message from kombu.utils.functional import dictfilter from kombu.utils.objects import cached_property from kombu.utils.time import maybe_s_to_ms if TYPE_CHECKING: from types import TracebackType __all__ = ('Message', 'StdChannel', 'Management', 'Transport') RABBITMQ_QUEUE_ARGUMENTS = { 'expires': ('x-expires', maybe_s_to_ms), 'message_ttl': ('x-message-ttl', maybe_s_to_ms), 'max_length': ('x-max-length', int), 'max_length_bytes': ('x-max-length-bytes', int), 'max_priority': ('x-max-priority', int), } # type: Mapping[str, Tuple[str, Callable]] def to_rabbitmq_queue_arguments(arguments, **options): # type: (Mapping, **Any) -> Dict """Convert queue arguments to RabbitMQ queue arguments. This is the implementation for Channel.prepare_queue_arguments for AMQP-based transports. It's used by both the pyamqp and librabbitmq transports. Arguments: arguments (Mapping): User-supplied arguments (``Queue.queue_arguments``). Keyword Arguments: expires (float): Queue expiry time in seconds. This will be converted to ``x-expires`` in int milliseconds. message_ttl (float): Message TTL in seconds. This will be converted to ``x-message-ttl`` in int milliseconds. max_length (int): Max queue length (in number of messages). This will be converted to ``x-max-length`` int. max_length_bytes (int): Max queue size in bytes. This will be converted to ``x-max-length-bytes`` int. max_priority (int): Max priority steps for queue. This will be converted to ``x-max-priority`` int. 
Returns ------- Dict: RabbitMQ compatible queue arguments. """ prepared = dictfilter(dict( _to_rabbitmq_queue_argument(key, value) for key, value in options.items() )) return dict(arguments, **prepared) if prepared else arguments def _to_rabbitmq_queue_argument(key, value): # type: (str, Any) -> Tuple[str, Any] opt, typ = RABBITMQ_QUEUE_ARGUMENTS[key] return opt, typ(value) if value is not None else value def _LeftBlank(obj, method): return NotImplementedError( 'Transport {0.__module__}.{0.__name__} does not implement {1}'.format( obj.__class__, method)) class StdChannel: """Standard channel base class.""" no_ack_consumers = None def Consumer(self, *args, **kwargs): from kombu.messaging import Consumer return Consumer(self, *args, **kwargs) def Producer(self, *args, **kwargs): from kombu.messaging import Producer return Producer(self, *args, **kwargs) def get_bindings(self): raise _LeftBlank(self, 'get_bindings') def after_reply_message_received(self, queue): """Callback called after RPC reply received. Notes ----- Reply queue semantics: can be used to delete the queue after transient reply message received. """ def prepare_queue_arguments(self, arguments, **kwargs): return arguments def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() class Management: """AMQP Management API (incomplete).""" def __init__(self, transport): self.transport = transport def get_bindings(self): raise _LeftBlank(self, 'get_bindings') class Implements(dict): """Helper class used to define transport features.""" def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def extend(self, **kwargs): return self.__class__(self, **kwargs) default_transport_capabilities = Implements( asynchronous=False, exchange_type=frozenset(['direct', 'topic', 'fanout', 'headers']), heartbeats=False, ) class Transport: """Base class for transports.""" Management = Management #: The :class:`~kombu.Connection` owning this instance. client = None #: Set to True if :class:`~kombu.Connection` should pass the URL #: unmodified. can_parse_url = False #: Default port used when no port has been specified. default_port = None #: Tuple of errors that can happen due to connection failure. connection_errors = (ConnectionError,) #: Tuple of errors that can happen due to channel/method failure. channel_errors = (ChannelError,) #: Type of driver, can be used to separate transports #: using the AMQP protocol (driver_type: 'amqp'), #: Redis (driver_type: 'redis'), etc... driver_type = 'N/A' #: Name of driver library (e.g. 'py-amqp', 'redis'). 
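# Illustrative sketch (``MyTransport`` is a hypothetical example, not part
# of this module): concrete transports override these attributes and extend
# the capability record, mirroring e.g. the SQS transport above:
#
#     class MyTransport(Transport):
#         driver_type = 'redis'
#         driver_name = 'redis'
#         implements = Transport.implements.extend(
#             asynchronous=True,
#             exchange_type=frozenset(['direct']),
#         )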
driver_name = 'N/A' __reader = None implements = default_transport_capabilities.extend() def __init__(self, client, **kwargs): self.client = client def establish_connection(self): raise _LeftBlank(self, 'establish_connection') def close_connection(self, connection): raise _LeftBlank(self, 'close_connection') def create_channel(self, connection): raise _LeftBlank(self, 'create_channel') def close_channel(self, connection): raise _LeftBlank(self, 'close_channel') def drain_events(self, connection, **kwargs): raise _LeftBlank(self, 'drain_events') def heartbeat_check(self, connection, rate=2): pass def driver_version(self): return 'N/A' def get_heartbeat_interval(self, connection): return 0 def register_with_event_loop(self, connection, loop): pass def unregister_from_event_loop(self, connection, loop): pass def verify_connection(self, connection): return True def _make_reader(self, connection, timeout=socket.timeout, error=socket.error, _unavail=(errno.EAGAIN, errno.EINTR)): drain_events = connection.drain_events def _read(loop): if not connection.connected: raise RecoverableConnectionError('Socket was disconnected') try: drain_events(timeout=0) except timeout: return except error as exc: if exc.errno in _unavail: return raise loop.call_soon(_read, loop) return _read def qos_semantics_matches_spec(self, connection): return True def on_readable(self, connection, loop): reader = self.__reader if reader is None: reader = self.__reader = self._make_reader(connection) reader(loop) def as_uri(self, uri: str, include_password=False, mask='**') -> str: """Customise the display format of the URI.""" raise NotImplementedError() @property def default_connection_params(self): return {} def get_manager(self, *args, **kwargs): return self.Management(self) @cached_property def manager(self): return self.get_manager() @property def supports_heartbeats(self): return self.implements.heartbeats @property def supports_ev(self): return self.implements.asynchronous kombu-5.5.3/kombu/transport/confluentkafka.py000066400000000000000000000275551477772317200214160ustar00rootroot00000000000000"""confluent-kafka transport module for Kombu. Kafka transport using confluent-kafka library. **References** - http://docs.confluent.io/current/clients/confluent-kafka-python **Limitations** The confluent-kafka transport does not support PyPy environment. Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: No * Supports Priority: No * Supports TTL: No Connection String ================= Connection string has the following format: .. code-block:: confluentkafka://[USER:PASSWORD@]KAFKA_ADDRESS[:PORT] Transport Options ================= * ``connection_wait_time_seconds`` - Time in seconds to wait for connection to succeed. Default ``5`` * ``wait_time_seconds`` - Time in seconds to wait to receive messages. Default ``5`` * ``security_protocol`` - Protocol used to communicate with broker. Visit https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md for an explanation of valid values. Default ``plaintext`` * ``sasl_mechanism`` - SASL mechanism to use for authentication. Visit https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md for an explanation of valid values. * ``num_partitions`` - Number of partitions to create. Default ``1`` * ``replication_factor`` - Replication factor of partitions. Default ``1`` * ``topic_config`` - Topic configuration. 
Must be a dict whose key-value pairs correspond with attributes in the http://kafka.apache.org/documentation.html#topicconfigs. * ``kafka_common_config`` - Configuration applied to producer, consumer and admin client. Must be a dict whose key-value pairs correspond with attributes in the https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md. * ``kafka_producer_config`` - Producer configuration. Must be a dict whose key-value pairs correspond with attributes in the https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md. * ``kafka_consumer_config`` - Consumer configuration. Must be a dict whose key-value pairs correspond with attributes in the https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md. * ``kafka_admin_config`` - Admin client configuration. Must be a dict whose key-value pairs correspond with attributes in the https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md. """ from __future__ import annotations from queue import Empty from kombu.transport import virtual from kombu.utils import cached_property from kombu.utils.encoding import str_to_bytes from kombu.utils.json import dumps, loads try: import confluent_kafka from confluent_kafka import (Consumer, KafkaException, Producer, TopicPartition) from confluent_kafka.admin import AdminClient, NewTopic KAFKA_CONNECTION_ERRORS = () KAFKA_CHANNEL_ERRORS = () except ImportError: confluent_kafka = None KAFKA_CONNECTION_ERRORS = KAFKA_CHANNEL_ERRORS = () from kombu.log import get_logger logger = get_logger(__name__) DEFAULT_PORT = 9092 class NoBrokersAvailable(KafkaException): """Kafka broker is not available exception.""" retriable = True class Message(virtual.Message): """Message object.""" def __init__(self, payload, channel=None, **kwargs): self.topic = payload.get('topic') super().__init__(payload, channel=channel, **kwargs) class QoS(virtual.QoS): """Quality of Service guarantees.""" _not_yet_acked = {} def can_consume(self): """Return true if the channel can be consumed from. :returns: True, if this QoS object can accept a message. :rtype: bool """ return not self.prefetch_count or len(self._not_yet_acked) < self \ .prefetch_count def can_consume_max_estimate(self): if self.prefetch_count: return self.prefetch_count - len(self._not_yet_acked) else: return 1 def append(self, message, delivery_tag): self._not_yet_acked[delivery_tag] = message def get(self, delivery_tag): return self._not_yet_acked[delivery_tag] def ack(self, delivery_tag): if delivery_tag not in self._not_yet_acked: return message = self._not_yet_acked.pop(delivery_tag) consumer = self.channel._get_consumer(message.topic) consumer.commit() def reject(self, delivery_tag, requeue=False): """Reject a message by delivery tag. If requeue is True, then the last consumed message is reverted so it'll be refetched on the next attempt. If False, that message is consumed and ignored. 
""" if requeue: message = self._not_yet_acked.pop(delivery_tag) consumer = self.channel._get_consumer(message.topic) for assignment in consumer.assignment(): topic_partition = TopicPartition(message.topic, assignment.partition) [committed_offset] = consumer.committed([topic_partition]) consumer.seek(committed_offset) else: self.ack(delivery_tag) def restore_unacked_once(self, stderr=None): pass class Channel(virtual.Channel): """Kafka Channel.""" QoS = QoS Message = Message default_wait_time_seconds = 5 default_connection_wait_time_seconds = 5 _client = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._kafka_consumers = {} self._kafka_producers = {} self._client = self._open() def sanitize_queue_name(self, queue): """Need to sanitize the name, celery sometimes pushes in @ signs.""" return str(queue).replace('@', '') def _get_producer(self, queue): """Create/get a producer instance for the given topic/queue.""" queue = self.sanitize_queue_name(queue) producer = self._kafka_producers.get(queue, None) if producer is None: producer = Producer({ **self.common_config, **(self.options.get('kafka_producer_config') or {}), }) self._kafka_producers[queue] = producer return producer def _get_consumer(self, queue): """Create/get a consumer instance for the given topic/queue.""" queue = self.sanitize_queue_name(queue) consumer = self._kafka_consumers.get(queue, None) if consumer is None: consumer = Consumer({ 'group.id': f'{queue}-consumer-group', 'auto.offset.reset': 'earliest', 'enable.auto.commit': False, **self.common_config, **(self.options.get('kafka_consumer_config') or {}), }) consumer.subscribe([queue]) self._kafka_consumers[queue] = consumer return consumer def _put(self, queue, message, **kwargs): """Put a message on the topic/queue.""" queue = self.sanitize_queue_name(queue) producer = self._get_producer(queue) producer.produce(queue, str_to_bytes(dumps(message))) producer.flush() def _get(self, queue, **kwargs): """Get a message from the topic/queue.""" queue = self.sanitize_queue_name(queue) consumer = self._get_consumer(queue) message = None try: message = consumer.poll(self.wait_time_seconds) except StopIteration: pass if not message: raise Empty() error = message.error() if error: logger.error(error) raise Empty() return {**loads(message.value()), 'topic': message.topic()} def _delete(self, queue, *args, **kwargs): """Delete a queue/topic.""" queue = self.sanitize_queue_name(queue) self._kafka_consumers[queue].close() self._kafka_consumers.pop(queue) self.client.delete_topics([queue]) def _size(self, queue): """Get the number of pending messages in the topic/queue.""" queue = self.sanitize_queue_name(queue) consumer = self._kafka_consumers.get(queue, None) if consumer is None: return 0 size = 0 for assignment in consumer.assignment(): topic_partition = TopicPartition(queue, assignment.partition) (_, end_offset) = consumer.get_watermark_offsets(topic_partition) [committed_offset] = consumer.committed([topic_partition]) size += end_offset - committed_offset.offset return size def _new_queue(self, queue, **kwargs): """Create a new topic if it does not exist.""" queue = self.sanitize_queue_name(queue) if queue in self.client.list_topics().topics: return topic = NewTopic( queue, num_partitions=self.options.get('num_partitions', 1), replication_factor=self.options.get('replication_factor', 1), config=self.options.get('topic_config', {}) ) self.client.create_topics(new_topics=[topic]) def _has_queue(self, queue, **kwargs): """Check if a topic already exists.""" 
queue = self.sanitize_queue_name(queue) return queue in self.client.list_topics().topics def _open(self): client = AdminClient({ **self.common_config, **(self.options.get('kafka_admin_config') or {}), }) try: # seems to be the only way to check connection client.list_topics(timeout=self.wait_time_seconds) except confluent_kafka.KafkaException as e: raise NoBrokersAvailable(e) return client @property def client(self): if self._client is None: self._client = self._open() return self._client @property def options(self): return self.connection.client.transport_options @property def conninfo(self): return self.connection.client @cached_property def wait_time_seconds(self): return self.options.get( 'wait_time_seconds', self.default_wait_time_seconds ) @cached_property def connection_wait_time_seconds(self): return self.options.get( 'connection_wait_time_seconds', self.default_connection_wait_time_seconds, ) @cached_property def common_config(self): conninfo = self.connection.client config = { 'bootstrap.servers': f'{conninfo.hostname}:{int(conninfo.port) or DEFAULT_PORT}', } security_protocol = self.options.get('security_protocol', 'plaintext') if security_protocol.lower() != 'plaintext': config.update({ 'security.protocol': security_protocol, 'sasl.username': conninfo.userid, 'sasl.password': conninfo.password, 'sasl.mechanism': self.options.get('sasl_mechanism'), }) config.update(self.options.get('kafka_common_config') or {}) return config def close(self): super().close() self._kafka_producers = {} for consumer in self._kafka_consumers.values(): consumer.close() self._kafka_consumers = {} class Transport(virtual.Transport): """Kafka Transport.""" def as_uri(self, uri: str, include_password=False, mask='**') -> str: pass Channel = Channel default_port = DEFAULT_PORT driver_type = 'kafka' driver_name = 'confluentkafka' recoverable_connection_errors = ( NoBrokersAvailable, ) def __init__(self, client, **kwargs): if confluent_kafka is None: raise ImportError('The confluent-kafka library is not installed') super().__init__(client, **kwargs) def driver_version(self): return confluent_kafka.__version__ def establish_connection(self): return super().establish_connection() def close_connection(self, connection): return super().close_connection(connection) kombu-5.5.3/kombu/transport/consul.py000066400000000000000000000223201477772317200177070ustar00rootroot00000000000000"""Consul Transport module for Kombu. Features ======== It uses Consul.io's Key/Value store to transport messages in Queues It uses python-consul for talking to Consul's HTTP API Features ======== * Type: Native * Supports Direct: Yes * Supports Topic: *Unreviewed* * Supports Fanout: *Unreviewed* * Supports Priority: *Unreviewed* * Supports TTL: *Unreviewed* Connection String ================= Connection string has the following format: .. code-block:: consul://CONSUL_ADDRESS[:PORT] """ from __future__ import annotations import socket import uuid from collections import defaultdict from contextlib import contextmanager from queue import Empty from time import monotonic from kombu.exceptions import ChannelError from kombu.log import get_logger from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . 
import virtual try: import consul except ImportError: consul = None logger = get_logger('kombu.transport.consul') DEFAULT_PORT = 8500 DEFAULT_HOST = 'localhost' class LockError(Exception): """An error occurred while trying to acquire the lock.""" class Channel(virtual.Channel): """Consul Channel class which talks to the Consul Key/Value store.""" prefix = 'kombu' index = None timeout = '10s' session_ttl = 30 def __init__(self, *args, **kwargs): if consul is None: raise ImportError('Missing python-consul library') super().__init__(*args, **kwargs) port = self.connection.client.port or self.connection.default_port host = self.connection.client.hostname or DEFAULT_HOST logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout) self.queues = defaultdict(dict) self.client = consul.Consul(host=host, port=int(port)) def _lock_key(self, queue): return f'{self.prefix}/{queue}.lock' def _key_prefix(self, queue): return f'{self.prefix}/{queue}' def _get_or_create_session(self, queue): """Get or create consul session. Try to renew the session if it exists, otherwise create a new session in Consul. This session is used to acquire a lock inside Consul so that we achieve read-consistency between the nodes. Arguments: --------- queue (str): The name of the Queue. Returns ------- str: The ID of the session. """ try: session_id = self.queues[queue]['session_id'] except KeyError: session_id = None return (self._renew_existing_session(session_id) if session_id is not None else self._create_new_session()) def _renew_existing_session(self, session_id): logger.debug('Trying to renew existing session %s', session_id) session = self.client.session.renew(session_id=session_id) return session.get('ID') def _create_new_session(self): logger.debug('Creating session %s with TTL %s', self.lock_name, self.session_ttl) session_id = self.client.session.create( name=self.lock_name, ttl=self.session_ttl) logger.debug('Created session %s with id %s', self.lock_name, session_id) return session_id @contextmanager def _queue_lock(self, queue, raising=LockError): """Try to acquire a lock on the Queue. It does so by creating a object called 'lock' which is locked by the current session.. This way other nodes are not able to write to the lock object which means that they have to wait before the lock is released. Arguments: --------- queue (str): The name of the Queue. raising (Exception): Set custom lock error class. Raises ------ LockError: if the lock cannot be acquired. Returns ------- bool: success? """ self._acquire_lock(queue, raising=raising) try: yield finally: self._release_lock(queue) def _acquire_lock(self, queue, raising=LockError): session_id = self._get_or_create_session(queue) lock_key = self._lock_key(queue) logger.debug('Trying to create lock object %s with session %s', lock_key, session_id) if self.client.kv.put(key=lock_key, acquire=session_id, value=self.lock_name): self.queues[queue]['session_id'] = session_id return logger.info('Could not acquire lock on key %s', lock_key) raise raising() def _release_lock(self, queue): """Try to release a lock. It does so by simply removing the lock key in Consul. Arguments: --------- queue (str): The name of the queue we want to release the lock from. """ logger.debug('Removing lock key %s', self._lock_key(queue)) self.client.kv.delete(key=self._lock_key(queue)) def _destroy_session(self, queue): """Destroy a previously created Consul session. Will release all locks it still might hold. Arguments: --------- queue (str): The name of the Queue. 
""" logger.debug('Destroying session %s', self.queues[queue]['session_id']) self.client.session.destroy(self.queues[queue]['session_id']) def _new_queue(self, queue, **_): self.queues[queue] = {'session_id': None} return self.client.kv.put(key=self._key_prefix(queue), value=None) def _delete(self, queue, *args, **_): self._destroy_session(queue) self.queues.pop(queue, None) self._purge(queue) def _put(self, queue, payload, **_): """Put `message` onto `queue`. This simply writes a key to the K/V store of Consul """ key = '{}/msg/{}_{}'.format( self._key_prefix(queue), int(round(monotonic() * 1000)), uuid.uuid4(), ) if not self.client.kv.put(key=key, value=dumps(payload), cas=0): raise ChannelError(f'Cannot add key {key!r} to consul') def _get(self, queue, timeout=None): """Get the first available message from the queue. Before it does so it acquires a lock on the Key/Value store so only one node reads at the same time. This is for read consistency """ with self._queue_lock(queue, raising=Empty): key = f'{self._key_prefix(queue)}/msg/' logger.debug('Fetching key %s with index %s', key, self.index) self.index, data = self.client.kv.get( key=key, recurse=True, index=self.index, wait=self.timeout, ) try: if data is None: raise Empty() logger.debug('Removing key %s with modifyindex %s', data[0]['Key'], data[0]['ModifyIndex']) self.client.kv.delete(key=data[0]['Key'], cas=data[0]['ModifyIndex']) return loads(data[0]['Value']) except TypeError: pass raise Empty() def _purge(self, queue): self._destroy_session(queue) return self.client.kv.delete( key=f'{self._key_prefix(queue)}/msg/', recurse=True, ) def _size(self, queue): size = 0 try: key = f'{self._key_prefix(queue)}/msg/' logger.debug('Fetching key recursively %s with index %s', key, self.index) self.index, data = self.client.kv.get( key=key, recurse=True, index=self.index, wait=self.timeout, ) size = len(data) except TypeError: pass logger.debug('Found %s keys under %s with index %s', size, key, self.index) return size @cached_property def lock_name(self): return f'{socket.gethostname()}' class Transport(virtual.Transport): """Consul K/V storage Transport for Kombu.""" Channel = Channel default_port = DEFAULT_PORT driver_type = 'consul' driver_name = 'consul' if consul: connection_errors = ( virtual.Transport.connection_errors + ( consul.ConsulException, consul.base.ConsulException ) ) channel_errors = ( virtual.Transport.channel_errors + ( consul.ConsulException, consul.base.ConsulException ) ) def __init__(self, *args, **kwargs): if consul is None: raise ImportError('Missing python-consul library') super().__init__(*args, **kwargs) def verify_connection(self, connection): port = connection.client.port or self.default_port host = connection.client.hostname or DEFAULT_HOST logger.debug('Verify Consul connection to %s:%s', host, port) try: client = consul.Consul(host=host, port=int(port)) client.agent.self() return True except ValueError: pass return False def driver_version(self): return consul.__version__ kombu-5.5.3/kombu/transport/etcd.py000066400000000000000000000207041477772317200173270ustar00rootroot00000000000000"""Etcd Transport module for Kombu. It uses Etcd as a store to transport messages in Queues It uses python-etcd for talking to Etcd's HTTP API Features ======== * Type: Virtual * Supports Direct: *Unreviewed* * Supports Topic: *Unreviewed* * Supports Fanout: *Unreviewed* * Supports Priority: *Unreviewed* * Supports TTL: *Unreviewed* Connection String ================= Connection string has the following format: .. 
code-block:: 'etcd'://SERVER:PORT """ from __future__ import annotations import os import socket from collections import defaultdict from contextlib import contextmanager from queue import Empty from kombu.exceptions import ChannelError from kombu.log import get_logger from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual try: import etcd except ImportError: etcd = None logger = get_logger('kombu.transport.etcd') DEFAULT_PORT = 2379 DEFAULT_HOST = 'localhost' class Channel(virtual.Channel): """Etcd Channel class which talks to the Etcd.""" prefix = 'kombu' index = None timeout = 10 session_ttl = 30 lock_ttl = 10 def __init__(self, *args, **kwargs): if etcd is None: raise ImportError('Missing python-etcd library') super().__init__(*args, **kwargs) port = self.connection.client.port or self.connection.default_port host = self.connection.client.hostname or DEFAULT_HOST logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout) self.queues = defaultdict(dict) self.client = etcd.Client(host=host, port=int(port)) def _key_prefix(self, queue): """Create and return the `queue` with the proper prefix. Arguments: --------- queue (str): The name of the queue. """ return f'{self.prefix}/{queue}' @contextmanager def _queue_lock(self, queue): """Try to acquire a lock on the Queue. It does so by creating a object called 'lock' which is locked by the current session.. This way other nodes are not able to write to the lock object which means that they have to wait before the lock is released. Arguments: --------- queue (str): The name of the queue. """ lock = etcd.Lock(self.client, queue) lock._uuid = self.lock_value logger.debug(f'Acquiring lock {lock.name}') lock.acquire(blocking=True, lock_ttl=self.lock_ttl) try: yield finally: logger.debug(f'Releasing lock {lock.name}') lock.release() def _new_queue(self, queue, **_): """Create a new `queue` if the `queue` doesn't already exist. Arguments: --------- queue (str): The name of the queue. """ self.queues[queue] = queue with self._queue_lock(queue): try: return self.client.write( key=self._key_prefix(queue), dir=True, value=None) except etcd.EtcdNotFile: logger.debug(f'Queue "{queue}" already exists') return self.client.read(key=self._key_prefix(queue)) def _has_queue(self, queue, **kwargs): """Verify that queue exists. Returns ------- bool: Should return :const:`True` if the queue exists or :const:`False` otherwise. """ try: self.client.read(self._key_prefix(queue)) return True except etcd.EtcdKeyNotFound: return False def _delete(self, queue, *args, **_): """Delete a `queue`. Arguments: --------- queue (str): The name of the queue. """ self.queues.pop(queue, None) self._purge(queue) def _put(self, queue, payload, **_): """Put `message` onto `queue`. This simply writes a key to the Etcd store Arguments: --------- queue (str): The name of the queue. payload (dict): Message data which will be dumped to etcd. """ with self._queue_lock(queue): key = self._key_prefix(queue) if not self.client.write( key=key, value=dumps(payload), append=True): raise ChannelError(f'Cannot add key {key!r} to etcd') def _get(self, queue, timeout=None): """Get the first available message from the queue. Before it does so it acquires a lock on the store so only one node reads at the same time. This is for read consistency Arguments: --------- queue (str): The name of the queue. timeout (int): Optional seconds to wait for a response. 
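        Returns
        -------
        dict: The decoded message payload.

        Raises
        ------
        Empty: If no message is available on the queue.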
""" with self._queue_lock(queue): key = self._key_prefix(queue) logger.debug('Fetching key %s with index %s', key, self.index) try: result = self.client.read( key=key, recursive=True, index=self.index, timeout=self.timeout) if result is None: raise Empty() item = result._children[-1] logger.debug('Removing key {}'.format(item['key'])) msg_content = loads(item['value']) self.client.delete(key=item['key']) return msg_content except (TypeError, IndexError, etcd.EtcdException) as error: logger.debug(f'_get failed: {type(error)}:{error}') raise Empty() def _purge(self, queue): """Remove all `message`s from a `queue`. Arguments: --------- queue (str): The name of the queue. """ with self._queue_lock(queue): key = self._key_prefix(queue) logger.debug(f'Purging queue at key {key}') return self.client.delete(key=key, recursive=True) def _size(self, queue): """Return the size of the `queue`. Arguments: --------- queue (str): The name of the queue. """ with self._queue_lock(queue): size = 0 try: key = self._key_prefix(queue) logger.debug('Fetching key recursively %s with index %s', key, self.index) result = self.client.read( key=key, recursive=True, index=self.index) size = len(result._children) except TypeError: pass logger.debug('Found %s keys under %s with index %s', size, key, self.index) return size @cached_property def lock_value(self): return f'{socket.gethostname()}.{os.getpid()}' class Transport(virtual.Transport): """Etcd storage Transport for Kombu.""" Channel = Channel default_port = DEFAULT_PORT driver_type = 'etcd' driver_name = 'python-etcd' polling_interval = 3 implements = virtual.Transport.implements.extend( exchange_type=frozenset(['direct'])) if etcd: connection_errors = ( virtual.Transport.connection_errors + (etcd.EtcdException, ) ) channel_errors = ( virtual.Transport.channel_errors + (etcd.EtcdException, ) ) def __init__(self, *args, **kwargs): """Create a new instance of etcd.Transport.""" if etcd is None: raise ImportError('Missing python-etcd library') super().__init__(*args, **kwargs) def verify_connection(self, connection): """Verify the connection works.""" port = connection.client.port or self.default_port host = connection.client.hostname or DEFAULT_HOST logger.debug('Verify Etcd connection to %s:%s', host, port) try: etcd.Client(host=host, port=int(port)) return True except ValueError: pass return False def driver_version(self): """Return the version of the etcd library. .. note:: python-etcd has no __version__. This is a workaround. """ try: import pip.commands.freeze for x in pip.commands.freeze.freeze(): if x.startswith('python-etcd'): return x.split('==')[1] except (ImportError, IndexError): logger.warning('Unable to find the python-etcd version.') return 'Unknown' kombu-5.5.3/kombu/transport/filesystem.py000066400000000000000000000242601477772317200205750ustar00rootroot00000000000000"""File-system Transport module for kombu. Transport using the file-system as the message store. Messages written to the queue are stored in `data_folder_in` directory and messages read from the queue are read from `data_folder_out` directory. Both directories must be created manually. Simple example: * Producer: .. 
code-block:: python import kombu conn = kombu.Connection( 'filesystem://', transport_options={ 'data_folder_in': 'data_in', 'data_folder_out': 'data_out' } ) conn.connect() test_queue = kombu.Queue('test', routing_key='test') with conn as conn: with conn.default_channel as channel: producer = kombu.Producer(channel) producer.publish( {'hello': 'world'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle' ) * Consumer: .. code-block:: python import kombu conn = kombu.Connection( 'filesystem://', transport_options={ 'data_folder_in': 'data_out', 'data_folder_out': 'data_in' } ) conn.connect() def callback(body, message): print(body, message) message.ack() test_queue = kombu.Queue('test', routing_key='test') with conn as conn: with conn.default_channel as channel: consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: No * Supports TTL: No Connection String ================= Connection string is in the following format: .. code-block:: filesystem:// Transport Options ================= * ``data_folder_in`` - directory where are messages stored when written to queue. * ``data_folder_out`` - directory from which are messages read when read from queue. * ``store_processed`` - if set to True, all processed messages are backed up to ``processed_folder``. * ``processed_folder`` - directory where are backed up processed files. * ``control_folder`` - directory where are exchange-queue table stored. """ from __future__ import annotations import os import shutil import tempfile import uuid from collections import namedtuple from pathlib import Path from queue import Empty from time import monotonic from kombu.exceptions import ChannelError from kombu.transport import virtual from kombu.utils.encoding import bytes_to_str, str_to_bytes from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property VERSION = (1, 0, 0) __version__ = '.'.join(map(str, VERSION)) # needs win32all to work on Windows if os.name == 'nt': import pywintypes import win32con import win32file LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK # 0 is the default LOCK_SH = 0 LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY __overlapped = pywintypes.OVERLAPPED() def lock(file, flags): """Create file lock.""" hfile = win32file._get_osfhandle(file.fileno()) win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped) def unlock(file): """Remove file lock.""" hfile = win32file._get_osfhandle(file.fileno()) win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped) elif os.name == 'posix': import fcntl from fcntl import LOCK_EX, LOCK_SH def lock(file, flags): """Create file lock.""" fcntl.flock(file.fileno(), flags) def unlock(file): """Remove file lock.""" fcntl.flock(file.fileno(), fcntl.LOCK_UN) else: raise RuntimeError( 'Filesystem plugin only defined for NT and POSIX platforms') exchange_queue_t = namedtuple("exchange_queue_t", ["routing_key", "pattern", "queue"]) class Channel(virtual.Channel): """Filesystem Channel.""" supports_fanout = True def get_table(self, exchange): file = self.control_folder / f"{exchange}.exchange" try: f_obj = file.open("r") try: lock(f_obj, LOCK_SH) exchange_table = loads(bytes_to_str(f_obj.read())) return [exchange_queue_t(*q) for q in exchange_table] finally: unlock(f_obj) f_obj.close() except 
FileNotFoundError: return [] except OSError: raise ChannelError(f"Cannot open {file}") def _queue_bind(self, exchange, routing_key, pattern, queue): file = self.control_folder / f"{exchange}.exchange" self.control_folder.mkdir(exist_ok=True) queue_val = exchange_queue_t(routing_key or "", pattern or "", queue or "") try: if file.exists(): f_obj = file.open("rb+", buffering=0) lock(f_obj, LOCK_EX) exchange_table = loads(bytes_to_str(f_obj.read())) queues = [exchange_queue_t(*q) for q in exchange_table] if queue_val not in queues: queues.insert(0, queue_val) f_obj.seek(0) f_obj.write(str_to_bytes(dumps(queues))) else: f_obj = file.open("wb", buffering=0) lock(f_obj, LOCK_EX) queues = [queue_val] f_obj.write(str_to_bytes(dumps(queues))) finally: unlock(f_obj) f_obj.close() def _put_fanout(self, exchange, payload, routing_key, **kwargs): for q in self.get_table(exchange): self._put(q.queue, payload, **kwargs) def _put(self, queue, payload, **kwargs): """Put `message` onto `queue`.""" filename = '{}_{}.{}.msg'.format(int(round(monotonic() * 1000)), uuid.uuid4(), queue) filename = os.path.join(self.data_folder_out, filename) try: f = open(filename, 'wb', buffering=0) lock(f, LOCK_EX) f.write(str_to_bytes(dumps(payload))) except OSError: raise ChannelError( f'Cannot add file {filename!r} to directory') finally: unlock(f) f.close() def _get(self, queue): """Get next message from `queue`.""" queue_find = '.' + queue + '.msg' folder = os.listdir(self.data_folder_in) folder = sorted(folder) while len(folder) > 0: filename = folder.pop(0) # only handle message for the requested queue if filename.find(queue_find) < 0: continue if self.store_processed: processed_folder = self.processed_folder else: processed_folder = tempfile.gettempdir() try: # move the file to the tmp/processed folder shutil.move(os.path.join(self.data_folder_in, filename), processed_folder) except OSError: # file could be locked, or removed in meantime so ignore continue filename = os.path.join(processed_folder, filename) try: f = open(filename, 'rb') payload = f.read() f.close() if not self.store_processed: os.remove(filename) except OSError: raise ChannelError( f'Cannot read file {filename!r} from queue.') return loads(bytes_to_str(payload)) raise Empty() def _purge(self, queue): """Remove all messages from `queue`.""" count = 0 queue_find = '.' 
+ queue + '.msg' folder = os.listdir(self.data_folder_in) while len(folder) > 0: filename = folder.pop() try: # only purge messages for the requested queue if filename.find(queue_find) < 0: continue filename = os.path.join(self.data_folder_in, filename) os.remove(filename) count += 1 except OSError: # we simply ignore its existence, as it was probably # processed by another worker pass return count def _size(self, queue): """Return the number of messages in `queue` as an :class:`int`.""" count = 0 queue_find = f'.{queue}.msg' folder = os.listdir(self.data_folder_in) while len(folder) > 0: filename = folder.pop() # only handle message for the requested queue if filename.find(queue_find) < 0: continue count += 1 return count @property def transport_options(self): return self.connection.client.transport_options @cached_property def data_folder_in(self): return self.transport_options.get('data_folder_in', 'data_in') @cached_property def data_folder_out(self): return self.transport_options.get('data_folder_out', 'data_out') @cached_property def store_processed(self): return self.transport_options.get('store_processed', False) @cached_property def processed_folder(self): return self.transport_options.get('processed_folder', 'processed') @property def control_folder(self): return Path(self.transport_options.get('control_folder', 'control')) class Transport(virtual.Transport): """Filesystem Transport.""" implements = virtual.Transport.implements.extend( asynchronous=False, exchange_type=frozenset(['direct', 'topic', 'fanout']) ) Channel = Channel # filesystem backend state is global. global_state = virtual.BrokerState() default_port = 0 driver_type = 'filesystem' driver_name = 'filesystem' def __init__(self, client, **kwargs): super().__init__(client, **kwargs) self.state = self.global_state def driver_version(self): return 'N/A' kombu-5.5.3/kombu/transport/gcpubsub.py000066400000000000000000000656111477772317200202300ustar00rootroot00000000000000"""GCP Pub/Sub transport module for kombu. More information about GCP Pub/Sub: https://cloud.google.com/pubsub Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: No * Supports Fanout: Yes * Supports Priority: No * Supports TTL: No Connection String ================= Connection string has the following formats: .. code-block:: gcpubsub://projects/project-name Transport Options ================= * ``queue_name_prefix``: (str) Prefix for queue names. * ``ack_deadline_seconds``: (int) The maximum time after receiving a message and acknowledging it before pub/sub redelivers the message. * ``expiration_seconds``: (int) Subscriptions without any subscriber activity or changes made to their properties are removed after this period. Examples of subscriber activities include open connections, active pulls, or successful pushes. * ``wait_time_seconds``: (int) The maximum time to wait for new messages. Defaults to 10. * ``retry_timeout_seconds``: (int) The maximum time to wait before retrying. * ``bulk_max_messages``: (int) The maximum number of messages to pull in bulk. Defaults to 32. 
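Usage Example
=============

A minimal producer sketch; the project id (``my-project``) and the queue
name ``test`` are placeholders, not values defined by this transport:

.. code-block:: python

    import kombu

    with kombu.Connection('gcpubsub://projects/my-project') as conn:
        test_queue = kombu.Queue('test', routing_key='test')
        with conn.default_channel as channel:
            producer = kombu.Producer(channel)
            producer.publish(
                {'hello': 'world'},
                exchange=test_queue.exchange,
                routing_key=test_queue.routing_key,
                declare=[test_queue],
            )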
""" from __future__ import annotations import dataclasses import datetime import string import threading from concurrent.futures import (FIRST_COMPLETED, Future, ThreadPoolExecutor, wait) from contextlib import suppress from os import getpid from queue import Empty from threading import Lock from time import monotonic, sleep from uuid import NAMESPACE_OID, uuid3 from _socket import gethostname from _socket import timeout as socket_timeout from google.api_core.exceptions import (AlreadyExists, DeadlineExceeded, PermissionDenied) from google.api_core.retry import Retry from google.cloud import monitoring_v3 from google.cloud.monitoring_v3 import query from google.cloud.pubsub_v1 import PublisherClient, SubscriberClient from google.cloud.pubsub_v1 import exceptions as pubsub_exceptions from google.cloud.pubsub_v1.publisher import exceptions as publisher_exceptions from google.cloud.pubsub_v1.subscriber import \ exceptions as subscriber_exceptions from google.pubsub_v1 import gapic_version as package_version from kombu.entity import TRANSIENT_DELIVERY_MODE from kombu.log import get_logger from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from . import virtual logger = get_logger('kombu.transport.gcpubsub') # dots are replaced by dash, all other punctuation replaced by underscore. PUNCTUATIONS_TO_REPLACE = set(string.punctuation) - {'_', '.', '-'} CHARS_REPLACE_TABLE = { ord('.'): ord('-'), **{ord(c): ord('_') for c in PUNCTUATIONS_TO_REPLACE}, } class UnackedIds: """Threadsafe list of ack_ids.""" def __init__(self): self._list = [] self._lock = Lock() def append(self, val): # append is atomic self._list.append(val) def extend(self, vals: list): # extend is atomic self._list.extend(vals) def pop(self, index=-1): with self._lock: return self._list.pop(index) def remove(self, val): with self._lock, suppress(ValueError): self._list.remove(val) def __len__(self): with self._lock: return len(self._list) def __getitem__(self, item): # getitem is atomic return self._list[item] class AtomicCounter: """Threadsafe counter. Returns the value after inc/dec operations. 
""" def __init__(self, initial=0): self._value = initial self._lock = Lock() def inc(self, n=1): with self._lock: self._value += n return self._value def dec(self, n=1): with self._lock: self._value -= n return self._value def get(self): with self._lock: return self._value @dataclasses.dataclass class QueueDescriptor: """Pub/Sub queue descriptor.""" name: str topic_path: str # projects/{project_id}/topics/{topic_id} subscription_id: str subscription_path: str # projects/{project_id}/subscriptions/{subscription_id} unacked_ids: UnackedIds = dataclasses.field(default_factory=UnackedIds) class Channel(virtual.Channel): """GCP Pub/Sub channel.""" supports_fanout = True do_restore = False # pub/sub does that for us default_wait_time_seconds = 10 default_ack_deadline_seconds = 240 default_expiration_seconds = 86400 default_retry_timeout_seconds = 300 default_bulk_max_messages = 32 _min_ack_deadline = 10 _fanout_exchanges = set() _unacked_extender: threading.Thread = None _stop_extender = threading.Event() _n_channels = AtomicCounter() _queue_cache: dict[str, QueueDescriptor] = {} _tmp_subscriptions: set[str] = set() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.pool = ThreadPoolExecutor() logger.info('new GCP pub/sub channel: %s', self.conninfo.hostname) self.project_id = Transport.parse_uri(self.conninfo.hostname) if self._n_channels.inc() == 1: Channel._unacked_extender = threading.Thread( target=self._extend_unacked_deadline, daemon=True, ) self._stop_extender.clear() Channel._unacked_extender.start() def entity_name(self, name: str, table=CHARS_REPLACE_TABLE) -> str: """Format AMQP queue name into a valid Pub/Sub queue name.""" if not name.startswith(self.queue_name_prefix): name = self.queue_name_prefix + name return str(safe_str(name)).translate(table) def _queue_bind(self, exchange, routing_key, pattern, queue): exchange_type = self.typeof(exchange).type queue = self.entity_name(queue) logger.debug( 'binding queue: %s to %s exchange: %s with routing_key: %s', queue, exchange_type, exchange, routing_key, ) filter_args = {} if exchange_type == 'direct': # Direct exchange is implemented as a single subscription # E.g. for exchange 'test_direct': # -topic:'test_direct' # -bound queue:'direct1': # -subscription: direct1' on topic 'test_direct' # -filter:routing_key' filter_args = { 'filter': f'attributes.routing_key="{routing_key}"' } subscription_path = self.subscriber.subscription_path( self.project_id, queue ) message_retention_duration = self.expiration_seconds elif exchange_type == 'fanout': # Fanout exchange is implemented as a separate subscription. # E.g. 
for exchange 'test_fanout': # -topic:'test_fanout' # -bound queue 'fanout1': # -subscription:'fanout1-uuid' on topic 'test_fanout' # -bound queue 'fanout2': # -subscription:'fanout2-uuid' on topic 'test_fanout' uid = f'{uuid3(NAMESPACE_OID, f"{gethostname()}.{getpid()}")}' uniq_sub_name = f'{queue}-{uid}' subscription_path = self.subscriber.subscription_path( self.project_id, uniq_sub_name ) self._tmp_subscriptions.add(subscription_path) self._fanout_exchanges.add(exchange) message_retention_duration = 600 else: raise NotImplementedError( f'exchange type {exchange_type} not implemented' ) exchange_topic = self._create_topic( self.project_id, exchange, message_retention_duration ) self._create_subscription( topic_path=exchange_topic, subscription_path=subscription_path, filter_args=filter_args, msg_retention=message_retention_duration, ) qdesc = QueueDescriptor( name=queue, topic_path=exchange_topic, subscription_id=queue, subscription_path=subscription_path, ) self._queue_cache[queue] = qdesc def _create_topic( self, project_id: str, topic_id: str, message_retention_duration: int = None, ) -> str: topic_path = self.publisher.topic_path(project_id, topic_id) if self._is_topic_exists(topic_path): # topic creation takes a while, so skip if possible logger.debug('topic: %s exists', topic_path) return topic_path try: logger.debug('creating topic: %s', topic_path) request = {'name': topic_path} if message_retention_duration: request[ 'message_retention_duration' ] = f'{message_retention_duration}s' self.publisher.create_topic(request=request) except AlreadyExists: pass return topic_path def _is_topic_exists(self, topic_path: str) -> bool: topics = self.publisher.list_topics( request={"project": f'projects/{self.project_id}'} ) for t in topics: if t.name == topic_path: return True return False def _create_subscription( self, project_id: str = None, topic_id: str = None, topic_path: str = None, subscription_path: str = None, filter_args=None, msg_retention: int = None, ) -> str: subscription_path = ( subscription_path or self.subscriber.subscription_path(self.project_id, topic_id) ) topic_path = topic_path or self.publisher.topic_path( project_id, topic_id ) try: logger.debug( 'creating subscription: %s, topic: %s, filter: %s', subscription_path, topic_path, filter_args, ) msg_retention = msg_retention or self.expiration_seconds self.subscriber.create_subscription( request={ "name": subscription_path, "topic": topic_path, 'ack_deadline_seconds': self.ack_deadline_seconds, 'expiration_policy': { 'ttl': f'{self.expiration_seconds}s' }, 'message_retention_duration': f'{msg_retention}s', **(filter_args or {}), } ) except AlreadyExists: pass return subscription_path def _delete(self, queue, *args, **kwargs): """Delete a queue by name.""" queue = self.entity_name(queue) logger.info('deleting queue: %s', queue) qdesc = self._queue_cache.get(queue) if not qdesc: return self.subscriber.delete_subscription( request={"subscription": qdesc.subscription_path} ) self._queue_cache.pop(queue, None) def _put(self, queue, message, **kwargs): """Put a message onto the queue.""" queue = self.entity_name(queue) qdesc = self._queue_cache[queue] routing_key = self._get_routing_key(message) logger.debug( 'putting message to queue: %s, topic: %s, routing_key: %s', queue, qdesc.topic_path, routing_key, ) encoded_message = dumps(message) self.publisher.publish( qdesc.topic_path, encoded_message.encode("utf-8"), routing_key=routing_key, ) def _put_fanout(self, exchange, message, routing_key, **kwargs): """Put a message onto 
fanout exchange.""" self._lookup(exchange, routing_key) topic_path = self.publisher.topic_path(self.project_id, exchange) logger.debug( 'putting msg to fanout exchange: %s, topic: %s', exchange, topic_path, ) encoded_message = dumps(message) self.publisher.publish( topic_path, encoded_message.encode("utf-8"), retry=Retry(deadline=self.retry_timeout_seconds), ) def _get(self, queue: str, timeout: float = None): """Retrieves a single message from a queue.""" queue = self.entity_name(queue) qdesc = self._queue_cache[queue] try: response = self.subscriber.pull( request={ 'subscription': qdesc.subscription_path, 'max_messages': 1, }, retry=Retry(deadline=self.retry_timeout_seconds), timeout=timeout or self.wait_time_seconds, ) except DeadlineExceeded: raise Empty() if len(response.received_messages) == 0: raise Empty() message = response.received_messages[0] ack_id = message.ack_id payload = loads(message.message.data) delivery_info = payload['properties']['delivery_info'] logger.debug( 'queue:%s got message, ack_id: %s, payload: %s', queue, ack_id, payload['properties'], ) if self._is_auto_ack(payload['properties']): logger.debug('auto acking message ack_id: %s', ack_id) self._do_ack([ack_id], qdesc.subscription_path) else: delivery_info['gcpubsub_message'] = { 'queue': queue, 'ack_id': ack_id, 'message_id': message.message.message_id, 'subscription_path': qdesc.subscription_path, } qdesc.unacked_ids.append(ack_id) return payload def _is_auto_ack(self, payload_properties: dict): exchange = payload_properties['delivery_info']['exchange'] delivery_mode = payload_properties['delivery_mode'] return ( delivery_mode == TRANSIENT_DELIVERY_MODE or exchange in self._fanout_exchanges ) def _get_bulk(self, queue: str, timeout: float): """Retrieves bulk of messages from a queue.""" prefixed_queue = self.entity_name(queue) qdesc = self._queue_cache[prefixed_queue] max_messages = self._get_max_messages_estimate() if not max_messages: raise Empty() try: response = self.subscriber.pull( request={ 'subscription': qdesc.subscription_path, 'max_messages': max_messages, }, retry=Retry(deadline=self.retry_timeout_seconds), timeout=timeout or self.wait_time_seconds, ) except DeadlineExceeded: raise Empty() received_messages = response.received_messages if len(received_messages) == 0: raise Empty() auto_ack_ids = [] ret_payloads = [] logger.debug( 'batching %d messages from queue: %s', len(received_messages), prefixed_queue, ) for message in received_messages: ack_id = message.ack_id payload = loads(bytes_to_str(message.message.data)) delivery_info = payload['properties']['delivery_info'] delivery_info['gcpubsub_message'] = { 'queue': prefixed_queue, 'ack_id': ack_id, 'message_id': message.message.message_id, 'subscription_path': qdesc.subscription_path, } if self._is_auto_ack(payload['properties']): auto_ack_ids.append(ack_id) else: qdesc.unacked_ids.append(ack_id) ret_payloads.append(payload) if auto_ack_ids: logger.debug('auto acking ack_ids: %s', auto_ack_ids) self._do_ack(auto_ack_ids, qdesc.subscription_path) return queue, ret_payloads def _get_max_messages_estimate(self) -> int: max_allowed = self.qos.can_consume_max_estimate() max_if_unlimited = self.bulk_max_messages return max_if_unlimited if max_allowed is None else max_allowed def _lookup(self, exchange, routing_key, default=None): exchange_info = self.state.exchanges.get(exchange, {}) if not exchange_info: return super()._lookup(exchange, routing_key, default) ret = self.typeof(exchange).lookup( self.get_table(exchange), exchange, routing_key, default, 
) if ret: return ret logger.debug( 'no queues bound to exchange: %s, binding on the fly', exchange, ) self.queue_bind(exchange, exchange, routing_key) return [exchange] def _size(self, queue: str) -> int: """Return the number of messages in a queue. This is a *rough* estimation, as Pub/Sub doesn't provide an exact API. """ queue = self.entity_name(queue) if queue not in self._queue_cache: return 0 qdesc = self._queue_cache[queue] result = query.Query( self.monitor, self.project_id, 'pubsub.googleapis.com/subscription/num_undelivered_messages', end_time=datetime.datetime.now(), minutes=1, ).select_resources(subscription_id=qdesc.subscription_id) # monitoring API requires the caller to have the monitoring.viewer # role. Since we can live without the exact number of messages # in the queue, we can ignore the exception and allow users to # use the transport without this role. with suppress(PermissionDenied): return sum( content.points[0].value.int64_value for content in result ) return -1 def basic_ack(self, delivery_tag, multiple=False): """Acknowledge one message.""" if multiple: raise NotImplementedError('multiple acks not implemented') delivery_info = self.qos.get(delivery_tag).delivery_info pubsub_message = delivery_info['gcpubsub_message'] ack_id = pubsub_message['ack_id'] queue = pubsub_message['queue'] logger.debug('ack message. queue: %s ack_id: %s', queue, ack_id) subscription_path = pubsub_message['subscription_path'] self._do_ack([ack_id], subscription_path) qdesc = self._queue_cache[queue] qdesc.unacked_ids.remove(ack_id) super().basic_ack(delivery_tag) def _do_ack(self, ack_ids: list[str], subscription_path: str): self.subscriber.acknowledge( request={"subscription": subscription_path, "ack_ids": ack_ids}, retry=Retry(deadline=self.retry_timeout_seconds), ) def _purge(self, queue: str): """Delete all current messages in a queue.""" queue = self.entity_name(queue) qdesc = self._queue_cache.get(queue) if not qdesc: return n = self._size(queue) self.subscriber.seek( request={ "subscription": qdesc.subscription_path, "time": datetime.datetime.now(), } ) return n def _extend_unacked_deadline(self): thread_id = threading.get_native_id() logger.info( 'unacked deadline extension thread: [%s] started', thread_id, ) min_deadline_sleep = self._min_ack_deadline / 2 sleep_time = max(min_deadline_sleep, self.ack_deadline_seconds / 4) while not self._stop_extender.wait(sleep_time): for qdesc in self._queue_cache.values(): if len(qdesc.unacked_ids) == 0: logger.debug( 'thread [%s]: no unacked messages for %s', thread_id, qdesc.subscription_path, ) continue logger.debug( 'thread [%s]: extend ack deadline for %s: %d msgs [%s]', thread_id, qdesc.subscription_path, len(qdesc.unacked_ids), list(qdesc.unacked_ids), ) self.subscriber.modify_ack_deadline( request={ "subscription": qdesc.subscription_path, "ack_ids": list(qdesc.unacked_ids), "ack_deadline_seconds": self.ack_deadline_seconds, } ) logger.info( 'unacked deadline extension thread [%s] stopped', thread_id ) def after_reply_message_received(self, queue: str): queue = self.entity_name(queue) sub = self.subscriber.subscription_path(self.project_id, queue) logger.debug( 'after_reply_message_received: queue: %s, sub: %s', queue, sub ) self._tmp_subscriptions.add(sub) @cached_property def subscriber(self): return SubscriberClient() @cached_property def publisher(self): return PublisherClient() @cached_property def monitor(self): return monitoring_v3.MetricServiceClient() @property def conninfo(self): return self.connection.client @property def 
transport_options(self): return self.connection.client.transport_options @cached_property def wait_time_seconds(self): return self.transport_options.get( 'wait_time_seconds', self.default_wait_time_seconds ) @cached_property def retry_timeout_seconds(self): return self.transport_options.get( 'retry_timeout_seconds', self.default_retry_timeout_seconds ) @cached_property def ack_deadline_seconds(self): return self.transport_options.get( 'ack_deadline_seconds', self.default_ack_deadline_seconds ) @cached_property def queue_name_prefix(self): return self.transport_options.get('queue_name_prefix', 'kombu-') @cached_property def expiration_seconds(self): return self.transport_options.get( 'expiration_seconds', self.default_expiration_seconds ) @cached_property def bulk_max_messages(self): return self.transport_options.get( 'bulk_max_messages', self.default_bulk_max_messages ) def close(self): """Close the channel.""" logger.debug('closing channel') while self._tmp_subscriptions: sub = self._tmp_subscriptions.pop() with suppress(Exception): logger.debug('deleting subscription: %s', sub) self.subscriber.delete_subscription( request={"subscription": sub} ) if not self._n_channels.dec(): self._stop_extender.set() Channel._unacked_extender.join() super().close() @staticmethod def _get_routing_key(message): routing_key = ( message['properties'] .get('delivery_info', {}) .get('routing_key', '') ) return routing_key class Transport(virtual.Transport): """GCP Pub/Sub transport.""" Channel = Channel can_parse_url = True polling_interval = 0.1 connection_errors = virtual.Transport.connection_errors + ( pubsub_exceptions.TimeoutError, ) channel_errors = ( virtual.Transport.channel_errors + ( publisher_exceptions.FlowControlLimitError, publisher_exceptions.MessageTooLargeError, publisher_exceptions.PublishError, publisher_exceptions.TimeoutError, publisher_exceptions.PublishToPausedOrderingKeyException, ) + (subscriber_exceptions.AcknowledgeError,) ) driver_type = 'gcpubsub' driver_name = 'pubsub_v1' implements = virtual.Transport.implements.extend( exchange_type=frozenset(['direct', 'fanout']), ) def __init__(self, client, **kwargs): super().__init__(client, **kwargs) self._pool = ThreadPoolExecutor() self._get_bulk_future_to_queue: dict[Future, str] = dict() def driver_version(self): return package_version.__version__ @staticmethod def parse_uri(uri: str) -> str: # URL like: # gcpubsub://projects/project-name project = uri.split('gcpubsub://projects/')[1] return project.strip('/') @classmethod def as_uri(self, uri: str, include_password=False, mask='**') -> str: return uri or 'gcpubsub://' def drain_events(self, connection, timeout=None): time_start = monotonic() polling_interval = self.polling_interval if timeout and polling_interval and polling_interval > timeout: polling_interval = timeout while 1: try: self._drain_from_active_queues(timeout=timeout) except Empty: if timeout and monotonic() - time_start >= timeout: raise socket_timeout() if polling_interval: sleep(polling_interval) else: break def _drain_from_active_queues(self, timeout): # cleanup empty requests from prev run self._rm_empty_bulk_requests() # submit new requests for all active queues # longer timeout means less frequent polling # and more messages in a single bulk self._submit_get_bulk_requests(timeout=10) done, _ = wait( self._get_bulk_future_to_queue, timeout=timeout, return_when=FIRST_COMPLETED, ) empty = {f for f in done if f.exception()} done -= empty for f in empty: self._get_bulk_future_to_queue.pop(f, None) if not done: raise 
Empty() logger.debug('got %d done get_bulk tasks', len(done)) for f in done: queue, payloads = f.result() for payload in payloads: logger.debug('consuming message from queue: %s', queue) if queue not in self._callbacks: logger.warning( 'Message for queue %s without consumers', queue ) continue self._deliver(payload, queue) self._get_bulk_future_to_queue.pop(f, None) def _rm_empty_bulk_requests(self): empty = { f for f in self._get_bulk_future_to_queue if f.done() and f.exception() } for f in empty: self._get_bulk_future_to_queue.pop(f, None) def _submit_get_bulk_requests(self, timeout): queues_with_submitted_get_bulk = set( self._get_bulk_future_to_queue.values() ) for channel in self.channels: for queue in channel._active_queues: if queue in queues_with_submitted_get_bulk: continue future = self._pool.submit(channel._get_bulk, queue, timeout) self._get_bulk_future_to_queue[future] = queue kombu-5.5.3/kombu/transport/librabbitmq.py000066400000000000000000000136061477772317200207030ustar00rootroot00000000000000"""`librabbitmq`_ transport. .. _`librabbitmq`: https://pypi.org/project/librabbitmq/ """ from __future__ import annotations import os import socket import warnings import librabbitmq as amqp from librabbitmq import ChannelError, ConnectionError from kombu.utils.amq_manager import get_manager from kombu.utils.text import version_string_as_tuple from . import base from .base import to_rabbitmq_queue_arguments W_VERSION = """ librabbitmq version too old to detect RabbitMQ version information so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3 """ DEFAULT_PORT = 5672 DEFAULT_SSL_PORT = 5671 NO_SSL_ERROR = """\ ssl not supported by librabbitmq, please use pyamqp:// or stunnel\ """ class Message(base.Message): """AMQP Message (librabbitmq).""" def __init__(self, channel, props, info, body): super().__init__( channel=channel, body=body, delivery_info=info, properties=props, delivery_tag=info.get('delivery_tag'), content_type=props.get('content_type'), content_encoding=props.get('content_encoding'), headers=props.get('headers')) class Channel(amqp.Channel, base.StdChannel): """AMQP Channel (librabbitmq).""" Message = Message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Encapsulate data into a AMQP message.""" properties = properties if properties is not None else {} properties.update({'content_type': content_type, 'content_encoding': content_encoding, 'headers': headers}) # Don't include priority if it's not an integer. # If that's the case librabbitmq will fail # and raise an exception. 
if priority is not None: properties['priority'] = priority return body, properties def prepare_queue_arguments(self, arguments, **kwargs): arguments = to_rabbitmq_queue_arguments(arguments, **kwargs) return {k.encode('utf8'): v for k, v in arguments.items()} class Connection(amqp.Connection): """AMQP Connection (librabbitmq).""" Channel = Channel Message = Message class Transport(base.Transport): """AMQP Transport (librabbitmq).""" Connection = Connection default_port = DEFAULT_PORT default_ssl_port = DEFAULT_SSL_PORT connection_errors = ( base.Transport.connection_errors + ( ConnectionError, socket.error, IOError, OSError) ) channel_errors = ( base.Transport.channel_errors + (ChannelError,) ) driver_type = 'amqp' driver_name = 'librabbitmq' implements = base.Transport.implements.extend( asynchronous=True, heartbeats=False, ) def __init__(self, client, **kwargs): self.client = client self.default_port = kwargs.get('default_port') or self.default_port self.default_ssl_port = (kwargs.get('default_ssl_port') or self.default_ssl_port) self.__reader = None def driver_version(self): return amqp.__version__ def create_channel(self, connection): return connection.channel() def drain_events(self, connection, **kwargs): return connection.drain_events(**kwargs) def establish_connection(self): """Establish connection to the AMQP broker.""" conninfo = self.client for name, default_value in self.default_connection_params.items(): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.ssl: raise NotImplementedError(NO_SSL_ERROR) opts = dict({ 'host': conninfo.host, 'userid': conninfo.userid, 'password': conninfo.password, 'virtual_host': conninfo.virtual_host, 'login_method': conninfo.login_method, 'insist': conninfo.insist, 'ssl': conninfo.ssl, 'connect_timeout': conninfo.connect_timeout, }, **conninfo.transport_options or {}) conn = self.Connection(**opts) conn.client = self.client self.client.drain_events = conn.drain_events return conn def close_connection(self, connection): """Close the AMQP broker connection.""" self.client.drain_events = None connection.close() def _collect(self, connection): if connection is not None: for channel in connection.channels.values(): channel.connection = None try: os.close(connection.fileno()) except (OSError, ValueError): pass connection.channels.clear() connection.callbacks.clear() self.client.drain_events = None self.client = None def verify_connection(self, connection): return connection.connected def register_with_event_loop(self, connection, loop): loop.add_reader( connection.fileno(), self.on_readable, connection, loop, ) def get_manager(self, *args, **kwargs): return get_manager(self.client, *args, **kwargs) def qos_semantics_matches_spec(self, connection): try: props = connection.server_properties except AttributeError: warnings.warn(UserWarning(W_VERSION)) else: if props.get('product') == 'RabbitMQ': return version_string_as_tuple(props['version']) < (3, 3) return True @property def default_connection_params(self): return { 'userid': 'guest', 'password': 'guest', 'port': (self.default_ssl_port if self.client.ssl else self.default_port), 'hostname': 'localhost', 'login_method': 'PLAIN', } kombu-5.5.3/kombu/transport/memory.py000066400000000000000000000045441477772317200177240ustar00rootroot00000000000000"""In-memory transport module for Kombu. Simple transport using memory for storing messages. Messages can be passed only between threads. 
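A minimal in-process sketch using the simple-queue API:

.. code-block:: python

    import kombu

    with kombu.Connection('memory://') as conn:
        queue = conn.SimpleQueue('test')
        queue.put({'hello': 'world'})
        message = queue.get(block=True, timeout=1)
        print(message.payload)  # {'hello': 'world'}
        message.ack()
        queue.close()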
Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: No * Supports Priority: No * Supports TTL: Yes Connection String ================= Connection string is in the following format: .. code-block:: memory:// """ from __future__ import annotations from collections import defaultdict from queue import Queue from . import base, virtual class Channel(virtual.Channel): """In-memory Channel.""" events = defaultdict(set) queues = {} do_restore = False supports_fanout = True def _has_queue(self, queue, **kwargs): return queue in self.queues def _new_queue(self, queue, **kwargs): if queue not in self.queues: self.queues[queue] = Queue() def _get(self, queue, timeout=None): return self._queue_for(queue).get(block=False) def _queue_for(self, queue): if queue not in self.queues: self.queues[queue] = Queue() return self.queues[queue] def _queue_bind(self, *args): pass def _put_fanout(self, exchange, message, routing_key=None, **kwargs): for queue in self._lookup(exchange, routing_key): self._queue_for(queue).put(message) def _put(self, queue, message, **kwargs): self._queue_for(queue).put(message) def _size(self, queue): return self._queue_for(queue).qsize() def _delete(self, queue, *args, **kwargs): self.queues.pop(queue, None) def _purge(self, queue): q = self._queue_for(queue) size = q.qsize() q.queue.clear() return size def close(self): super().close() for queue in self.queues.values(): queue.empty() self.queues = {} def after_reply_message_received(self, queue): pass class Transport(virtual.Transport): """In-memory Transport.""" Channel = Channel #: memory backend state is global. global_state = virtual.BrokerState() implements = base.Transport.implements driver_type = 'memory' driver_name = 'memory' def __init__(self, client, **kwargs): super().__init__(client, **kwargs) self.state = self.global_state def driver_version(self): return 'N/A' kombu-5.5.3/kombu/transport/mongodb.py000066400000000000000000000367061477772317200200460ustar00rootroot00000000000000# copyright: (c) 2010 - 2013 by Flavio Percoco Premoli. # license: BSD, see LICENSE for more details. """MongoDB transport module for kombu. Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: Yes * Supports TTL: Yes Connection String ================= *Unreviewed* Transport Options ================= * ``connect_timeout``, * ``ssl``, * ``ttl``, * ``capped_queue_size``, * ``default_hostname``, * ``default_port``, * ``default_database``, * ``messages_collection``, * ``routing_collection``, * ``broadcast_collection``, * ``queues_collection``, * ``calc_queue_size``, """ from __future__ import annotations import datetime from queue import Empty import pymongo from pymongo import MongoClient, errors, uri_parser from pymongo.cursor import CursorType from kombu.exceptions import VersionMismatch from kombu.utils.compat import _detect_environment from kombu.utils.encoding import bytes_to_str from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from kombu.utils.url import maybe_sanitize_url from . 
import virtual from .base import to_rabbitmq_queue_arguments E_SERVER_VERSION = """\ Kombu requires MongoDB version 1.3+ (server is {0})\ """ E_NO_TTL_INDEXES = """\ Kombu requires MongoDB version 2.2+ (server is {0}) for TTL indexes support\ """ class BroadcastCursor: """Cursor for broadcast queues.""" def __init__(self, cursor): self._cursor = cursor self._offset = 0 self.purge(rewind=False) def get_size(self): return self._cursor.collection.count_documents({}) - self._offset def close(self): self._cursor.close() def purge(self, rewind=True): if rewind: self._cursor.rewind() # Fast-forward the cursor past old events self._offset = self._cursor.collection.count_documents({}) self._cursor = self._cursor.skip(self._offset) def __iter__(self): return self def __next__(self): while True: try: msg = next(self._cursor) except pymongo.errors.OperationFailure as exc: # In some cases tailed cursor can become invalid # and have to be reinitalized if 'not valid at server' in str(exc): self.purge() continue raise else: break self._offset += 1 return msg next = __next__ class Channel(virtual.Channel): """MongoDB Channel.""" supports_fanout = True # Mutable container. Shared by all class instances _fanout_queues = {} # Options ssl = False ttl = False connect_timeout = None capped_queue_size = 100000 calc_queue_size = True default_hostname = '127.0.0.1' default_port = 27017 default_database = 'kombu_default' messages_collection = 'messages' routing_collection = 'messages.routing' broadcast_collection = 'messages.broadcast' queues_collection = 'messages.queues' from_transport_options = (virtual.Channel.from_transport_options + ( 'connect_timeout', 'ssl', 'ttl', 'capped_queue_size', 'default_hostname', 'default_port', 'default_database', 'messages_collection', 'routing_collection', 'broadcast_collection', 'queues_collection', 'calc_queue_size', )) def __init__(self, *vargs, **kwargs): super().__init__(*vargs, **kwargs) self._broadcast_cursors = {} # Evaluate connection self.client # AbstractChannel/Channel interface implementation def _new_queue(self, queue, **kwargs): if self.ttl: self.queues.update_one( {'_id': queue}, { '$set': { '_id': queue, 'options': kwargs, 'expire_at': self._get_queue_expire( kwargs, 'x-expires' ), }, }, upsert=True) def _get(self, queue): if queue in self._fanout_queues: try: msg = next(self._get_broadcast_cursor(queue)) except StopIteration: msg = None else: msg = self.messages.find_one_and_delete( {'queue': queue}, sort=[('priority', pymongo.ASCENDING)], ) if self.ttl: self._update_queues_expire(queue) if msg is None: raise Empty() return loads(bytes_to_str(msg['payload'])) def _size(self, queue): # Do not calculate actual queue size if requested # for performance considerations if not self.calc_queue_size: return super()._size(queue) if queue in self._fanout_queues: return self._get_broadcast_cursor(queue).get_size() return self.messages.count_documents({'queue': queue}) def _put(self, queue, message, **kwargs): data = { 'payload': dumps(message), 'queue': queue, 'priority': self._get_message_priority(message, reverse=True) } if self.ttl: data['expire_at'] = self._get_queue_expire(queue, 'x-message-ttl') msg_expire = self._get_message_expire(message) if msg_expire is not None and ( data['expire_at'] is None or msg_expire < data['expire_at'] ): data['expire_at'] = msg_expire self.messages.insert_one(data) def _put_fanout(self, exchange, message, routing_key, **kwargs): self.broadcast.insert_one({'payload': dumps(message), 'queue': exchange}) def _purge(self, queue): size = 
self._size(queue) if queue in self._fanout_queues: self._get_broadcast_cursor(queue).purge() else: self.messages.delete_many({'queue': queue}) return size def get_table(self, exchange): localRoutes = frozenset(self.state.exchanges[exchange]['table']) brokerRoutes = self.routing.find( {'exchange': exchange} ) return localRoutes | frozenset( (r['routing_key'], r['pattern'], r['queue']) for r in brokerRoutes ) def _queue_bind(self, exchange, routing_key, pattern, queue): if self.typeof(exchange).type == 'fanout': self._create_broadcast_cursor( exchange, routing_key, pattern, queue) self._fanout_queues[queue] = exchange lookup = { 'exchange': exchange, 'queue': queue, 'routing_key': routing_key, 'pattern': pattern, } data = lookup.copy() if self.ttl: data['expire_at'] = self._get_queue_expire(queue, 'x-expires') self.routing.update_one(lookup, {'$set': data}, upsert=True) def queue_delete(self, queue, **kwargs): self.routing.delete_many({'queue': queue}) if self.ttl: self.queues.delete_one({'_id': queue}) super().queue_delete(queue, **kwargs) if queue in self._fanout_queues: try: cursor = self._broadcast_cursors.pop(queue) except KeyError: pass else: cursor.close() self._fanout_queues.pop(queue) # Implementation details def _parse_uri(self, scheme='mongodb://'): # See mongodb uri documentation: # https://docs.mongodb.org/manual/reference/connection-string/ client = self.connection.client hostname = client.hostname if hostname.startswith('srv://'): scheme = 'mongodb+srv://' hostname = 'mongodb+' + hostname if not hostname.startswith(scheme): hostname = scheme + hostname if not hostname[len(scheme):]: hostname += self.default_hostname if client.userid and '@' not in hostname: head, tail = hostname.split('://') credentials = client.userid if client.password: credentials += ':' + client.password hostname = head + '://' + credentials + '@' + tail port = client.port if client.port else self.default_port # We disable validating and normalization parameters here, # because pymongo will validate and normalize parameters later in __init__ of MongoClient parsed = uri_parser.parse_uri(hostname, port, validate=False) dbname = parsed['database'] or client.virtual_host if dbname in ('/', None): dbname = self.default_database options = { 'auto_start_request': True, 'ssl': self.ssl, 'connectTimeoutMS': (int(self.connect_timeout * 1000) if self.connect_timeout else None), } options.update(parsed['options']) options = self._prepare_client_options(options) if 'tls' in options: options.pop('ssl') return hostname, dbname, options def _prepare_client_options(self, options): if pymongo.version_tuple >= (3,): options.pop('auto_start_request', None) if isinstance(options.get('readpreference'), int): modes = pymongo.read_preferences._MONGOS_MODES options['readpreference'] = modes[options['readpreference']] return options def prepare_queue_arguments(self, arguments, **kwargs): return to_rabbitmq_queue_arguments(arguments, **kwargs) def _open(self, scheme='mongodb://'): hostname, dbname, conf = self._parse_uri(scheme=scheme) conf['host'] = hostname env = _detect_environment() if env == 'gevent': from gevent import monkey monkey.patch_all() elif env == 'eventlet': from eventlet import monkey_patch monkey_patch() mongoconn = MongoClient(**conf) database = mongoconn[dbname] version_str = mongoconn.server_info()['version'] version_str = version_str.split('-')[0] version = tuple(map(int, version_str.split('.'))) if version < (1, 3): raise VersionMismatch(E_SERVER_VERSION.format(version_str)) elif self.ttl and version < (2, 2): 
raise VersionMismatch(E_NO_TTL_INDEXES.format(version_str)) return database def _create_broadcast(self, database): """Create capped collection for broadcast messages.""" if self.broadcast_collection in database.list_collection_names(): return database.create_collection(self.broadcast_collection, size=self.capped_queue_size, capped=True) def _ensure_indexes(self, database): """Ensure indexes on collections.""" messages = database[self.messages_collection] messages.create_index( [('queue', 1), ('priority', 1), ('_id', 1)], background=True, ) database[self.broadcast_collection].create_index([('queue', 1)]) routing = database[self.routing_collection] routing.create_index([('queue', 1), ('exchange', 1)]) if self.ttl: messages.create_index([('expire_at', 1)], expireAfterSeconds=0) routing.create_index([('expire_at', 1)], expireAfterSeconds=0) database[self.queues_collection].create_index( [('expire_at', 1)], expireAfterSeconds=0) def _create_client(self): """Actually creates connection.""" database = self._open() self._create_broadcast(database) self._ensure_indexes(database) return database @cached_property def client(self): return self._create_client() @cached_property def messages(self): return self.client[self.messages_collection] @cached_property def routing(self): return self.client[self.routing_collection] @cached_property def broadcast(self): return self.client[self.broadcast_collection] @cached_property def queues(self): return self.client[self.queues_collection] def _get_broadcast_cursor(self, queue): try: return self._broadcast_cursors[queue] except KeyError: # Cursor may be absent when Channel created more than once. # _fanout_queues is a class-level mutable attribute so it's # shared over all Channel instances. return self._create_broadcast_cursor( self._fanout_queues[queue], None, None, queue, ) def _create_broadcast_cursor(self, exchange, routing_key, pattern, queue): if pymongo.version_tuple >= (3, ): query = { 'filter': {'queue': exchange}, 'cursor_type': CursorType.TAILABLE, } else: query = { 'query': {'queue': exchange}, 'tailable': True, } cursor = self.broadcast.find(**query) ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor) return ret def _get_message_expire(self, message): value = message.get('properties', {}).get('expiration') if value is not None: return self.get_now() + datetime.timedelta(milliseconds=int(value)) def _get_queue_expire(self, queue, argument): """Get expiration header named `argument` of queue definition. Note: ---- `queue` must be either queue name or options itself. 
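        For example (values are illustrative): queue options of
        ``{'arguments': {'x-expires': 60000}}`` yield an expiration of
        ``get_now() + 60 seconds`` for that queue.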
""" if isinstance(queue, str): doc = self.queues.find_one({'_id': queue}) if not doc: return data = doc['options'] else: data = queue try: value = data['arguments'][argument] except (KeyError, TypeError): return return self.get_now() + datetime.timedelta(milliseconds=value) def _update_queues_expire(self, queue): """Update expiration field on queues documents.""" expire_at = self._get_queue_expire(queue, 'x-expires') if not expire_at: return self.routing.update_many( {'queue': queue}, {'$set': {'expire_at': expire_at}}) self.queues.update_many( {'_id': queue}, {'$set': {'expire_at': expire_at}}) def get_now(self): """Return current time in UTC.""" return datetime.datetime.utcnow() class Transport(virtual.Transport): """MongoDB Transport.""" Channel = Channel can_parse_url = True polling_interval = 1 default_port = Channel.default_port connection_errors = ( virtual.Transport.connection_errors + (errors.ConnectionFailure,) ) channel_errors = ( virtual.Transport.channel_errors + ( errors.ConnectionFailure, errors.OperationFailure) ) driver_type = 'mongodb' driver_name = 'pymongo' implements = virtual.Transport.implements.extend( exchange_type=frozenset(['direct', 'topic', 'fanout']), ) def driver_version(self): return pymongo.version def as_uri(self, uri: str, include_password=False, mask='**') -> str: if not uri: return 'mongodb://' if include_password: return uri if ',' not in uri: return maybe_sanitize_url(uri) uri1, remainder = uri.split(',', 1) return ','.join([maybe_sanitize_url(uri1), remainder]) kombu-5.5.3/kombu/transport/native_delayed_delivery.py000066400000000000000000000112431477772317200232660ustar00rootroot00000000000000"""Native Delayed Delivery API. Only relevant for RabbitMQ. """ from __future__ import annotations from kombu import Connection, Exchange, Queue, binding from kombu.log import get_logger logger = get_logger(__name__) MAX_NUMBER_OF_BITS_TO_USE = 28 MAX_LEVEL = MAX_NUMBER_OF_BITS_TO_USE - 1 CELERY_DELAYED_DELIVERY_EXCHANGE = "celery_delayed_delivery" def level_name(level: int) -> str: """Generates the delayed queue/exchange name based on the level.""" if level < 0: raise ValueError("level must be a non-negative number") return f"celery_delayed_{level}" def declare_native_delayed_delivery_exchanges_and_queues(connection: Connection, queue_type: str) -> None: """Declares all native delayed delivery exchanges and queues.""" if queue_type != "classic" and queue_type != "quorum": raise ValueError("queue_type must be either classic or quorum") channel = connection.channel() routing_key: str = "1.#" for level in range(27, -1, - 1): current_level = level_name(level) next_level = level_name(level - 1) if level > 0 else None delayed_exchange: Exchange = Exchange( current_level, type="topic").bind(channel) delayed_exchange.declare() queue_arguments = { "x-queue-type": queue_type, "x-overflow": "reject-publish", "x-message-ttl": pow(2, level) * 1000, "x-dead-letter-exchange": next_level if level > 0 else CELERY_DELAYED_DELIVERY_EXCHANGE, } if queue_type == 'quorum': queue_arguments["x-dead-letter-strategy"] = "at-least-once" delayed_queue: Queue = Queue( current_level, queue_arguments=queue_arguments ).bind(channel) delayed_queue.declare() delayed_queue.bind_to(current_level, routing_key) routing_key = "*." 
+ routing_key routing_key = "0.#" for level in range(27, 0, - 1): current_level = level_name(level) next_level = level_name(level - 1) if level > 0 else None next_level_exchange: Exchange = Exchange( next_level, type="topic").bind(channel) next_level_exchange.bind_to(current_level, routing_key) routing_key = "*." + routing_key delivery_exchange: Exchange = Exchange( CELERY_DELAYED_DELIVERY_EXCHANGE, type="topic").bind(channel) delivery_exchange.declare() delivery_exchange.bind_to(level_name(0), routing_key) def bind_queue_to_native_delayed_delivery_exchange(connection: Connection, queue: Queue) -> None: """Bind a queue to the native delayed delivery exchange. When a message arrives at the delivery exchange, it must be forwarded to the original exchange and queue. To accomplish this, the function retrieves the exchange or binding objects associated with the queue and binds them to the delivery exchange. :param connection: The connection object used to create and manage the channel. :type connection: Connection :param queue: The queue to be bound to the native delayed delivery exchange. :type queue: Queue Warning: ------- If a direct exchange is detected, a warning will be logged because native delayed delivery does not support direct exchanges. """ channel = connection.channel() queue = queue.bind(channel) bindings: set[binding] = set() if queue.exchange: bindings.add(binding( queue.exchange, routing_key=queue.routing_key, arguments=queue.binding_arguments )) elif queue.bindings: bindings = queue.bindings for binding_entry in bindings: exchange: Exchange = binding_entry.exchange.bind(channel) if exchange.type == 'direct': logger.warn(f"Exchange {exchange.name} is a direct exchange " f"and native delayed delivery do not support direct exchanges.\n" f"ETA tasks published to this exchange will block the worker until the ETA arrives.") continue routing_key = binding_entry.routing_key if binding_entry.routing_key.startswith( '#') else f"#.{binding_entry.routing_key}" exchange.bind_to(CELERY_DELAYED_DELIVERY_EXCHANGE, routing_key=routing_key) queue.bind_to(exchange.name, routing_key=routing_key) def calculate_routing_key(countdown: int, routing_key: str) -> str: """Calculate the routing key for publishing a delayed message based on the countdown.""" if countdown < 1: raise ValueError("countdown must be a positive number") if not routing_key: raise ValueError("routing_key must be non-empty") return '.'.join(list(f'{countdown:028b}')) + f'.{routing_key}' kombu-5.5.3/kombu/transport/pyamqp.py000066400000000000000000000171071477772317200177220ustar00rootroot00000000000000"""pyamqp transport module for Kombu. Pure-Python amqp transport using py-amqp library. Features ======== * Type: Native * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: Yes * Supports TTL: Yes Connection String ================= Connection string can have the following formats: .. code-block:: amqp://[USER:PASSWORD@]BROKER_ADDRESS[:PORT][/VIRTUALHOST] [USER:PASSWORD@]BROKER_ADDRESS[:PORT][/VIRTUALHOST] amqp:// For TLS encryption use: .. code-block:: amqps://[USER:PASSWORD@]BROKER_ADDRESS[:PORT][/VIRTUALHOST] Transport Options ================= Transport Options are passed to constructor of underlying py-amqp :class:`~kombu.connection.Connection` class. Using TLS ========= Transport over TLS can be enabled by ``ssl`` parameter of :class:`~kombu.Connection` class. 
By setting ``ssl=True``, TLS transport is used:: conn = Connect('amqp://', ssl=True) This is equivalent to ``amqps://`` transport URI:: conn = Connect('amqps://') For adding additional parameters to underlying TLS, ``ssl`` parameter should be set with dict instead of True:: conn = Connect('amqp://broker.example.com', ssl={ 'keyfile': '/path/to/keyfile' 'certfile': '/path/to/certfile', 'ca_certs': '/path/to/ca_certfile' } ) All parameters are passed to ``ssl`` parameter of :class:`amqp.connection.Connection` class. SSL option ``server_hostname`` can be set to ``None`` which is causing using hostname from broker URL. This is useful when failover is used to fill ``server_hostname`` with currently used broker:: conn = Connect('amqp://broker1.example.com;broker2.example.com', ssl={ 'server_hostname': None } ) """ from __future__ import annotations import amqp from kombu.utils.amq_manager import get_manager from kombu.utils.text import version_string_as_tuple from . import base from .base import to_rabbitmq_queue_arguments DEFAULT_PORT = 5672 DEFAULT_SSL_PORT = 5671 class Message(base.Message): """AMQP Message.""" def __init__(self, msg, channel=None, **kwargs): props = msg.properties super().__init__( body=msg.body, channel=channel, delivery_tag=msg.delivery_tag, content_type=props.get('content_type'), content_encoding=props.get('content_encoding'), delivery_info=msg.delivery_info, properties=msg.properties, headers=props.get('application_headers') or {}, **kwargs) class Channel(amqp.Channel, base.StdChannel): """AMQP Channel.""" Message = Message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None, _Message=amqp.Message): """Prepare message so that it can be sent using this transport.""" return _Message( body, priority=priority, content_type=content_type, content_encoding=content_encoding, application_headers=headers, **properties or {} ) def prepare_queue_arguments(self, arguments, **kwargs): return to_rabbitmq_queue_arguments(arguments, **kwargs) def message_to_python(self, raw_message): """Convert encoded message body back to a Python value.""" return self.Message(raw_message, channel=self) class Connection(amqp.Connection): """AMQP Connection.""" Channel = Channel class Transport(base.Transport): """AMQP Transport.""" Connection = Connection default_port = DEFAULT_PORT default_ssl_port = DEFAULT_SSL_PORT # it's very annoying that pyamqp sometimes raises AttributeError # if the connection is lost, but nothing we can do about that here. 
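    # The error tuples below are re-exported from py-amqp; kombu's retry
    # helpers (for example Connection.ensure()) consult them to decide which
    # exceptions are recoverable and which require a fresh connection.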
connection_errors = amqp.Connection.connection_errors channel_errors = amqp.Connection.channel_errors recoverable_connection_errors = \ amqp.Connection.recoverable_connection_errors recoverable_channel_errors = amqp.Connection.recoverable_channel_errors driver_name = 'py-amqp' driver_type = 'amqp' implements = base.Transport.implements.extend( asynchronous=True, heartbeats=True, ) def __init__(self, client, default_port=None, default_ssl_port=None, **kwargs): self.client = client self.default_port = default_port or self.default_port self.default_ssl_port = default_ssl_port or self.default_ssl_port def driver_version(self): return amqp.__version__ def create_channel(self, connection): return connection.channel() def drain_events(self, connection, **kwargs): return connection.drain_events(**kwargs) def _collect(self, connection): if connection is not None: connection.collect() def establish_connection(self): """Establish connection to the AMQP broker.""" conninfo = self.client for name, default_value in self.default_connection_params.items(): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.hostname == 'localhost': conninfo.hostname = '127.0.0.1' # when server_hostname is None, use hostname from URI. if isinstance(conninfo.ssl, dict) and \ 'server_hostname' in conninfo.ssl and \ conninfo.ssl['server_hostname'] is None: conninfo.ssl['server_hostname'] = conninfo.hostname opts = dict({ 'host': conninfo.host, 'userid': conninfo.userid, 'password': conninfo.password, 'login_method': conninfo.login_method, 'virtual_host': conninfo.virtual_host, 'insist': conninfo.insist, 'ssl': conninfo.ssl, 'connect_timeout': conninfo.connect_timeout, 'heartbeat': conninfo.heartbeat, }, **conninfo.transport_options or {}) conn = self.Connection(**opts) conn.client = self.client conn.connect() return conn def verify_connection(self, connection): return connection.connected def close_connection(self, connection): """Close the AMQP broker connection.""" connection.client = None connection.close() def get_heartbeat_interval(self, connection): return connection.heartbeat def register_with_event_loop(self, connection, loop): connection.transport.raise_on_initial_eintr = True loop.add_reader(connection.sock, self.on_readable, connection, loop) def heartbeat_check(self, connection, rate=2): return connection.heartbeat_tick(rate=rate) def qos_semantics_matches_spec(self, connection): props = connection.server_properties if props.get('product') == 'RabbitMQ': return version_string_as_tuple(props['version']) < (3, 3) return True @property def default_connection_params(self): return { 'userid': 'guest', 'password': 'guest', 'port': (self.default_ssl_port if self.client.ssl else self.default_port), 'hostname': 'localhost', 'login_method': 'PLAIN', } def get_manager(self, *args, **kwargs): return get_manager(self.client, *args, **kwargs) class SSLTransport(Transport): """AMQP SSL Transport.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # ugh, not exactly pure, but hey, it's python. if not self.client.ssl: # not dict or False self.client.ssl = True kombu-5.5.3/kombu/transport/pyro.py000066400000000000000000000133241477772317200174010ustar00rootroot00000000000000"""Pyro transport module for kombu. Pyro transport, and Kombu Broker daemon. Requires the :mod:`Pyro4` library to be installed. 
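It can be installed from PyPI (the package name below is assumed to match
the import):

.. code-block:: console

    $ pip install Pyro4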
Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: No * Supports Priority: No * Supports TTL: No Connection String ================= To use the Pyro transport with Kombu, use an url of the form: .. code-block:: pyro://localhost/kombu.broker The hostname is where the transport will be looking for a Pyro name server, which is used in turn to locate the kombu.broker Pyro service. This broker can be launched by simply executing this transport module directly, with the command: ``python -m kombu.transport.pyro`` Transport Options ================= """ from __future__ import annotations import sys from queue import Empty, Queue from kombu.exceptions import reraise from kombu.log import get_logger from kombu.utils.objects import cached_property from . import virtual try: import Pyro4 as pyro from Pyro4.errors import NamingError from Pyro4.util import SerializerBase except ImportError: # pragma: no cover pyro = NamingError = SerializerBase = None DEFAULT_PORT = 9090 E_NAMESERVER = """\ Unable to locate pyro nameserver on host {0.hostname}\ """ E_LOOKUP = """\ Unable to lookup '{0.virtual_host}' in pyro nameserver on host {0.hostname}\ """ logger = get_logger(__name__) class Channel(virtual.Channel): """Pyro Channel.""" def close(self): super().close() if self.shared_queues: self.shared_queues._pyroRelease() def queues(self): return self.shared_queues.get_queue_names() def _new_queue(self, queue, **kwargs): if queue not in self.queues(): self.shared_queues.new_queue(queue) def _has_queue(self, queue, **kwargs): return self.shared_queues.has_queue(queue) def _get(self, queue, timeout=None): queue = self._queue_for(queue) return self.shared_queues.get(queue) def _queue_for(self, queue): if queue not in self.queues(): self.shared_queues.new_queue(queue) return queue def _put(self, queue, message, **kwargs): queue = self._queue_for(queue) self.shared_queues.put(queue, message) def _size(self, queue): return self.shared_queues.size(queue) def _delete(self, queue, *args, **kwargs): self.shared_queues.delete(queue) def _purge(self, queue): return self.shared_queues.purge(queue) def after_reply_message_received(self, queue): pass @cached_property def shared_queues(self): return self.connection.shared_queues class Transport(virtual.Transport): """Pyro Transport.""" Channel = Channel #: memory backend state is global. # TODO: To be checked whether state can be per-Transport global_state = virtual.BrokerState() default_port = DEFAULT_PORT driver_type = driver_name = 'pyro' def __init__(self, client, **kwargs): super().__init__(client, **kwargs) self.state = self.global_state def _open(self): logger.debug("trying Pyro nameserver to find the broker daemon") conninfo = self.client try: nameserver = pyro.locateNS(host=conninfo.hostname, port=self.default_port) except NamingError: reraise(NamingError, NamingError(E_NAMESERVER.format(conninfo)), sys.exc_info()[2]) try: # name of registered pyro object uri = nameserver.lookup(conninfo.virtual_host) return pyro.Proxy(uri) except NamingError: reraise(NamingError, NamingError(E_LOOKUP.format(conninfo)), sys.exc_info()[2]) def driver_version(self): return pyro.__version__ @cached_property def shared_queues(self): return self._open() if pyro is not None: SerializerBase.register_dict_to_class("queue.Empty", lambda cls, data: Empty()) @pyro.expose @pyro.behavior(instance_mode="single") class KombuBroker: """Kombu Broker used by the Pyro transport. You have to run this as a separate (Pyro) service. 
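    When a queue is empty, :meth:`get` raises :exc:`queue.Empty`; the
    ``SerializerBase`` hook registered at module level above converts the
    serialized error back into :exc:`queue.Empty` on the transport side, so
    the channel can treat it like any other empty read.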
""" def __init__(self): self.queues = {} def get_queue_names(self): return list(self.queues) def new_queue(self, queue): if queue in self.queues: return # silently ignore the fact that queue already exists self.queues[queue] = Queue() def has_queue(self, queue): return queue in self.queues def get(self, queue): return self.queues[queue].get(block=False) def put(self, queue, message): self.queues[queue].put(message) def size(self, queue): return self.queues[queue].qsize() def delete(self, queue): del self.queues[queue] def purge(self, queue): while True: try: self.queues[queue].get(blocking=False) except Empty: break # launch a Kombu Broker daemon with the command: # ``python -m kombu.transport.pyro`` if __name__ == "__main__": print("Launching Broker for Kombu's Pyro transport.") with pyro.Daemon() as daemon: print("(Expecting a Pyro name server at {}:{})" .format(pyro.config.NS_HOST, pyro.config.NS_PORT)) with pyro.locateNS() as ns: print("You can connect with Kombu using the url " "'pyro://{}/kombu.broker'".format(pyro.config.NS_HOST)) uri = daemon.register(KombuBroker) ns.register("kombu.broker", uri) daemon.requestLoop() kombu-5.5.3/kombu/transport/qpid.py000066400000000000000000002140011477772317200173400ustar00rootroot00000000000000"""Qpid Transport module for kombu. `Qpid`_ transport using `qpid-python`_ as the client and `qpid-tools`_ for broker management. The use this transport you must install the necessary dependencies. These dependencies are available via PyPI and can be installed using the pip command: .. code-block:: console $ pip install kombu[qpid] or to install the requirements manually: .. code-block:: console $ pip install qpid-tools qpid-python .. admonition:: Python 3 and PyPy Limitations The Qpid transport does not support Python 3 or PyPy environments due to underlying dependencies not being compatible. This version is tested and works with with Python 2.7. .. _`Qpid`: https://qpid.apache.org/ .. _`qpid-python`: https://pypi.org/project/qpid-python/ .. _`qpid-tools`: https://pypi.org/project/qpid-tools/ Features ======== * Type: Native * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: Yes * Supports TTL: Yes Authentication ============== This transport supports SASL authentication with the Qpid broker. Normally, SASL mechanisms are negotiated from a client list and a server list of possible mechanisms, but in practice, different SASL client libraries give different behaviors. These different behaviors cause the expected SASL mechanism to not be selected in many cases. As such, this transport restricts the mechanism types based on Kombu's configuration according to the following table. +------------------------------------+--------------------+ | **Broker String** | **SASL Mechanism** | +------------------------------------+--------------------+ | qpid://hostname/ | ANONYMOUS | +------------------------------------+--------------------+ | qpid://username:password@hostname/ | PLAIN | +------------------------------------+--------------------+ | see instructions below | EXTERNAL | +------------------------------------+--------------------+ The user can override the above SASL selection behaviors and specify the SASL string using the :attr:`~kombu.Connection.login_method` argument to the :class:`~kombu.Connection` object. The string can be a single SASL mechanism or a space separated list of SASL mechanisms. If you are using Celery with Kombu, this can be accomplished by setting the *BROKER_LOGIN_METHOD* Celery option. .. 
note:: While using SSL, Qpid users may want to override the SASL mechanism to use *EXTERNAL*. In that case, Qpid requires a username to be presented that matches the *CN* of the SSL client certificate. Ensure that the broker string contains the corresponding username. For example, if the client certificate has *CN=asdf* and the client connects to *example.com* on port 5671, the broker string should be: **qpid://asdf@example.com:5671/** Transport Options ================= The :attr:`~kombu.Connection.transport_options` argument to the :class:`~kombu.Connection` object are passed directly to the :class:`qpid.messaging.endpoints.Connection` as keyword arguments. These options override and replace any other default or specified values. If using Celery, this can be accomplished by setting the *BROKER_TRANSPORT_OPTIONS* Celery option. """ from __future__ import annotations import os import select import socket import ssl import sys import uuid from gettext import gettext as _ from queue import Empty from time import monotonic import amqp.protocol try: import fcntl except ImportError: fcntl = None try: import qpidtoollibs except ImportError: # pragma: no cover qpidtoollibs = None try: from qpid.messaging.exceptions import ConnectionError from qpid.messaging.exceptions import Empty as QpidEmpty from qpid.messaging.exceptions import NotFound, SessionClosed except ImportError: # pragma: no cover ConnectionError = None NotFound = None QpidEmpty = None SessionClosed = None try: import qpid except ImportError: # pragma: no cover qpid = None from kombu.log import get_logger from kombu.transport import base, virtual from kombu.transport.virtual import Base64, Message logger = get_logger(__name__) try: buffer except NameError: buffer = bytes OBJECT_ALREADY_EXISTS_STRING = 'object already exists' VERSION = (1, 0, 0) __version__ = '.'.join(map(str, VERSION)) def dependency_is_none(dependency): """Return True if the dependency is None, otherwise False. This is done using a function so that tests can mock this behavior easily. :param dependency: The module to check if it is None :return: True if dependency is None otherwise False. """ return dependency is None class AuthenticationFailure(Exception): """Cannot authenticate with Qpid.""" class QoS: """A helper object for message prefetch and ACKing purposes. :keyword prefetch_count: Initial prefetch count, hard set to 1. :type prefetch_count: int NOTE: prefetch_count is currently hard set to 1, and needs to be improved This object is instantiated 1-for-1 with a :class:`~.kombu.transport.qpid.Channel` instance. QoS allows ``prefetch_count`` to be set to the number of outstanding messages the corresponding :class:`~kombu.transport.qpid.Channel` should be allowed to prefetch. Setting ``prefetch_count`` to 0 disables prefetch limits, and the object can hold an arbitrary number of messages. Messages are added using :meth:`append`, which are held until they are ACKed asynchronously through a call to :meth:`ack`. Messages that are received, but not ACKed will not be delivered by the broker to another consumer until an ACK is received, or the session is closed. Messages are referred to using delivery_tag, which are unique per :class:`Channel`. Delivery tags are managed outside of this object and are passed in with a message to :meth:`append`. Un-ACKed messages can be looked up from QoS using :meth:`get` and can be rejected and forgotten using :meth:`reject`. 
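    A rough lifecycle sketch (names are illustrative)::

        qos.append(qpid_message, delivery_tag)  # hold an un-ACKed message
        qos.get(delivery_tag)                   # look it up again if needed
        qos.ack(delivery_tag)                   # or qos.reject(delivery_tag, requeue=True)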
""" def __init__(self, session, prefetch_count=1): self.session = session self.prefetch_count = 1 self._not_yet_acked = {} def can_consume(self): """Return True if the :class:`Channel` can consume more messages. Used to ensure the client adheres to currently active prefetch limits. :returns: True, if this QoS object can accept more messages without violating the prefetch_count. If prefetch_count is 0, can_consume will always return True. :rtype: bool """ return ( not self.prefetch_count or len(self._not_yet_acked) < self.prefetch_count ) def can_consume_max_estimate(self): """Return the remaining message capacity. Returns an estimated number of outstanding messages that a :class:`kombu.transport.qpid.Channel` can accept without exceeding ``prefetch_count``. If ``prefetch_count`` is 0, then this method returns 1. :returns: The number of estimated messages that can be fetched without violating the prefetch_count. :rtype: int """ return 1 if not self.prefetch_count else ( self.prefetch_count - len(self._not_yet_acked) ) def append(self, message, delivery_tag): """Append message to the list of un-ACKed messages. Add a message, referenced by the delivery_tag, for ACKing, rejecting, or getting later. Messages are saved into a dict by delivery_tag. :param message: A received message that has not yet been ACKed. :type message: qpid.messaging.Message :param delivery_tag: A UUID to refer to this message by upon receipt. :type delivery_tag: uuid.UUID """ self._not_yet_acked[delivery_tag] = message def get(self, delivery_tag): """Get an un-ACKed message by delivery_tag. If called with an invalid delivery_tag a :exc:`KeyError` is raised. :param delivery_tag: The delivery tag associated with the message to be returned. :type delivery_tag: uuid.UUID :return: An un-ACKed message that is looked up by delivery_tag. :rtype: qpid.messaging.Message """ return self._not_yet_acked[delivery_tag] def ack(self, delivery_tag): """Acknowledge a message by delivery_tag. Called asynchronously once the message has been handled and can be forgotten by the broker. :param delivery_tag: the delivery tag associated with the message to be acknowledged. :type delivery_tag: uuid.UUID """ message = self._not_yet_acked.pop(delivery_tag) self.session.acknowledge(message=message) def reject(self, delivery_tag, requeue=False): """Reject a message by delivery_tag. Explicitly notify the broker that the channel associated with this QoS object is rejecting the message that was previously delivered. If requeue is False, then the message is not requeued for delivery to another consumer. If requeue is True, then the message is requeued for delivery to another consumer. :param delivery_tag: The delivery tag associated with the message to be rejected. :type delivery_tag: uuid.UUID :keyword requeue: If True, the broker will be notified to requeue the message. If False, the broker will be told to drop the message entirely. In both cases, the message will be removed from this object. :type requeue: bool """ message = self._not_yet_acked.pop(delivery_tag) QpidDisposition = qpid.messaging.Disposition if requeue: disposition = QpidDisposition(qpid.messaging.RELEASED) else: disposition = QpidDisposition(qpid.messaging.REJECTED) self.session.acknowledge(message=message, disposition=disposition) class Channel(base.StdChannel): """Supports broker configuration and messaging send and receive. :param connection: A Connection object that this Channel can reference. Currently only used to access callbacks. 
:type connection: kombu.transport.qpid.Connection :param transport: The Transport this Channel is associated with. :type transport: kombu.transport.qpid.Transport A channel object is designed to have method-parity with a Channel as defined in AMQP 0-10 and earlier, which allows for the following broker actions: - exchange declare and delete - queue declare and delete - queue bind and unbind operations - queue length and purge operations - sending/receiving/rejecting messages - structuring, encoding, and decoding messages - supports synchronous and asynchronous reads - reading state about the exchange, queues, and bindings Channels are designed to all share a single TCP connection with a broker, but provide a level of isolated communication with the broker while benefiting from a shared TCP connection. The Channel is given its :class:`~kombu.transport.qpid.Connection` object by the :class:`~kombu.transport.qpid.Transport` that instantiates the channel. This channel inherits from :class:`~kombu.transport.base.StdChannel`, which makes this a 'native' channel versus a 'virtual' channel which would inherit from :class:`kombu.transports.virtual`. Messages sent using this channel are assigned a delivery_tag. The delivery_tag is generated for a message as they are prepared for sending by :meth:`basic_publish`. The delivery_tag is unique per channel instance. The delivery_tag has no meaningful context in other objects, and is only maintained in the memory of this object, and the underlying :class:`QoS` object that provides support. Each channel object instantiates exactly one :class:`QoS` object for prefetch limiting, and asynchronous ACKing. The :class:`QoS` object is lazily instantiated through a property method :meth:`qos`. The :class:`QoS` object is a supporting object that should not be accessed directly except by the channel itself. Synchronous reads on a queue are done using a call to :meth:`basic_get` which uses :meth:`_get` to perform the reading. These methods read immediately and do not accept any form of timeout. :meth:`basic_get` reads synchronously and ACKs messages before returning them. ACKing is done in all cases, because an application that reads messages using qpid.messaging, but does not ACK them will experience a memory leak. The no_ack argument to :meth:`basic_get` does not affect ACKing functionality. Asynchronous reads on a queue are done by starting a consumer using :meth:`basic_consume`. Each call to :meth:`basic_consume` will cause a :class:`~qpid.messaging.endpoints.Receiver` to be created on the :class:`~qpid.messaging.endpoints.Session` started by the :class: `Transport`. The receiver will asynchronously read using qpid.messaging, and prefetch messages before the call to :meth:`Transport.basic_drain` occurs. The prefetch_count value of the :class:`QoS` object is the capacity value of the new receiver. The new receiver capacity must always be at least 1, otherwise none of the receivers will appear to be ready for reading, and will never be read from. Each call to :meth:`basic_consume` creates a consumer, which is given a consumer tag that is identified by the caller of :meth:`basic_consume`. Already started consumers can be cancelled using by their consumer_tag using :meth:`basic_cancel`. Cancellation of a consumer causes the :class:`~qpid.messaging.endpoints.Receiver` object to be closed. Asynchronous message ACKing is supported through :meth:`basic_ack`, and is referenced by delivery_tag. The Channel object uses its :class:`QoS` object to perform the message ACKing. 
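    A condensed sketch of the read paths described above (queue, tag, and
    callback names are illustrative)::

        channel.queue_declare('tasks')
        msg = channel.basic_get('tasks')          # synchronous read, ACKed on receipt
        channel.basic_consume('tasks', no_ack=False,
                              callback=on_message, consumer_tag='tag1')
        channel.basic_ack(delivery_tag)           # asynchronous ACK after handling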
""" #: A class reference that will be instantiated using the qos property. QoS = QoS #: A class reference that identifies # :class:`~kombu.transport.virtual.Message` as the message class type Message = Message #: Default body encoding. #: NOTE: ``transport_options['body_encoding']`` will override this value. body_encoding = 'base64' #: Binary <-> ASCII codecs. codecs = {'base64': Base64()} def __init__(self, connection, transport): self.connection = connection self.transport = transport qpid_connection = connection.get_qpid_connection() self._broker = qpidtoollibs.BrokerAgent(qpid_connection) self.closed = False self._tag_to_queue = {} self._receivers = {} self._qos = None def _get(self, queue): """Non-blocking, single-message read from a queue. An internal method to perform a non-blocking, single-message read from a queue by name. This method creates a :class:`~qpid.messaging.endpoints.Receiver` to read from the queue using the :class:`~qpid.messaging.endpoints.Session` saved on the associated :class:`~kombu.transport.qpid.Transport`. The receiver is closed before the method exits. If a message is available, a :class:`qpid.messaging.Message` object is returned. If no message is available, a :class:`qpid.messaging.exceptions.Empty` exception is raised. This is an internal method. External calls for get functionality should be done using :meth:`basic_get`. :param queue: The queue name to get the message from :type queue: str :return: The received message. :rtype: :class:`qpid.messaging.Message` :raises: :class:`qpid.messaging.exceptions.Empty` if no message is available. """ rx = self.transport.session.receiver(queue) try: message = rx.fetch(timeout=0) finally: rx.close() return message def _put(self, routing_key, message, exchange=None, durable=True, **kwargs): """Synchronously send a single message onto a queue or exchange. An internal method which synchronously sends a single message onto a given queue or exchange. If exchange is not specified, the message is sent directly to a queue specified by routing_key. If no queue is found by the name of routing_key while exchange is not specified an exception is raised. If an exchange is specified, then the message is delivered onto the requested exchange using routing_key. Message sending is synchronous using sync=True because large messages in kombu funtests were not being fully sent before the receiver closed. This method creates a :class:`qpid.messaging.endpoints.Sender` to send the message to the queue using the :class:`qpid.messaging.endpoints.Session` created and referenced by the associated :class:`~kombu.transport.qpid.Transport`. The sender is closed before the method exits. External calls for put functionality should be done using :meth:`basic_publish`. :param routing_key: If exchange is None, treated as the queue name to send the message to. If exchange is not None, treated as the routing_key to use as the message is submitted onto the exchange. :type routing_key: str :param message: The message to be sent as prepared by :meth:`basic_publish`. :type message: dict :keyword exchange: keyword parameter of the exchange this message should be sent on. If no exchange is specified, the message is sent directly to a queue specified by routing_key. :type exchange: str :keyword durable: whether or not the message should persist or be durable. 
:type durable: bool """ if not exchange: address = f'{routing_key}; ' \ '{{assert: always, node: {{type: queue}}}}' msg_subject = None else: address = f'{exchange}/{routing_key}; '\ '{{assert: always, node: {{type: topic}}}}' msg_subject = str(routing_key) sender = self.transport.session.sender(address) qpid_message = qpid.messaging.Message(content=message, durable=durable, subject=msg_subject) try: sender.send(qpid_message, sync=True) finally: sender.close() def _purge(self, queue): """Purge all undelivered messages from a queue specified by name. An internal method to purge all undelivered messages from a queue specified by name. If the queue does not exist a :class:`qpid.messaging.exceptions.NotFound` exception is raised. The queue message depth is first checked, and then the broker is asked to purge that number of messages. The integer number of messages requested to be purged is returned. The actual number of messages purged may be different than the requested number of messages to purge (see below). Sometimes delivered messages are asked to be purged, but are not. This case fails silently, which is the correct behavior when a message that has been delivered to a different consumer, who has not ACKed the message, and still has an active session with the broker. Messages in that case are not safe for purging and will be retained by the broker. The client is unable to change this delivery behavior. This is an internal method. External calls for purge functionality should be done using :meth:`queue_purge`. :param queue: the name of the queue to be purged :type queue: str :return: The number of messages requested to be purged. :rtype: int :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue being purged cannot be found. """ queue_to_purge = self._broker.getQueue(queue) if queue_to_purge is None: error_text = f"NOT_FOUND - no queue '{queue}'" raise NotFound(code=404, text=error_text) message_count = queue_to_purge.values['msgDepth'] if message_count > 0: queue_to_purge.purge(message_count) return message_count def _size(self, queue): """Get the number of messages in a queue specified by name. An internal method to return the number of messages in a queue specified by name. It returns an integer count of the number of messages currently in the queue. :param queue: The name of the queue to be inspected for the number of messages :type queue: str :return the number of messages in the queue specified by name. :rtype: int """ queue_to_check = self._broker.getQueue(queue) message_depth = queue_to_check.values['msgDepth'] return message_depth def _delete(self, queue, *args, **kwargs): """Delete a queue and all messages on that queue. An internal method to delete a queue specified by name and all the messages on it. First, all messages are purged from a queue using a call to :meth:`_purge`. Second, the broker is asked to delete the queue. This is an internal method. External calls for queue delete functionality should be done using :meth:`queue_delete`. :param queue: The name of the queue to be deleted. :type queue: str """ self._purge(queue) self._broker.delQueue(queue) def _has_queue(self, queue, **kwargs): """Determine if the broker has a queue specified by name. :param queue: The queue name to check if the queue exists. :type queue: str :return: True if a queue exists on the broker, and false otherwise. 
:rtype: bool """ if self._broker.getQueue(queue): return True else: return False def queue_declare(self, queue, passive=False, durable=False, exclusive=False, auto_delete=True, nowait=False, arguments=None): """Create a new queue specified by name. If the queue already exists, no change is made to the queue, and the return value returns information about the existing queue. The queue name is required and specified as the first argument. If passive is True, the server will not create the queue. The client can use this to check whether a queue exists without modifying the server state. Default is False. If durable is True, the queue will be durable. Durable queues remain active when a server restarts. Non-durable queues ( transient queues) are purged if/when a server restarts. Note that durable queues do not necessarily hold persistent messages, although it does not make sense to send persistent messages to a transient queue. Default is False. If exclusive is True, the queue will be exclusive. Exclusive queues may only be consumed by the current connection. Setting the 'exclusive' flag always implies 'auto-delete'. Default is False. If auto_delete is True, the queue is deleted when all consumers have finished using it. The last consumer can be cancelled either explicitly or because its channel is closed. If there was no consumer ever on the queue, it won't be deleted. Default is True. The nowait parameter is unused. It was part of the 0-9-1 protocol, but this AMQP client implements 0-10 which removed the nowait option. The arguments parameter is a set of arguments for the declaration of the queue. Arguments are passed as a dict or None. This field is ignored if passive is True. Default is None. This method returns a :class:`~collections.namedtuple` with the name 'queue_declare_ok_t' and the queue name as 'queue', message count on the queue as 'message_count', and the number of active consumers as 'consumer_count'. The named tuple values are ordered as queue, message_count, and consumer_count respectively. Due to Celery's non-ACKing of events, a ring policy is set on any queue that starts with the string 'celeryev' or ends with the string 'pidbox'. These are celery event queues, and Celery does not ack them, causing the messages to build-up. Eventually Qpid stops serving messages unless the 'ring' policy is set, at which point the buffer backing the queue becomes circular. :param queue: The name of the queue to be created. :type queue: str :param passive: If True, the sever will not create the queue. :type passive: bool :param durable: If True, the queue will be durable. :type durable: bool :param exclusive: If True, the queue will be exclusive. :type exclusive: bool :param auto_delete: If True, the queue is deleted when all consumers have finished using it. :type auto_delete: bool :param nowait: This parameter is unused since the 0-10 specification does not include it. :type nowait: bool :param arguments: A set of arguments for the declaration of the queue. :type arguments: dict or None :return: A named tuple representing the declared queue as a named tuple. The tuple values are ordered as queue, message count, and the active consumer count. 
:rtype: :class:`~collections.namedtuple` """ options = {'passive': passive, 'durable': durable, 'exclusive': exclusive, 'auto-delete': auto_delete, 'arguments': arguments} if queue.startswith('celeryev') or queue.endswith('pidbox'): options['qpid.policy_type'] = 'ring' try: self._broker.addQueue(queue, options=options) except Exception as exc: if OBJECT_ALREADY_EXISTS_STRING not in str(exc): raise exc queue_to_check = self._broker.getQueue(queue) message_count = queue_to_check.values['msgDepth'] consumer_count = queue_to_check.values['consumerCount'] return amqp.protocol.queue_declare_ok_t(queue, message_count, consumer_count) def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): """Delete a queue by name. Delete a queue specified by name. Using the if_unused keyword argument, the delete can only occur if there are 0 consumers bound to it. Using the if_empty keyword argument, the delete can only occur if there are 0 messages in the queue. :param queue: The name of the queue to be deleted. :type queue: str :keyword if_unused: If True, delete only if the queue has 0 consumers. If False, delete a queue even with consumers bound to it. :type if_unused: bool :keyword if_empty: If True, only delete the queue if it is empty. If False, delete the queue if it is empty or not. :type if_empty: bool """ if self._has_queue(queue): if if_empty and self._size(queue): return queue_obj = self._broker.getQueue(queue) consumer_count = queue_obj.getAttributes()['consumerCount'] if if_unused and consumer_count > 0: return self._delete(queue) def exchange_declare(self, exchange='', type='direct', durable=False, **kwargs): """Create a new exchange. Create an exchange of a specific type, and optionally have the exchange be durable. If an exchange of the requested name already exists, no action is taken and no exceptions are raised. Durable exchanges will survive a broker restart, non-durable exchanges will not. Exchanges provide behaviors based on their type. The expected behaviors are those defined in the AMQP 0-10 and prior specifications including 'direct', 'topic', and 'fanout' functionality. :keyword type: The exchange type. Valid values include 'direct', 'topic', and 'fanout'. :type type: str :keyword exchange: The name of the exchange to be created. If no exchange is specified, then a blank string will be used as the name. :type exchange: str :keyword durable: True if the exchange should be durable, or False otherwise. :type durable: bool """ options = {'durable': durable} try: self._broker.addExchange(type, exchange, options) except Exception as exc: if OBJECT_ALREADY_EXISTS_STRING not in str(exc): raise exc def exchange_delete(self, exchange_name, **kwargs): """Delete an exchange specified by name. :param exchange_name: The name of the exchange to be deleted. :type exchange_name: str """ self._broker.delExchange(exchange_name) def queue_bind(self, queue, exchange, routing_key, **kwargs): """Bind a queue to an exchange with a bind key. Bind a queue specified by name, to an exchange specified by name, with a specific bind key. The queue and exchange must already exist on the broker for the bind to complete successfully. Queues may be bound to exchanges multiple times with different keys. :param queue: The name of the queue to be bound. :type queue: str :param exchange: The name of the exchange that the queue should be bound to. :type exchange: str :param routing_key: The bind key that the specified queue should bind to the specified exchange with. 
:type routing_key: str """ self._broker.bind(exchange, queue, routing_key) def queue_unbind(self, queue, exchange, routing_key, **kwargs): """Unbind a queue from an exchange with a given bind key. Unbind a queue specified by name, from an exchange specified by name, that is already bound with a bind key. The queue and exchange must already exist on the broker, and bound with the bind key for the operation to complete successfully. Queues may be bound to exchanges multiple times with different keys, thus the bind key is a required field to unbind in an explicit way. :param queue: The name of the queue to be unbound. :type queue: str :param exchange: The name of the exchange that the queue should be unbound from. :type exchange: str :param routing_key: The existing bind key between the specified queue and a specified exchange that should be unbound. :type routing_key: str """ self._broker.unbind(exchange, queue, routing_key) def queue_purge(self, queue, **kwargs): """Remove all undelivered messages from queue. Purge all undelivered messages from a queue specified by name. If the queue does not exist an exception is raised. The queue message depth is first checked, and then the broker is asked to purge that number of messages. The integer number of messages requested to be purged is returned. The actual number of messages purged may be different than the requested number of messages to purge. Sometimes delivered messages are asked to be purged, but are not. This case fails silently, which is the correct behavior when a message that has been delivered to a different consumer, who has not ACKed the message, and still has an active session with the broker. Messages in that case are not safe for purging and will be retained by the broker. The client is unable to change this delivery behavior. Internally, this method relies on :meth:`_purge`. :param queue: The name of the queue which should have all messages removed. :type queue: str :return: The number of messages requested to be purged. :rtype: int :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue being purged cannot be found. """ return self._purge(queue) def basic_get(self, queue, no_ack=False, **kwargs): """Non-blocking single message get and ACK from a queue by name. Internally this method uses :meth:`_get` to fetch the message. If an :class:`~qpid.messaging.exceptions.Empty` exception is raised by :meth:`_get`, this method silences it and returns None. If :meth:`_get` does return a message, that message is ACKed. The no_ack parameter has no effect on ACKing behavior, and all messages are ACKed in all cases. This method never adds fetched Messages to the internal QoS object for asynchronous ACKing. This method converts the object type of the method as it passes through. Fetching from the broker, :meth:`_get` returns a :class:`qpid.messaging.Message`, but this method takes the payload of the :class:`qpid.messaging.Message` and instantiates a :class:`~kombu.transport.virtual.Message` object with the payload based on the class setting of self.Message. :param queue: The queue name to fetch a message from. :type queue: str :keyword no_ack: The no_ack parameter has no effect on the ACK behavior of this method. Un-ACKed messages create a memory leak in qpid.messaging, and need to be ACKed in all cases. :type noack: bool :return: The received message. 
:rtype: :class:`~kombu.transport.virtual.Message` """ try: qpid_message = self._get(queue) raw_message = qpid_message.content message = self.Message(raw_message, channel=self) self.transport.session.acknowledge(message=qpid_message) return message except Empty: pass def basic_ack(self, delivery_tag, multiple=False): """Acknowledge a message by delivery_tag. Acknowledges a message referenced by delivery_tag. Messages can only be ACKed using :meth:`basic_ack` if they were acquired using :meth:`basic_consume`. This is the ACKing portion of the asynchronous read behavior. Internally, this method uses the :class:`QoS` object, which stores messages and is responsible for the ACKing. :param delivery_tag: The delivery tag associated with the message to be acknowledged. :type delivery_tag: uuid.UUID :param multiple: not implemented. If set to True an AssertionError is raised. :type multiple: bool """ assert multiple is False self.qos.ack(delivery_tag) def basic_reject(self, delivery_tag, requeue=False): """Reject a message by delivery_tag. Rejects a message that has been received by the Channel, but not yet acknowledged. Messages are referenced by their delivery_tag. If requeue is False, the rejected message will be dropped by the broker and not delivered to any other consumers. If requeue is True, then the rejected message will be requeued for delivery to another consumer, potentially to the same consumer who rejected the message previously. :param delivery_tag: The delivery tag associated with the message to be rejected. :type delivery_tag: uuid.UUID :keyword requeue: If False, the rejected message will be dropped by the broker and not delivered to any other consumers. If True, then the rejected message will be requeued for delivery to another consumer, potentially to the same consumer who rejected the message previously. :type requeue: bool """ self.qos.reject(delivery_tag, requeue=requeue) def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): """Start an asynchronous consumer that reads from a queue. This method starts a consumer of type :class:`~qpid.messaging.endpoints.Receiver` using the :class:`~qpid.messaging.endpoints.Session` created and referenced by the :class:`Transport` that reads messages from a queue specified by name until stopped by a call to :meth:`basic_cancel`. Messages are available later through a synchronous call to :meth:`Transport.drain_events`, which will drain from the consumer started by this method. :meth:`Transport.drain_events` is synchronous, but the receiving of messages over the network occurs asynchronously, so it should still perform well. :meth:`Transport.drain_events` calls the callback provided here with the Message of type self.Message. Each consumer is referenced by a consumer_tag, which is provided by the caller of this method. This method sets up the callback onto the self.connection object in a dict keyed by queue name. :meth:`~Transport.drain_events` is responsible for calling that callback upon message receipt. All messages that are received are added to the QoS object to be saved for asynchronous ACKing later after the message has been handled by the caller of :meth:`~Transport.drain_events`. Messages can be ACKed after being received through a call to :meth:`basic_ack`. If no_ack is True, The no_ack flag indicates that the receiver of the message will not call :meth:`basic_ack` later. Since the message will not be ACKed later, it is ACKed immediately. 
:meth:`basic_consume` transforms the message object type prior to calling the callback. Initially the message comes in as a :class:`qpid.messaging.Message`. This method unpacks the payload of the :class:`qpid.messaging.Message` and creates a new object of type self.Message. This method wraps the user delivered callback in a runtime-built function which provides the type transformation from :class:`qpid.messaging.Message` to :class:`~kombu.transport.virtual.Message`, and adds the message to the associated :class:`QoS` object for asynchronous ACKing if necessary. :param queue: The name of the queue to consume messages from :type queue: str :param no_ack: If True, then messages will not be saved for ACKing later, but will be ACKed immediately. If False, then messages will be saved for ACKing later with a call to :meth:`basic_ack`. :type no_ack: bool :param callback: a callable that will be called when messages arrive on the queue. :type callback: a callable object :param consumer_tag: a tag to reference the created consumer by. This consumer_tag is needed to cancel the consumer. :type consumer_tag: an immutable object """ self._tag_to_queue[consumer_tag] = queue def _callback(qpid_message): raw_message = qpid_message.content message = self.Message(raw_message, channel=self) delivery_tag = message.delivery_tag self.qos.append(qpid_message, delivery_tag) if no_ack: # Celery will not ack this message later, so we should ack now self.basic_ack(delivery_tag) return callback(message) self.connection._callbacks[queue] = _callback new_receiver = self.transport.session.receiver(queue) new_receiver.capacity = self.qos.prefetch_count self._receivers[consumer_tag] = new_receiver def basic_cancel(self, consumer_tag): """Cancel consumer by consumer tag. Request the consumer stops reading messages from its queue. The consumer is a :class:`~qpid.messaging.endpoints.Receiver`, and it is closed using :meth:`~qpid.messaging.endpoints.Receiver.close`. This method also cleans up all lingering references of the consumer. :param consumer_tag: The tag which refers to the consumer to be cancelled. Originally specified when the consumer was created as a parameter to :meth:`basic_consume`. :type consumer_tag: an immutable object """ if consumer_tag in self._receivers: receiver = self._receivers.pop(consumer_tag) receiver.close() queue = self._tag_to_queue.pop(consumer_tag, None) self.connection._callbacks.pop(queue, None) def close(self): """Cancel all associated messages and close the Channel. This cancels all consumers by calling :meth:`basic_cancel` for each known consumer_tag. It also closes the self._broker sessions. Closing the sessions implicitly causes all outstanding, un-ACKed messages to be considered undelivered by the broker. """ if not self.closed: self.closed = True for consumer_tag in self._receivers.keys(): self.basic_cancel(consumer_tag) if self.connection is not None: self.connection.close_channel(self) self._broker.close() @property def qos(self): """:class:`QoS` manager for this channel. Lazily instantiates an object of type :class:`QoS` upon access to the self.qos attribute. :return: An already existing, or newly created QoS object :rtype: :class:`QoS` """ if self._qos is None: self._qos = self.QoS(self.transport.session) return self._qos def basic_qos(self, prefetch_count, *args): """Change :class:`QoS` settings for this Channel. Set the number of un-acknowledged messages this Channel can fetch and hold. 
The prefetch_value is also used as the capacity for any new :class:`~qpid.messaging.endpoints.Receiver` objects. Currently, this value is hard coded to 1. :param prefetch_count: Not used. This method is hard-coded to 1. :type prefetch_count: int """ self.qos.prefetch_count = 1 def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Prepare message data for sending. This message is typically called by :meth:`kombu.messaging.Producer._publish` as a preparation step in message publication. :param body: The body of the message :type body: str :keyword priority: A number between 0 and 9 that sets the priority of the message. :type priority: int :keyword content_type: The content_type the message body should be treated as. If this is unset, the :class:`qpid.messaging.endpoints.Sender` object tries to autodetect the content_type from the body. :type content_type: str :keyword content_encoding: The content_encoding the message body is encoded as. :type content_encoding: str :keyword headers: Additional Message headers that should be set. Passed in as a key-value pair. :type headers: dict :keyword properties: Message properties to be set on the message. :type properties: dict :return: Returns a dict object that encapsulates message attributes. See parameters for more details on attributes that can be set. :rtype: dict """ properties = properties or {} info = properties.setdefault('delivery_info', {}) info['priority'] = priority or 0 return {'body': body, 'content-encoding': content_encoding, 'content-type': content_type, 'headers': headers or {}, 'properties': properties or {}} def basic_publish(self, message, exchange, routing_key, **kwargs): """Publish message onto an exchange using a routing key. Publish a message onto an exchange specified by name using a routing key specified by routing_key. Prepares the message in the following ways before sending: - encodes the body using :meth:`encode_body` - wraps the body as a buffer object, so that :class:`qpid.messaging.endpoints.Sender` uses a content type that can support arbitrarily large messages. - sets delivery_tag to a random uuid.UUID - sets the exchange and routing_key info as delivery_info Internally uses :meth:`_put` to send the message synchronously. This message is typically called by :class:`kombu.messaging.Producer._publish` as the final step in message publication. :param message: A dict containing key value pairs with the message data. A valid message dict can be generated using the :meth:`prepare_message` method. :type message: dict :param exchange: The name of the exchange to submit this message onto. :type exchange: str :param routing_key: The routing key to be used as the message is submitted onto the exchange. :type routing_key: str """ message['body'], body_encoding = self.encode_body( message['body'], self.body_encoding, ) props = message['properties'] props.update( body_encoding=body_encoding, delivery_tag=uuid.uuid4(), ) props['delivery_info'].update( exchange=exchange, routing_key=routing_key, ) self._put(routing_key, message, exchange, **kwargs) def encode_body(self, body, encoding=None): """Encode a body using an optionally specified encoding. The encoding can be specified by name, and is looked up in self.codecs. self.codecs uses strings as its keys which specify the name of the encoding, and then the value is an instantiated object that can provide encoding/decoding of that type through encode and decode methods. :param body: The body to be encoded. 
:type body: str :keyword encoding: The encoding type to be used. Must be a supported codec listed in self.codecs. :type encoding: str :return: If encoding is specified, return a tuple with the first position being the encoded body, and the second position the encoding used. If encoding is not specified, the body is passed through unchanged. :rtype: tuple """ if encoding: return self.codecs.get(encoding).encode(body), encoding return body, encoding def decode_body(self, body, encoding=None): """Decode a body using an optionally specified encoding. The encoding can be specified by name, and is looked up in self.codecs. self.codecs uses strings as its keys which specify the name of the encoding, and then the value is an instantiated object that can provide encoding/decoding of that type through encode and decode methods. :param body: The body to be encoded. :type body: str :keyword encoding: The encoding type to be used. Must be a supported codec listed in self.codecs. :type encoding: str :return: If encoding is specified, the decoded body is returned. If encoding is not specified, the body is returned unchanged. :rtype: str """ if encoding: return self.codecs.get(encoding).decode(body) return body def typeof(self, exchange, default='direct'): """Get the exchange type. Lookup and return the exchange type for an exchange specified by name. Exchange types are expected to be 'direct', 'topic', and 'fanout', which correspond with exchange functionality as specified in AMQP 0-10 and earlier. If the exchange cannot be found, the default exchange type is returned. :param exchange: The exchange to have its type lookup up. :type exchange: str :keyword default: The type of exchange to assume if the exchange does not exist. :type default: str :return: The exchange type either 'direct', 'topic', or 'fanout'. :rtype: str """ qpid_exchange = self._broker.getExchange(exchange) if qpid_exchange: qpid_exchange_attributes = qpid_exchange.getAttributes() return qpid_exchange_attributes['type'] else: return default class Connection: """Qpid Connection. Encapsulate a connection object for the :class:`~kombu.transport.qpid.Transport`. :param host: The host that connections should connect to. :param port: The port that connection should connect to. :param username: The username that connections should connect with. Optional. :param password: The password that connections should connect with. Optional but requires a username. :param transport: The transport type that connections should use. Either 'tcp', or 'ssl' are expected as values. :param timeout: the timeout used when a Connection connects to the broker. :param sasl_mechanisms: The sasl authentication mechanism type to use. refer to SASL documentation for an explanation of valid values. .. note:: qpid.messaging has an AuthenticationFailure exception type, but instead raises a ConnectionError with a message that indicates an authentication failure occurred in those situations. ConnectionError is listed as a recoverable error type, so kombu will attempt to retry if a ConnectionError is raised. Retrying the operation without adjusting the credentials is not correct, so this method specifically checks for a ConnectionError that indicates an Authentication Failure occurred. In those situations, the error type is mutated while preserving the original message and raised so kombu will allow the exception to not be considered recoverable. 
A connection object is created by a :class:`~kombu.transport.qpid.Transport` during a call to :meth:`~kombu.transport.qpid.Transport.establish_connection`. The :class:`~kombu.transport.qpid.Transport` passes in connection options as keywords that should be used for any connections created. Each :class:`~kombu.transport.qpid.Transport` creates exactly one Connection. A Connection object maintains a reference to a :class:`~qpid.messaging.endpoints.Connection` which can be accessed through a bound getter method named :meth:`get_qpid_connection` method. Each Channel uses a the Connection for each :class:`~qpidtoollibs.BrokerAgent`, and the Transport maintains a session for all senders and receivers. The Connection object is also responsible for maintaining the dictionary of references to callbacks that should be called when messages are received. These callbacks are saved in _callbacks, and keyed on the queue name associated with the received message. The _callbacks are setup in :meth:`Channel.basic_consume`, removed in :meth:`Channel.basic_cancel`, and called in :meth:`Transport.drain_events`. The following keys are expected to be passed in as keyword arguments at a minimum: All keyword arguments are collected into the connection_options dict and passed directly through to :meth:`qpid.messaging.endpoints.Connection.establish`. """ # A class reference to the :class:`Channel` object Channel = Channel def __init__(self, **connection_options): self.connection_options = connection_options self.channels = [] self._callbacks = {} self._qpid_conn = None establish = qpid.messaging.Connection.establish # There are several inconsistent behaviors in the sasl libraries # used on different systems. Although qpid.messaging allows # multiple space separated sasl mechanisms, this implementation # only advertises one type to the server. These are either # ANONYMOUS, PLAIN, or an overridden value specified by the user. sasl_mech = connection_options['sasl_mechanisms'] try: msg = _('Attempting to connect to qpid with ' 'SASL mechanism %s') % sasl_mech logger.debug(msg) self._qpid_conn = establish(**self.connection_options) # connection was successful if we got this far msg = _('Connected to qpid with SASL ' 'mechanism %s') % sasl_mech logger.info(msg) except ConnectionError as conn_exc: # if we get one of these errors, do not raise an exception. # Raising will cause the connection to be retried. Instead, # just continue on to the next mech. coded_as_auth_failure = getattr(conn_exc, 'code', None) == 320 contains_auth_fail_text = \ 'Authentication failed' in conn_exc.text contains_mech_fail_text = \ 'sasl negotiation failed: no mechanism agreed' \ in conn_exc.text contains_mech_unavail_text = 'no mechanism available' \ in conn_exc.text if coded_as_auth_failure or \ contains_auth_fail_text or contains_mech_fail_text or \ contains_mech_unavail_text: msg = _('Unable to connect to qpid with SASL ' 'mechanism %s') % sasl_mech logger.error(msg) raise AuthenticationFailure(sys.exc_info()[1]) raise def get_qpid_connection(self): """Return the existing connection (singleton). :return: The existing qpid.messaging.Connection :rtype: :class:`qpid.messaging.endpoints.Connection` """ return self._qpid_conn def close(self): """Close the connection. Closing the connection will close all associated session, senders, or receivers used by the Connection. """ self._qpid_conn.close() def close_channel(self, channel): """Close a Channel. Close a channel specified by a reference to the :class:`~kombu.transport.qpid.Channel` object. 
:param channel: Channel that should be closed. :type channel: :class:`~kombu.transport.qpid.Channel`. """ try: self.channels.remove(channel) except ValueError: pass finally: channel.connection = None class Transport(base.Transport): """Kombu native transport for a Qpid broker. Provide a native transport for Kombu that allows consumers and producers to read and write messages to/from a broker. This Transport is capable of supporting both synchronous and asynchronous reading. All writes are synchronous through the :class:`Channel` objects that support this Transport. Asynchronous reads are done using a call to :meth:`drain_events`, which synchronously reads messages that were fetched asynchronously, and then handles them through calls to the callback handlers maintained on the :class:`Connection` object. The Transport also provides methods to establish and close a connection to the broker. This Transport establishes a factory-like pattern that allows for singleton pattern to consolidate all Connections into a single one. The Transport can create :class:`Channel` objects to communicate with the broker with using the :meth:`create_channel` method. The Transport identifies recoverable connection errors and recoverable channel errors according to the Kombu 3.0 interface. These exception are listed as tuples and store in the Transport class attribute `recoverable_connection_errors` and `recoverable_channel_errors` respectively. Any exception raised that is not a member of one of these tuples is considered non-recoverable. This allows Kombu support for automatic retry of certain operations to function correctly. For backwards compatibility to the pre Kombu 3.0 exception interface, the recoverable errors are also listed as `connection_errors` and `channel_errors`. """ # Reference to the class that should be used as the Connection object Connection = Connection # This Transport does not specify a polling interval. polling_interval = None # This Transport does support the Celery asynchronous event model. implements = virtual.Transport.implements.extend( asynchronous=True, exchange_type=frozenset(['direct', 'topic', 'fanout']), ) # The driver type and name for identification purposes. driver_type = 'qpid' driver_name = 'qpid' # Exceptions that can be recovered from, but where the connection must be # closed and re-established first. recoverable_connection_errors = ( ConnectionError, select.error, ) # Exceptions that can be automatically recovered from without # re-establishing the connection. recoverable_channel_errors = ( NotFound, ) # Support the pre 3.0 Kombu exception labeling interface which treats # connection_errors and channel_errors both as recoverable via a # reconnect. connection_errors = recoverable_connection_errors channel_errors = recoverable_channel_errors def __init__(self, *args, **kwargs): self.verify_runtime_environment() super().__init__(*args, **kwargs) self.use_async_interface = False def verify_runtime_environment(self): """Verify that the runtime environment is acceptable. This method is called as part of __init__ and raises a RuntimeError in Python3 or PyPI environments. This module is not compatible with Python3 or PyPI. The RuntimeError identifies this to the user up front along with suggesting Python 2.6+ be used instead. This method also checks that the dependencies qpidtoollibs and qpid.messaging are installed. If either one is not installed a RuntimeError is raised. :raises: RuntimeError if the runtime environment is not acceptable. 
""" if dependency_is_none(qpidtoollibs): raise RuntimeError( 'The Python package "qpidtoollibs" is missing. Install it ' 'with your package manager. You can also try `pip install ' 'qpid-tools`.') if dependency_is_none(qpid): raise RuntimeError( 'The Python package "qpid.messaging" is missing. Install it ' 'with your package manager. You can also try `pip install ' 'qpid-python`.') def _qpid_message_ready_handler(self, session): if self.use_async_interface: os.write(self._w, '0') def _qpid_async_exception_notify_handler(self, obj_with_exception, exc): if self.use_async_interface: os.write(self._w, 'e') def on_readable(self, connection, loop): """Handle any messages associated with this Transport. This method clears a single message from the externally monitored file descriptor by issuing a read call to the self.r file descriptor which removes a single '0' character that was placed into the pipe by the Qpid session message callback handler. Once a '0' is read, all available events are drained through a call to :meth:`drain_events`. The file descriptor self.r is modified to be non-blocking, ensuring that an accidental call to this method when no more messages will not cause indefinite blocking. Nothing is expected to be returned from :meth:`drain_events` because :meth:`drain_events` handles messages by calling callbacks that are maintained on the :class:`~kombu.transport.qpid.Connection` object. When :meth:`drain_events` returns, all associated messages have been handled. This method calls drain_events() which reads as many messages as are available for this Transport, and then returns. It blocks in the sense that reading and handling a large number of messages may take time, but it does not block waiting for a new message to arrive. When :meth:`drain_events` is called a timeout is not specified, which causes this behavior. One interesting behavior of note is where multiple messages are ready, and this method removes a single '0' character from self.r, but :meth:`drain_events` may handle an arbitrary amount of messages. In that case, extra '0' characters may be left on self.r to be read, where messages corresponding with those '0' characters have already been handled. The external epoll loop will incorrectly think additional data is ready for reading, and will call on_readable unnecessarily, once for each '0' to be read. Additional calls to :meth:`on_readable` produce no negative side effects, and will eventually clear out the symbols from the self.r file descriptor. If new messages show up during this draining period, they will also be properly handled. :param connection: The connection associated with the readable events, which contains the callbacks that need to be called for the readable objects. :type connection: kombu.transport.qpid.Connection :param loop: The asynchronous loop object that contains epoll like functionality. :type loop: kombu.asynchronous.Hub """ os.read(self.r, 1) try: self.drain_events(connection) except socket.timeout: pass def register_with_event_loop(self, connection, loop): """Register a file descriptor and callback with the loop. Register the callback self.on_readable to be called when an external epoll loop sees that the file descriptor registered is ready for reading. The file descriptor is created by this Transport, and is written to when a message is available. 
Because supports_ev == True, Celery expects to call this method to give the Transport an opportunity to register a read file descriptor for external monitoring by celery using an Event I/O notification mechanism such as epoll. A callback is also registered that is to be called once the external epoll loop is ready to handle the epoll event associated with messages that are ready to be handled for this Transport. The registration call is made exactly once per Transport after the Transport is instantiated. :param connection: A reference to the connection associated with this Transport. :type connection: kombu.transport.qpid.Connection :param loop: A reference to the external loop. :type loop: kombu.asynchronous.hub.Hub """ self.r, self._w = os.pipe() if fcntl is not None: fcntl.fcntl(self.r, fcntl.F_SETFL, os.O_NONBLOCK) self.use_async_interface = True loop.add_reader(self.r, self.on_readable, connection, loop) def establish_connection(self): """Establish a Connection object. Determines the correct options to use when creating any connections needed by this Transport, and create a :class:`Connection` object which saves those values for connections generated as they are needed. The options are a mixture of what is passed in through the creator of the Transport, and the defaults provided by :meth:`default_connection_params`. Options cover broker network settings, timeout behaviors, authentication, and identity verification settings. This method also creates and stores a :class:`~qpid.messaging.endpoints.Session` using the :class:`~qpid.messaging.endpoints.Connection` created by this method. The Session is stored on self. :return: The created :class:`Connection` object is returned. :rtype: :class:`Connection` """ conninfo = self.client for name, default_value in self.default_connection_params.items(): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.ssl: conninfo.qpid_transport = 'ssl' conninfo.transport_options['ssl_keyfile'] = conninfo.ssl[ 'keyfile'] conninfo.transport_options['ssl_certfile'] = conninfo.ssl[ 'certfile'] conninfo.transport_options['ssl_trustfile'] = conninfo.ssl[ 'ca_certs'] if conninfo.ssl['cert_reqs'] == ssl.CERT_REQUIRED: conninfo.transport_options['ssl_skip_hostname_check'] = False else: conninfo.transport_options['ssl_skip_hostname_check'] = True else: conninfo.qpid_transport = 'tcp' credentials = {} if conninfo.login_method is None: if conninfo.userid is not None and conninfo.password is not None: sasl_mech = 'PLAIN' credentials['username'] = conninfo.userid credentials['password'] = conninfo.password elif conninfo.userid is None and conninfo.password is not None: raise Exception( 'Password configured but no username. SASL PLAIN ' 'requires a username when using a password.') elif conninfo.userid is not None and conninfo.password is None: raise Exception( 'Username configured but no password. 
SASL PLAIN ' 'requires a password when using a username.') else: sasl_mech = 'ANONYMOUS' else: sasl_mech = conninfo.login_method if conninfo.userid is not None: credentials['username'] = conninfo.userid opts = { 'host': conninfo.hostname, 'port': conninfo.port, 'sasl_mechanisms': sasl_mech, 'timeout': conninfo.connect_timeout, 'transport': conninfo.qpid_transport } opts.update(credentials) opts.update(conninfo.transport_options) conn = self.Connection(**opts) conn.client = self.client self.session = conn.get_qpid_connection().session() self.session.set_message_received_notify_handler( self._qpid_message_ready_handler ) conn.get_qpid_connection().set_async_exception_notify_handler( self._qpid_async_exception_notify_handler ) self.session.set_async_exception_notify_handler( self._qpid_async_exception_notify_handler ) return conn def close_connection(self, connection): """Close the :class:`Connection` object. :param connection: The Connection that should be closed. :type connection: :class:`kombu.transport.qpid.Connection` """ connection.close() def drain_events(self, connection, timeout=0, **kwargs): """Handle and call callbacks for all ready Transport messages. Drains all events that are ready from all :class:`~qpid.messaging.endpoints.Receiver` that are asynchronously fetching messages. For each drained message, the message is called to the appropriate callback. Callbacks are organized by queue name. :param connection: The :class:`~kombu.transport.qpid.Connection` that contains the callbacks, indexed by queue name, which will be called by this method. :type connection: kombu.transport.qpid.Connection :keyword timeout: The timeout that limits how long this method will run for. The timeout could interrupt a blocking read that is waiting for a new message, or cause this method to return before all messages are drained. Defaults to 0. :type timeout: int """ start_time = monotonic() elapsed_time = -1 while elapsed_time < timeout: try: receiver = self.session.next_receiver(timeout=timeout) message = receiver.fetch() queue = receiver.source except QpidEmpty: raise socket.timeout() else: connection._callbacks[queue](message) elapsed_time = monotonic() - start_time raise socket.timeout() def create_channel(self, connection): """Create and return a :class:`~kombu.transport.qpid.Channel`. Creates a new channel, and appends the channel to the list of channels known by the Connection. Once the new channel is created, it is returned. :param connection: The connection that should support the new :class:`~kombu.transport.qpid.Channel`. :type connection: kombu.transport.qpid.Connection :return: The new Channel that is made. :rtype: :class:`kombu.transport.qpid.Channel`. """ channel = connection.Channel(connection, self) connection.channels.append(channel) return channel @property def default_connection_params(self): """Return a dict with default connection parameters. These connection parameters will be used whenever the creator of Transport does not specify a required parameter. :return: A dict containing the default parameters. :rtype: dict """ return { 'hostname': 'localhost', 'port': 5672, } def __del__(self): """Ensure file descriptors opened in __init__() are closed.""" if getattr(self, 'use_async_interface', False): for fd in (self.r, self._w): try: os.close(fd) except OSError: # ignored pass kombu-5.5.3/kombu/transport/redis.py000066400000000000000000001401721477772317200175200ustar00rootroot00000000000000"""Redis transport module for Kombu. 
Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: Yes * Supports Priority: Yes * Supports TTL: No Connection String ================= Connection string has the following format: .. code-block:: redis://[USER:PASSWORD@]REDIS_ADDRESS[:PORT][/VIRTUALHOST] rediss://[USER:PASSWORD@]REDIS_ADDRESS[:PORT][/VIRTUALHOST] To use sentinel for dynamic Redis discovery, the connection string has the following format: .. code-block:: sentinel://[USER:PASSWORD@]SENTINEL_ADDRESS[:PORT] Transport Options ================= * ``sep`` * ``ack_emulation``: (bool) If set to True, the transport will emulate the acknowledgement behaviour of the AMQP protocol. * ``unacked_key`` * ``unacked_index_key`` * ``unacked_mutex_key`` * ``unacked_mutex_expire`` * ``visibility_timeout`` * ``unacked_restore_limit`` * ``fanout_prefix`` * ``fanout_patterns`` * ``global_keyprefix``: (str) The global key prefix to be prepended to all keys used by Kombu * ``socket_timeout`` * ``socket_connect_timeout`` * ``socket_keepalive`` * ``socket_keepalive_options`` * ``queue_order_strategy`` * ``max_connections`` * ``health_check_interval`` * ``retry_on_timeout`` * ``priority_steps`` A minimal usage sketch illustrating these options follows the module-level constants below. """ from __future__ import annotations import functools import numbers import socket from bisect import bisect from collections import namedtuple from contextlib import contextmanager from queue import Empty from time import time from vine import promise from kombu.exceptions import InconsistencyError, VersionMismatch from kombu.log import get_logger from kombu.utils.compat import register_after_fork from kombu.utils.encoding import bytes_to_str from kombu.utils.eventio import ERR, READ, poll from kombu.utils.functional import accepts_argument from kombu.utils.json import dumps, loads from kombu.utils.objects import cached_property from kombu.utils.scheduling import cycle_by_name from kombu.utils.url import _parse_url from . import virtual try: import redis except ImportError: # pragma: no cover redis = None try: from redis import sentinel except ImportError: # pragma: no cover sentinel = None logger = get_logger('kombu.transport.redis') crit, warning = logger.critical, logger.warning DEFAULT_PORT = 6379 DEFAULT_DB = 0 DEFAULT_HEALTH_CHECK_INTERVAL = 25 PRIORITY_STEPS = [0, 3, 6, 9] error_classes_t = namedtuple('error_classes_t', ( 'connection_errors', 'channel_errors', )) # This implementation may seem overly complex, but I assure you there is # a good reason for doing it this way. # # Consuming from several connections enables us to emulate channels, # which means we can have different service guarantees for individual # channels. # # So we need to consume messages from multiple connections simultaneously, # and using epoll means we don't have to do so using multiple threads. # # Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout # exchanges (broadcast), as an alternative to pushing messages to fanout-bound # queues manually. 
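# ---------------------------------------------------------------------------
# The following helper is NOT part of kombu's public API; it is a minimal,
# hypothetical usage sketch added for illustration only.  It shows how the
# connection string and transport options documented in the module docstring
# above are typically passed to ``kombu.Connection``.  The broker URL, queue
# name and option values here are assumptions chosen for the example.
# ---------------------------------------------------------------------------
def _example_redis_transport_usage():  # pragma: no cover
    from kombu import Connection

    with Connection(
        'redis://localhost:6379/0',           # assumed local broker and db 0
        transport_options={
            'visibility_timeout': 3600,       # restore un-acked messages
                                              # after one hour
            'global_keyprefix': 'example:',   # prefix every Redis key
        },
    ) as conn:
        queue = conn.SimpleQueue('example')   # hypothetical queue name
        queue.put({'hello': 'world'})
        message = queue.get(block=True, timeout=1)
        message.ack()
        queue.close()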
def get_redis_error_classes(): """Return tuple of redis error classes.""" from redis import exceptions # This exception suddenly changed name between redis-py versions if hasattr(exceptions, 'InvalidData'): DataError = exceptions.InvalidData else: DataError = exceptions.DataError return error_classes_t( (virtual.Transport.connection_errors + ( InconsistencyError, socket.error, IOError, OSError, exceptions.ConnectionError, exceptions.BusyLoadingError, exceptions.AuthenticationError, exceptions.TimeoutError)), (virtual.Transport.channel_errors + ( DataError, exceptions.InvalidResponse, exceptions.ResponseError)), ) def get_redis_ConnectionError(): """Return the redis ConnectionError exception class.""" from redis import exceptions return exceptions.ConnectionError class MutexHeld(Exception): """Raised when another party holds the lock.""" @contextmanager def Mutex(client, name, expire): """Acquire redis lock in non blocking way. Raise MutexHeld if not successful. """ lock = client.lock(name, timeout=expire) lock_acquired = False try: lock_acquired = lock.acquire(blocking=False) if lock_acquired: yield else: raise MutexHeld() finally: if lock_acquired: try: lock.release() except redis.exceptions.LockNotOwnedError: # when lock is expired pass def _after_fork_cleanup_channel(channel): channel._after_fork() class GlobalKeyPrefixMixin: """Mixin to provide common logic for global key prefixing. Overriding all the methods used by Kombu with the same key prefixing logic would be cumbersome and inefficient. Hence, we override the command execution logic that is called by all commands. """ PREFIXED_SIMPLE_COMMANDS = [ "HDEL", "HGET", "HLEN", "HSET", "LLEN", "LPUSH", "PUBLISH", "RPUSH", "RPOP", "SADD", "SREM", "SET", "SMEMBERS", "ZADD", "ZREM", "ZREVRANGEBYSCORE", ] PREFIXED_COMPLEX_COMMANDS = { "DEL": {"args_start": 0, "args_end": None}, "BRPOP": {"args_start": 0, "args_end": -1}, "EVALSHA": {"args_start": 2, "args_end": 3}, "WATCH": {"args_start": 0, "args_end": None}, } def _prefix_args(self, args): args = list(args) command = args.pop(0) if command in self.PREFIXED_SIMPLE_COMMANDS: args[0] = self.global_keyprefix + str(args[0]) elif command in self.PREFIXED_COMPLEX_COMMANDS: args_start = self.PREFIXED_COMPLEX_COMMANDS[command]["args_start"] args_end = self.PREFIXED_COMPLEX_COMMANDS[command]["args_end"] pre_args = args[:args_start] if args_start > 0 else [] post_args = [] if args_end is not None: post_args = args[args_end:] args = pre_args + [ self.global_keyprefix + str(arg) for arg in args[args_start:args_end] ] + post_args return [command, *args] def parse_response(self, connection, command_name, **options): """Parse a response from the Redis server. Method wraps ``redis.parse_response()`` to remove prefixes of keys returned by redis command. 
""" ret = super().parse_response(connection, command_name, **options) if command_name == 'BRPOP' and ret: key, value = ret key = key[len(self.global_keyprefix):] return key, value return ret def execute_command(self, *args, **kwargs): return super().execute_command(*self._prefix_args(args), **kwargs) def pipeline(self, transaction=True, shard_hint=None): return PrefixedRedisPipeline( self.connection_pool, self.response_callbacks, transaction, shard_hint, global_keyprefix=self.global_keyprefix, ) class PrefixedStrictRedis(GlobalKeyPrefixMixin, redis.Redis): """Returns a ``StrictRedis`` client that prefixes the keys it uses.""" def __init__(self, *args, **kwargs): self.global_keyprefix = kwargs.pop('global_keyprefix', '') redis.Redis.__init__(self, *args, **kwargs) def pubsub(self, **kwargs): return PrefixedRedisPubSub( self.connection_pool, global_keyprefix=self.global_keyprefix, **kwargs, ) class PrefixedRedisPipeline(GlobalKeyPrefixMixin, redis.client.Pipeline): """Custom Redis pipeline that takes global_keyprefix into consideration. As the ``PrefixedStrictRedis`` client uses the `global_keyprefix` to prefix the keys it uses, the pipeline called by the client must be able to prefix the keys as well. """ def __init__(self, *args, **kwargs): self.global_keyprefix = kwargs.pop('global_keyprefix', '') redis.client.Pipeline.__init__(self, *args, **kwargs) class PrefixedRedisPubSub(redis.client.PubSub): """Redis pubsub client that takes global_keyprefix into consideration.""" PUBSUB_COMMANDS = ( "SUBSCRIBE", "UNSUBSCRIBE", "PSUBSCRIBE", "PUNSUBSCRIBE", ) def __init__(self, *args, **kwargs): self.global_keyprefix = kwargs.pop('global_keyprefix', '') super().__init__(*args, **kwargs) def _prefix_args(self, args): args = list(args) command = args.pop(0) if command in self.PUBSUB_COMMANDS: args = [ self.global_keyprefix + str(arg) for arg in args ] return [command, *args] def parse_response(self, *args, **kwargs): """Parse a response from the Redis server. Method wraps ``PubSub.parse_response()`` to remove prefixes of keys returned by redis command. 
""" ret = super().parse_response(*args, **kwargs) if ret is None: return ret # response formats # SUBSCRIBE and UNSUBSCRIBE # -> [message type, channel, message] # PSUBSCRIBE and PUNSUBSCRIBE # -> [message type, pattern, channel, message] message_type, *channels, message = ret return [ message_type, *[channel[len(self.global_keyprefix):] for channel in channels], message, ] def execute_command(self, *args, **kwargs): return super().execute_command(*self._prefix_args(args), **kwargs) class QoS(virtual.QoS): """Redis Ack Emulation.""" restore_at_shutdown = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._vrestore_count = 0 def append(self, message, delivery_tag): delivery = message.delivery_info EX, RK = delivery['exchange'], delivery['routing_key'] # TODO: Remove this once we solely on Redis-py 3.0.0+ if redis.VERSION[0] >= 3: # Redis-py changed the format of zadd args in v3.0.0 zadd_args = [{delivery_tag: time()}] else: zadd_args = [time(), delivery_tag] with self.pipe_or_acquire() as pipe: pipe.zadd(self.unacked_index_key, *zadd_args) \ .hset(self.unacked_key, delivery_tag, dumps([message._raw, EX, RK])) \ .execute() super().append(message, delivery_tag) def restore_unacked(self, client=None): with self.channel.conn_or_acquire(client) as client: for tag in self._delivered: self.restore_by_tag(tag, client=client) self._delivered.clear() def ack(self, delivery_tag): self._remove_from_indices(delivery_tag).execute() super().ack(delivery_tag) def reject(self, delivery_tag, requeue=False): if requeue: self.restore_by_tag(delivery_tag, leftmost=True) else: self._remove_from_indices(delivery_tag).execute() super().ack(delivery_tag) @contextmanager def pipe_or_acquire(self, pipe=None, client=None): if pipe: yield pipe else: with self.channel.conn_or_acquire(client) as client: yield client.pipeline() def _remove_from_indices(self, delivery_tag, pipe=None): with self.pipe_or_acquire(pipe) as pipe: return pipe.zrem(self.unacked_index_key, delivery_tag) \ .hdel(self.unacked_key, delivery_tag) def restore_visible(self, start=0, num=10, interval=10): self._vrestore_count += 1 if (self._vrestore_count - 1) % interval: return with self.channel.conn_or_acquire() as client: ceil = time() - self.visibility_timeout try: with Mutex(client, self.unacked_mutex_key, self.unacked_mutex_expire): visible = client.zrevrangebyscore( self.unacked_index_key, ceil, 0, start=num and start, num=num, withscores=True) for tag, score in visible or []: self.restore_by_tag(tag, client) except MutexHeld: pass def restore_by_tag(self, tag, client=None, leftmost=False): def restore_transaction(pipe): p = pipe.hget(self.unacked_key, tag) pipe.multi() self._remove_from_indices(tag, pipe) if p: M, EX, RK = loads(bytes_to_str(p)) # json is unicode self.channel._do_restore_message(M, EX, RK, pipe, leftmost) with self.channel.conn_or_acquire(client) as client: client.transaction(restore_transaction, self.unacked_key) @cached_property def unacked_key(self): return self.channel.unacked_key @cached_property def unacked_index_key(self): return self.channel.unacked_index_key @cached_property def unacked_mutex_key(self): return self.channel.unacked_mutex_key @cached_property def unacked_mutex_expire(self): return self.channel.unacked_mutex_expire @cached_property def visibility_timeout(self): return self.channel.visibility_timeout class MultiChannelPoller: """Async I/O poller for Redis transport.""" eventflags = READ | ERR #: Set by :meth:`get` while reading from the socket. 
_in_protected_read = False #: Set of one-shot callbacks to call after reading from socket. after_read = None def __init__(self): # active channels self._channels = set() # file descriptor -> channel map. self._fd_to_chan = {} # channel -> socket map self._chan_to_sock = {} # poll implementation (epoll/kqueue/select) self.poller = poll() # one-shot callbacks called after reading from socket. self.after_read = set() def close(self): for fd in self._chan_to_sock.values(): try: self.poller.unregister(fd) except (KeyError, ValueError): pass self._channels.clear() self._fd_to_chan.clear() self._chan_to_sock.clear() def add(self, channel): self._channels.add(channel) def discard(self, channel): self._channels.discard(channel) def _on_connection_disconnect(self, connection): try: self.poller.unregister(connection._sock) except (AttributeError, TypeError): pass def _register(self, channel, client, type): if (channel, client, type) in self._chan_to_sock: self._unregister(channel, client, type) if client.connection._sock is None: # not connected yet. client.connection.connect() sock = client.connection._sock self._fd_to_chan[sock.fileno()] = (channel, type) self._chan_to_sock[(channel, client, type)] = sock self.poller.register(sock, self.eventflags) def _unregister(self, channel, client, type): self.poller.unregister(self._chan_to_sock[(channel, client, type)]) def _client_registered(self, channel, client, cmd): if getattr(client, 'connection', None) is None: client.connection = client.connection_pool.get_connection('_') return (client.connection._sock is not None and (channel, client, cmd) in self._chan_to_sock) def _register_BRPOP(self, channel): """Enable BRPOP mode for channel.""" ident = channel, channel.client, 'BRPOP' if not self._client_registered(channel, channel.client, 'BRPOP'): channel._in_poll = False self._register(*ident) if not channel._in_poll: # send BRPOP channel._brpop_start() def _register_LISTEN(self, channel): """Enable LISTEN mode for channel.""" if not self._client_registered(channel, channel.subclient, 'LISTEN'): channel._in_listen = False self._register(channel, channel.subclient, 'LISTEN') if not channel._in_listen: channel._subscribe() # send SUBSCRIBE def on_poll_start(self): for channel in self._channels: if channel.active_queues: # BRPOP mode? if channel.qos.can_consume(): self._register_BRPOP(channel) if channel.active_fanout_queues: # LISTEN mode? self._register_LISTEN(channel) def on_poll_init(self, poller): self.poller = poller for channel in self._channels: return channel.qos.restore_visible( num=channel.unacked_restore_limit, ) def maybe_restore_messages(self): for channel in self._channels: if channel.active_queues: # only need to do this once, as they are not local to channel. return channel.qos.restore_visible( num=channel.unacked_restore_limit, ) def maybe_check_subclient_health(self): for channel in self._channels: # only if subclient property is cached client = channel.__dict__.get('subclient') if client is not None \ and callable(getattr(client, 'check_health', None)): client.check_health() def on_readable(self, fileno): chan, type = self._fd_to_chan[fileno] if chan.qos.can_consume(): chan.handlers[type]() def handle_event(self, fileno, event): if event & READ: return self.on_readable(fileno), self elif event & ERR: chan, type = self._fd_to_chan[fileno] chan._poll_error(type) def get(self, callback, timeout=None): self._in_protected_read = True try: for channel in self._channels: if channel.active_queues: # BRPOP mode? 
if channel.qos.can_consume(): self._register_BRPOP(channel) if channel.active_fanout_queues: # LISTEN mode? self._register_LISTEN(channel) events = self.poller.poll(timeout) if events: for fileno, event in events: ret = self.handle_event(fileno, event) if ret: return # - no new data, so try to restore messages. # - reset active redis commands. self.maybe_restore_messages() raise Empty() finally: self._in_protected_read = False while self.after_read: try: fun = self.after_read.pop() except KeyError: break else: fun() @property def fds(self): return self._fd_to_chan class Channel(virtual.Channel): """Redis Channel.""" QoS = QoS _client = None _subclient = None _closing = False supports_fanout = True keyprefix_queue = '_kombu.binding.%s' keyprefix_fanout = '/{db}.' sep = '\x06\x16' _in_poll = False _in_listen = False _fanout_queues = {} ack_emulation = True unacked_key = 'unacked' unacked_index_key = 'unacked_index' unacked_mutex_key = 'unacked_mutex' unacked_mutex_expire = 300 # 5 minutes unacked_restore_limit = None visibility_timeout = 3600 # 1 hour priority_steps = PRIORITY_STEPS socket_timeout = None socket_connect_timeout = None socket_keepalive = None socket_keepalive_options = None retry_on_timeout = None max_connections = 10 health_check_interval = DEFAULT_HEALTH_CHECK_INTERVAL #: Transport option to disable fanout keyprefix. #: Can also be string, in which case it changes the default #: prefix ('/{db}.') into to something else. The prefix must #: include a leading slash and a trailing dot. #: #: Enabled by default since Kombu 4.x. #: Disable for backwards compatibility with Kombu 3.x. fanout_prefix = True #: If enabled the fanout exchange will support patterns in routing #: and binding keys (like a topic exchange but using PUB/SUB). #: #: Enabled by default since Kombu 4.x. #: Disable for backwards compatibility with Kombu 3.x. fanout_patterns = True #: The global key prefix will be prepended to all keys used #: by Kombu, which can be useful when a redis database is shared #: by different users. By default, no prefix is prepended. global_keyprefix = '' #: Order in which we consume from queues. #: #: Can be either string alias, or a cycle strategy class #: #: - ``round_robin`` #: (:class:`~kombu.utils.scheduling.round_robin_cycle`). #: #: Make sure each queue has an equal opportunity to be consumed from. #: #: - ``sorted`` #: (:class:`~kombu.utils.scheduling.sorted_cycle`). #: #: Consume from queues in alphabetical order. #: If the first queue in the sorted list always contains messages, #: then the rest of the queues will never be consumed from. #: #: - ``priority`` #: (:class:`~kombu.utils.scheduling.priority_cycle`). #: #: Consume from queues in original order, so that if the first #: queue always contains messages, the rest of the queues #: in the list will never be consumed from. #: #: The default is to consume from queues in round robin. queue_order_strategy = 'round_robin' _async_pool = None _pool = None from_transport_options = ( virtual.Channel.from_transport_options + ('sep', 'ack_emulation', 'unacked_key', 'unacked_index_key', 'unacked_mutex_key', 'unacked_mutex_expire', 'visibility_timeout', 'unacked_restore_limit', 'fanout_prefix', 'fanout_patterns', 'global_keyprefix', 'socket_timeout', 'socket_connect_timeout', 'socket_keepalive', 'socket_keepalive_options', 'queue_order_strategy', 'max_connections', 'health_check_interval', 'retry_on_timeout', 'priority_steps') # <-- do not add comma here! 
) connection_class = redis.Connection if redis else None connection_class_ssl = redis.SSLConnection if redis else None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self.ack_emulation: # disable visibility timeout self.QoS = virtual.QoS self._registered = False self._queue_cycle = cycle_by_name(self.queue_order_strategy)() self.Client = self._get_client() self.ResponseError = self._get_response_error() self.active_fanout_queues = set() self.auto_delete_queues = set() self._fanout_to_queue = {} self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive} if self.fanout_prefix: if isinstance(self.fanout_prefix, str): self.keyprefix_fanout = self.fanout_prefix else: # previous versions did not set a fanout, so cannot enable # by default. self.keyprefix_fanout = '' # Evaluate connection. try: self.client.ping() except Exception: self._disconnect_pools() raise self.connection.cycle.add(self) # add to channel poller. # set to True only after the channel was successfully added to the poller. self._registered = True # copy errors, in case the channel is closed while threads # are still waiting for data. self.connection_errors = self.connection.connection_errors if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_channel) def _after_fork(self): self._disconnect_pools() def _disconnect_pools(self): pool = self._pool async_pool = self._async_pool self._async_pool = self._pool = None if pool is not None: pool.disconnect() if async_pool is not None: async_pool.disconnect() def _on_connection_disconnect(self, connection): if self._in_poll is connection: self._in_poll = None if self._in_listen is connection: self._in_listen = None if self.connection and self.connection.cycle: self.connection.cycle._on_connection_disconnect(connection) def _do_restore_message(self, payload, exchange, routing_key, pipe, leftmost=False): try: try: payload['headers']['redelivered'] = True payload['properties']['delivery_info']['redelivered'] = True except KeyError: pass for queue in self._lookup(exchange, routing_key): pri = self._get_message_priority(payload, reverse=False) (pipe.lpush if leftmost else pipe.rpush)( self._q_for_pri(queue, pri), dumps(payload), ) except Exception: crit('Could not restore message: %r', payload, exc_info=True) def _restore(self, message, leftmost=False): if not self.ack_emulation: return super()._restore(message) tag = message.delivery_tag def restore_transaction(pipe): P = pipe.hget(self.unacked_key, tag) pipe.multi() pipe.hdel(self.unacked_key, tag) if P: M, EX, RK = loads(bytes_to_str(P)) # json is unicode self._do_restore_message(M, EX, RK, pipe, leftmost) with self.conn_or_acquire() as client: client.transaction(restore_transaction, self.unacked_key) def _restore_at_beginning(self, message): return self._restore(message, leftmost=True) def basic_consume(self, queue, *args, **kwargs): if queue in self._fanout_queues: exchange, _ = self._fanout_queues[queue] self.active_fanout_queues.add(queue) self._fanout_to_queue[exchange] = queue ret = super().basic_consume(queue, *args, **kwargs) # Update fair cycle between queues. # # We cycle between queues fairly to make sure that # each queue is equally likely to be consumed from, # so that a very busy queue will not block others. # # This works by using Redis's `BRPOP` command and # by rotating the most recently used queue to the # end of the list. See Kombu github issue #166 for # more discussion of this method. 
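        #
        # As a worked illustration (hypothetical queue names, assuming the
        # default ``round_robin`` strategy): with active queues
        # ['a', 'b', 'c'], receiving a message from 'b' rotates the cycle
        # to ['a', 'c', 'b'], so the next BRPOP lists the keys for 'a' and
        # 'c' ahead of 'b' and gives them the first chance to deliver.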
self._update_queue_cycle() return ret def basic_cancel(self, consumer_tag): # If we are busy reading messages we may experience # a race condition where a message is consumed after # canceling, so we must delay this operation until reading # is complete (Issue celery/celery#1773). connection = self.connection if connection: if connection.cycle._in_protected_read: return connection.cycle.after_read.add( promise(self._basic_cancel, (consumer_tag,)), ) return self._basic_cancel(consumer_tag) def _basic_cancel(self, consumer_tag): try: queue = self._tag_to_queue[consumer_tag] except KeyError: return try: self.active_fanout_queues.remove(queue) except KeyError: pass else: self._unsubscribe_from(queue) try: exchange, _ = self._fanout_queues[queue] self._fanout_to_queue.pop(exchange) except KeyError: pass ret = super().basic_cancel(consumer_tag) self._update_queue_cycle() return ret def _get_publish_topic(self, exchange, routing_key): if routing_key and self.fanout_patterns: return ''.join([self.keyprefix_fanout, exchange, '/', routing_key]) return ''.join([self.keyprefix_fanout, exchange]) def _get_subscribe_topic(self, queue): exchange, routing_key = self._fanout_queues[queue] return self._get_publish_topic(exchange, routing_key) def _subscribe(self): keys = [self._get_subscribe_topic(queue) for queue in self.active_fanout_queues] if not keys: return c = self.subclient if c.connection._sock is None: c.connection.connect() self._in_listen = c.connection c.psubscribe(keys) def _unsubscribe_from(self, queue): topic = self._get_subscribe_topic(queue) c = self.subclient if c.connection and c.connection._sock: c.unsubscribe([topic]) def _handle_message(self, client, r): if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0: client.subscribed = False return if bytes_to_str(r[0]) == 'pmessage': type, pattern, channel, data = r[0], r[1], r[2], r[3] else: type, pattern, channel, data = r[0], None, r[1], r[2] return { 'type': type, 'pattern': pattern, 'channel': channel, 'data': data, } def _receive(self): c = self.subclient ret = [] try: ret.append(self._receive_one(c)) except Empty: pass while c.connection is not None and c.connection.can_read(timeout=0): ret.append(self._receive_one(c)) return any(ret) def _receive_one(self, c): response = None try: response = c.parse_response() except self.connection_errors: self._in_listen = None raise if isinstance(response, (list, tuple)): payload = self._handle_message(c, response) if bytes_to_str(payload['type']).endswith('message'): channel = bytes_to_str(payload['channel']) if payload['data']: if channel[0] == '/': _, _, channel = channel.partition('.') try: message = loads(bytes_to_str(payload['data'])) except (TypeError, ValueError): warning('Cannot process event on channel %r: %s', channel, repr(payload)[:4096], exc_info=1) raise Empty() exchange = channel.split('/', 1)[0] self.connection._deliver( message, self._fanout_to_queue[exchange]) return True def _brpop_start(self, timeout=1): queues = self._queue_cycle.consume(len(self.active_queues)) if not queues: return keys = [self._q_for_pri(queue, pri) for pri in self.priority_steps for queue in queues] + [timeout or 0] self._in_poll = self.client.connection command_args = ['BRPOP', *keys] if self.global_keyprefix: command_args = self.client._prefix_args(command_args) self.client.connection.send_command(*command_args) def _brpop_read(self, **options): try: try: dest__item = self.client.parse_response(self.client.connection, 'BRPOP', **options) except self.connection_errors: # if there's a ConnectionError, 
disconnect so the next # iteration will reconnect automatically. self.client.connection.disconnect() raise if dest__item: dest, item = dest__item dest = bytes_to_str(dest).rsplit(self.sep, 1)[0] self._queue_cycle.rotate(dest) self.connection._deliver(loads(bytes_to_str(item)), dest) return True else: raise Empty() finally: self._in_poll = None def _poll_error(self, type, **options): if type == 'LISTEN': self.subclient.parse_response() else: self.client.parse_response(self.client.connection, type) def _get(self, queue): with self.conn_or_acquire() as client: for pri in self.priority_steps: item = client.rpop(self._q_for_pri(queue, pri)) if item: return loads(bytes_to_str(item)) raise Empty() def _size(self, queue): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.llen(self._q_for_pri(queue, pri)) sizes = pipe.execute() return sum(size for size in sizes if isinstance(size, numbers.Integral)) def _q_for_pri(self, queue, pri): pri = self.priority(pri) if pri: return f"{queue}{self.sep}{pri}" return queue def priority(self, n): steps = self.priority_steps return steps[bisect(steps, n) - 1] def _put(self, queue, message, **kwargs): """Deliver message.""" pri = self._get_message_priority(message, reverse=False) with self.conn_or_acquire() as client: client.lpush(self._q_for_pri(queue, pri), dumps(message)) def _put_fanout(self, exchange, message, routing_key, **kwargs): """Deliver fanout message.""" with self.conn_or_acquire() as client: client.publish( self._get_publish_topic(exchange, routing_key), dumps(message), ) def _new_queue(self, queue, auto_delete=False, **kwargs): if auto_delete: self.auto_delete_queues.add(queue) def _queue_bind(self, exchange, routing_key, pattern, queue): if self.typeof(exchange).type == 'fanout': # Mark exchange as fanout. self._fanout_queues[queue] = ( exchange, routing_key.replace('#', '*'), ) with self.conn_or_acquire() as client: client.sadd(self.keyprefix_queue % (exchange,), self.sep.join([routing_key or '', pattern or '', queue or ''])) def _delete(self, queue, exchange, routing_key, pattern, *args, **kwargs): self.auto_delete_queues.discard(queue) with self.conn_or_acquire(client=kwargs.get('client')) as client: client.srem(self.keyprefix_queue % (exchange,), self.sep.join([routing_key or '', pattern or '', queue or ''])) with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.delete(self._q_for_pri(queue, pri)) pipe.execute() def _has_queue(self, queue, **kwargs): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.exists(self._q_for_pri(queue, pri)) return any(pipe.execute()) def get_table(self, exchange): key = self.keyprefix_queue % exchange with self.conn_or_acquire() as client: values = client.smembers(key) if not values: # table does not exists since all queues bound to the exchange # were deleted. We need just return empty list. return [] return [tuple(bytes_to_str(val).split(self.sep)) for val in values] def _purge(self, queue): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: priq = self._q_for_pri(queue, pri) pipe = pipe.llen(priq).delete(priq) sizes = pipe.execute() return sum(sizes[::2]) def close(self): self._closing = True if self._in_poll: try: self._brpop_read() except Empty: pass if not self.closed: # remove from channel poller. 
self.connection.cycle.discard(self) # delete fanout bindings client = self.__dict__.get('client') # only if property cached if client is not None: for queue in self._fanout_queues: if queue in self.auto_delete_queues: self.queue_delete(queue, client=client) self._disconnect_pools() self._close_clients() super().close() def _close_clients(self): # Close connections for attr in 'client', 'subclient': try: client = self.__dict__[attr] connection, client.connection = client.connection, None connection.disconnect() except (KeyError, AttributeError, self.ResponseError): pass def _prepare_virtual_host(self, vhost): if not isinstance(vhost, numbers.Integral): if not vhost or vhost == '/': vhost = DEFAULT_DB elif vhost.startswith('/'): vhost = vhost[1:] try: vhost = int(vhost) except ValueError: raise ValueError( 'Database is int between 0 and limit - 1, not {}'.format( vhost, )) return vhost def _filter_tcp_connparams(self, socket_keepalive=None, socket_keepalive_options=None, **params): return params def _connparams(self, asynchronous=False): conninfo = self.connection.client connparams = { 'host': conninfo.hostname or '127.0.0.1', 'port': conninfo.port or self.connection.default_port, 'virtual_host': conninfo.virtual_host, 'username': conninfo.userid, 'password': conninfo.password, 'max_connections': self.max_connections, 'socket_timeout': self.socket_timeout, 'socket_connect_timeout': self.socket_connect_timeout, 'socket_keepalive': self.socket_keepalive, 'socket_keepalive_options': self.socket_keepalive_options, 'health_check_interval': self.health_check_interval, 'retry_on_timeout': self.retry_on_timeout, } conn_class = self.connection_class # If the connection class does not support the `health_check_interval` # argument then remove it. if hasattr(conn_class, '__init__'): # check health_check_interval for the class and bases # classes classes = [conn_class] if hasattr(conn_class, '__bases__'): classes += list(conn_class.__bases__) for klass in classes: if accepts_argument(klass.__init__, 'health_check_interval'): break else: # no break connparams.pop('health_check_interval') if conninfo.ssl: # Connection(ssl={}) must be a dict containing the keys: # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile' try: connparams.update(conninfo.ssl) connparams['connection_class'] = self.connection_class_ssl except TypeError: pass host = connparams['host'] if '://' in host: scheme, _, _, username, password, path, query = _parse_url(host) if scheme == 'socket': connparams = self._filter_tcp_connparams(**connparams) connparams.update({ 'connection_class': redis.UnixDomainSocketConnection, 'path': '/' + path}, **query) connparams.pop('socket_connect_timeout', None) connparams.pop('socket_keepalive', None) connparams.pop('socket_keepalive_options', None) connparams['username'] = username connparams['password'] = password connparams.pop('host', None) connparams.pop('port', None) connparams['db'] = self._prepare_virtual_host( connparams.pop('virtual_host', None)) channel = self connection_cls = ( connparams.get('connection_class') or self.connection_class ) if asynchronous: class Connection(connection_cls): def disconnect(self, *args): super().disconnect(*args) # We remove the connection from the poller # only if it has been added properly. 
if channel._registered: channel._on_connection_disconnect(self) connection_cls = Connection connparams['connection_class'] = connection_cls return connparams def _create_client(self, asynchronous=False): if asynchronous: return self.Client(connection_pool=self.async_pool) return self.Client(connection_pool=self.pool) def _get_pool(self, asynchronous=False): params = self._connparams(asynchronous=asynchronous) self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db']) return redis.ConnectionPool(**params) def _get_client(self): if redis.VERSION < (3, 2, 0): raise VersionMismatch( 'Redis transport requires redis-py versions 3.2.0 or later. ' 'You have {0.__version__}'.format(redis)) if self.global_keyprefix: return functools.partial( PrefixedStrictRedis, global_keyprefix=self.global_keyprefix, ) return redis.Redis @contextmanager def conn_or_acquire(self, client=None): if client: yield client else: yield self._create_client() @property def pool(self): if self._pool is None: self._pool = self._get_pool() return self._pool @property def async_pool(self): if self._async_pool is None: self._async_pool = self._get_pool(asynchronous=True) return self._async_pool @cached_property def client(self): """Client used to publish messages, BRPOP etc.""" return self._create_client(asynchronous=True) @cached_property def subclient(self): """Pub/Sub connection used to consume fanout queues.""" client = self._create_client(asynchronous=True) return client.pubsub() def _update_queue_cycle(self): self._queue_cycle.update(self.active_queues) def _get_response_error(self): from redis import exceptions return exceptions.ResponseError @property def active_queues(self): """Set of queues being consumed from (excluding fanout queues).""" return {queue for queue in self._active_queues if queue not in self.active_fanout_queues} class Transport(virtual.Transport): """Redis Transport.""" Channel = Channel polling_interval = None # disable sleep between unsuccessful polls. default_port = DEFAULT_PORT driver_type = 'redis' driver_name = 'redis' implements = virtual.Transport.implements.extend( asynchronous=True, exchange_type=frozenset(['direct', 'topic', 'fanout']) ) if redis: connection_errors, channel_errors = get_redis_error_classes() def __init__(self, *args, **kwargs): if redis is None: raise ImportError('Missing redis library (pip install redis)') super().__init__(*args, **kwargs) # All channels share the same poller. 
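# The poller multiplexes the BRPOP and pub/sub sockets of every channel on
# this connection, so a single event-loop registration serves them all.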
self.cycle = MultiChannelPoller() def driver_version(self): return redis.__version__ def register_with_event_loop(self, connection, loop): cycle = self.cycle cycle.on_poll_init(loop.poller) cycle_poll_start = cycle.on_poll_start add_reader = loop.add_reader on_readable = self.on_readable def _on_disconnect(connection): if connection._sock: loop.remove(connection._sock) # must have started polling or this will break reconnection if cycle.fds: # stop polling in the event loop try: loop.on_tick.remove(on_poll_start) except KeyError: pass cycle._on_connection_disconnect = _on_disconnect def on_poll_start(): cycle_poll_start() [add_reader(fd, on_readable, fd) for fd in cycle.fds] loop.on_tick.add(on_poll_start) loop.call_repeatedly(10, cycle.maybe_restore_messages) health_check_interval = connection.client.transport_options.get( 'health_check_interval', DEFAULT_HEALTH_CHECK_INTERVAL ) loop.call_repeatedly( health_check_interval, cycle.maybe_check_subclient_health ) def on_readable(self, fileno): """Handle AIO event for one of our file descriptors.""" self.cycle.on_readable(fileno) if sentinel: class SentinelManagedSSLConnection( sentinel.SentinelManagedConnection, redis.SSLConnection): """Connect to a Redis server using Sentinel + TLS. Use Sentinel to identify which Redis server is the current master to connect to and when connecting to the Master server, use an SSL Connection. """ pass class SentinelChannel(Channel): """Channel with explicit Redis Sentinel knowledge. Broker url is supposed to look like: .. code-block:: sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/... where each sentinel is separated by a `;`. Other arguments for the sentinel should come from the transport options (see `transport_options` of :class:`~kombu.connection.Connection`). You must provide at least one option in Transport options: * `master_name` - name of the redis group to poll Example: ------- .. code-block:: python >>> import kombu >>> c = kombu.Connection( 'sentinel://sentinel1:26379;sentinel://sentinel2:26379', transport_options={'master_name': 'mymaster'} ) >>> c.connect() """ from_transport_options = Channel.from_transport_options + ( 'master_name', 'min_other_sentinels', 'sentinel_kwargs') connection_class = sentinel.SentinelManagedConnection if sentinel else None connection_class_ssl = SentinelManagedSSLConnection if sentinel else None def _sentinel_managed_pool(self, asynchronous=False): connparams = self._connparams(asynchronous) additional_params = connparams.copy() additional_params.pop('host', None) additional_params.pop('port', None) sentinels = [] for url in self.connection.client.alt: url = _parse_url(url) if url.scheme == 'sentinel': port = url.port or self.connection.default_port sentinels.append((url.hostname, port)) # Fallback for when only one sentinel is provided. if not sentinels: sentinels.append((connparams['host'], connparams['port'])) sentinel_inst = sentinel.Sentinel( sentinels, min_other_sentinels=getattr(self, 'min_other_sentinels', 0), sentinel_kwargs=getattr(self, 'sentinel_kwargs', None), **additional_params) master_name = getattr(self, 'master_name', None) if master_name is None: raise ValueError( "'master_name' transport option must be specified." 
) return sentinel_inst.master_for( master_name, redis.Redis, ).connection_pool def _get_pool(self, asynchronous=False): params = self._connparams(asynchronous=asynchronous) self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db']) return self._sentinel_managed_pool(asynchronous) class SentinelTransport(Transport): """Redis Sentinel Transport.""" default_port = 26379 Channel = SentinelChannel kombu-5.5.3/kombu/transport/sqlalchemy/000077500000000000000000000000001477772317200201755ustar00rootroot00000000000000kombu-5.5.3/kombu/transport/sqlalchemy/__init__.py000066400000000000000000000171761477772317200223220ustar00rootroot00000000000000"""SQLAlchemy Transport module for kombu. Kombu transport using SQL Database as the message store. Features ======== * Type: Virtual * Supports Direct: yes * Supports Topic: yes * Supports Fanout: no * Supports Priority: no * Supports TTL: no Connection String ================= .. code-block:: sqla+SQL_ALCHEMY_CONNECTION_STRING sqlalchemy+SQL_ALCHEMY_CONNECTION_STRING For details about ``SQL_ALCHEMY_CONNECTION_STRING`` see SQLAlchemy Engine Configuration documentation. Examples -------- .. code-block:: # PostgreSQL with default driver sqla+postgresql://scott:tiger@localhost/mydatabase # PostgreSQL with psycopg2 driver sqla+postgresql+psycopg2://scott:tiger@localhost/mydatabase # PostgreSQL with pg8000 driver sqla+postgresql+pg8000://scott:tiger@localhost/mydatabase # MySQL with default driver sqla+mysql://scott:tiger@localhost/foo # MySQL with mysqlclient driver (a maintained fork of MySQL-Python) sqla+mysql+mysqldb://scott:tiger@localhost/foo # MySQL with PyMySQL driver sqla+mysql+pymysql://scott:tiger@localhost/foo Transport Options ================= * ``queue_tablename``: Name of table storing queues. * ``message_tablename``: Name of table storing messages. Moreover parameters of :func:`sqlalchemy.create_engine()` function can be passed as transport options. """ from __future__ import annotations import threading from json import dumps, loads from queue import Empty from sqlalchemy import create_engine, text from sqlalchemy.exc import OperationalError from sqlalchemy.orm import sessionmaker from kombu.transport import virtual from kombu.utils import cached_property from kombu.utils.encoding import bytes_to_str from .models import Message as MessageBase from .models import ModelBase from .models import Queue as QueueBase from .models import class_registry, metadata # SQLAlchemy overrides != False to have special meaning and pep8 complains # flake8: noqa VERSION = (1, 4, 1) __version__ = '.'.join(map(str, VERSION)) _MUTEX = threading.RLock() class Channel(virtual.Channel): """The channel class.""" _session = None _engines = {} # engine cache def __init__(self, connection, **kwargs): self._configure_entity_tablenames(connection.client.transport_options) super().__init__(connection, **kwargs) def _configure_entity_tablenames(self, opts): self.queue_tablename = opts.get('queue_tablename', 'kombu_queue') self.message_tablename = opts.get('message_tablename', 'kombu_message') # # Define the model definitions. This registers the declarative # classes with the active SQLAlchemy metadata object. This *must* be # done prior to the ``create_engine`` call. 
# self.queue_cls and self.message_cls def _engine_from_config(self): conninfo = self.connection.client transport_options = conninfo.transport_options.copy() transport_options.pop('queue_tablename', None) transport_options.pop('message_tablename', None) transport_options.pop('callback', None) transport_options.pop('errback', None) transport_options.pop('max_retries', None) transport_options.pop('interval_start', None) transport_options.pop('interval_step', None) transport_options.pop('interval_max', None) transport_options.pop('retry_errors', None) return create_engine(conninfo.hostname, **transport_options) def _open(self): conninfo = self.connection.client if conninfo.hostname not in self._engines: with _MUTEX: if conninfo.hostname in self._engines: # Engine was created while we were waiting to # acquire the lock. return self._engines[conninfo.hostname] engine = self._engine_from_config() Session = sessionmaker(bind=engine) metadata.create_all(engine) self._engines[conninfo.hostname] = engine, Session return self._engines[conninfo.hostname] @property def session(self): if self._session is None: _, Session = self._open() self._session = Session() return self._session def _get_or_create(self, queue): obj = self.session.query(self.queue_cls) \ .filter(self.queue_cls.name == queue).first() if not obj: with _MUTEX: obj = self.session.query(self.queue_cls) \ .filter(self.queue_cls.name == queue).first() if obj: # Queue was created while we were waiting to # acquire the lock. return obj obj = self.queue_cls(queue) self.session.add(obj) try: self.session.commit() except OperationalError: self.session.rollback() return obj def _new_queue(self, queue, **kwargs): self._get_or_create(queue) def _put(self, queue, payload, **kwargs): obj = self._get_or_create(queue) message = self.message_cls(dumps(payload), obj) self.session.add(message) try: self.session.commit() except OperationalError: self.session.rollback() def _get(self, queue): obj = self._get_or_create(queue) if self.session.bind.name == 'sqlite': self.session.execute(text('BEGIN IMMEDIATE TRANSACTION')) try: msg = self.session.query(self.message_cls) \ .with_for_update() \ .filter(self.message_cls.queue_id == obj.id) \ .filter(self.message_cls.visible != False) \ .order_by(self.message_cls.sent_at) \ .order_by(self.message_cls.id) \ .limit(1) \ .first() if msg: msg.visible = False return loads(bytes_to_str(msg.payload)) raise Empty() finally: self.session.commit() def _query_all(self, queue): obj = self._get_or_create(queue) return self.session.query(self.message_cls) \ .filter(self.message_cls.queue_id == obj.id) def _purge(self, queue): count = self._query_all(queue).delete(synchronize_session=False) try: self.session.commit() except OperationalError: self.session.rollback() return count def _size(self, queue): return self._query_all(queue).count() def _declarative_cls(self, name, base, ns): if name not in class_registry: with _MUTEX: if name in class_registry: # Class was registered while we were waiting to # acquire the lock. 
return class_registry[name] return type(str(name), (base, ModelBase), ns) return class_registry[name] @cached_property def queue_cls(self): return self._declarative_cls( 'Queue', QueueBase, {'__tablename__': self.queue_tablename} ) @cached_property def message_cls(self): return self._declarative_cls( 'Message', MessageBase, {'__tablename__': self.message_tablename} ) class Transport(virtual.Transport): """The transport class.""" Channel = Channel can_parse_url = True default_port = 0 driver_type = 'sql' driver_name = 'sqlalchemy' connection_errors = (OperationalError, ) def driver_version(self): import sqlalchemy return sqlalchemy.__version__ kombu-5.5.3/kombu/transport/sqlalchemy/models.py000066400000000000000000000043761477772317200220440ustar00rootroot00000000000000"""Kombu transport using SQLAlchemy as the message store.""" from __future__ import annotations import datetime from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Index, Integer, Sequence, SmallInteger, String, Text) from sqlalchemy.orm import relationship from sqlalchemy.schema import MetaData try: from sqlalchemy.orm import declarative_base, declared_attr except ImportError: # TODO: Remove this once we drop support for SQLAlchemy < 1.4. from sqlalchemy.ext.declarative import declarative_base, declared_attr class_registry = {} metadata = MetaData() ModelBase = declarative_base(metadata=metadata, class_registry=class_registry) class Queue: """The queue class.""" __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'} id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True, autoincrement=True) name = Column(String(200), unique=True) def __init__(self, name): self.name = name def __str__(self): return f'' @declared_attr def messages(cls): return relationship('Message', backref='queue', lazy='noload') class Message: """The message class.""" __table_args__ = ( Index('ix_kombu_message_timestamp_id', 'timestamp', 'id'), {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'} ) id = Column(Integer, Sequence('message_id_sequence'), primary_key=True, autoincrement=True) visible = Column(Boolean, default=True, index=True) sent_at = Column('timestamp', DateTime, nullable=True, index=True, onupdate=datetime.datetime.now) payload = Column(Text, nullable=False) version = Column(SmallInteger, nullable=False, default=1) __mapper_args__ = {'version_id_col': version} def __init__(self, payload, queue): self.payload = payload self.queue = queue def __str__(self): return ''.format(self) @declared_attr def queue_id(self): return Column( Integer, ForeignKey( '%s.id' % class_registry['Queue'].__tablename__, name='FK_kombu_message_queue' ) ) kombu-5.5.3/kombu/transport/virtual/000077500000000000000000000000001477772317200175215ustar00rootroot00000000000000kombu-5.5.3/kombu/transport/virtual/__init__.py000066400000000000000000000007341477772317200216360ustar00rootroot00000000000000from __future__ import annotations from .base import (AbstractChannel, Base64, BrokerState, Channel, Empty, Management, Message, NotEquivalentError, QoS, Transport, UndeliverableWarning, binding_key_t, queue_binding_t) __all__ = ( 'Base64', 'NotEquivalentError', 'UndeliverableWarning', 'BrokerState', 'QoS', 'Message', 'AbstractChannel', 'Channel', 'Management', 'Transport', 'Empty', 'binding_key_t', 'queue_binding_t', ) kombu-5.5.3/kombu/transport/virtual/base.py000066400000000000000000001027661477772317200210210ustar00rootroot00000000000000"""Virtual transport implementation. Emulates the AMQ API for non-AMQ transports. 
""" from __future__ import annotations import base64 import socket import sys import warnings from array import array from collections import OrderedDict, defaultdict, namedtuple from itertools import count from multiprocessing.util import Finalize from queue import Empty from time import monotonic, sleep from typing import TYPE_CHECKING from amqp.protocol import queue_declare_ok_t from kombu.exceptions import ChannelError, ResourceError from kombu.log import get_logger from kombu.transport import base from kombu.utils.div import emergency_dump_state from kombu.utils.encoding import bytes_to_str, str_to_bytes from kombu.utils.scheduling import FairCycle from kombu.utils.uuid import uuid from .exchange import STANDARD_EXCHANGE_TYPES if TYPE_CHECKING: from types import TracebackType ARRAY_TYPE_H = 'H' UNDELIVERABLE_FMT = """\ Message could not be delivered: No queues bound to exchange {exchange!r} \ using binding key {routing_key!r}. """ NOT_EQUIVALENT_FMT = """\ Cannot redeclare exchange {0!r} in vhost {1!r} with \ different type, durable, autodelete or arguments value.\ """ W_NO_CONSUMERS = """\ Requeuing undeliverable message for queue %r: No consumers.\ """ RESTORING_FMT = 'Restoring {0!r} unacknowledged message(s)' RESTORE_PANIC_FMT = 'UNABLE TO RESTORE {0} MESSAGES: {1}' logger = get_logger(__name__) #: Key format used for queue argument lookups in BrokerState.bindings. binding_key_t = namedtuple('binding_key_t', ( 'queue', 'exchange', 'routing_key', )) #: BrokerState.queue_bindings generates tuples in this format. queue_binding_t = namedtuple('queue_binding_t', ( 'exchange', 'routing_key', 'arguments', )) class Base64: """Base64 codec.""" def encode(self, s): return bytes_to_str(base64.b64encode(str_to_bytes(s))) def decode(self, s): return base64.b64decode(str_to_bytes(s)) class NotEquivalentError(Exception): """Entity declaration is not equivalent to the previous declaration.""" class UndeliverableWarning(UserWarning): """The message could not be delivered to a queue.""" class BrokerState: """Broker state holds exchanges, queues and bindings.""" #: Mapping of exchange name to #: :class:`kombu.transport.virtual.exchange.ExchangeType` exchanges = None #: This is the actual bindings registry, used to store bindings and to #: test 'in' relationships in constant time. It has the following #: structure:: #: #: { #: (queue, exchange, routing_key): arguments, #: # ..., #: } bindings = None #: The queue index is used to access directly (constant time) #: all the bindings of a certain queue. 
It has the following structure:: #: #: { #: queue: { #: (queue, exchange, routing_key), #: # ..., #: }, #: # ..., #: } queue_index = None def __init__(self, exchanges=None): self.exchanges = {} if exchanges is None else exchanges self.bindings = {} self.queue_index = defaultdict(set) def clear(self): self.exchanges.clear() self.bindings.clear() self.queue_index.clear() def has_binding(self, queue, exchange, routing_key): return (queue, exchange, routing_key) in self.bindings def binding_declare(self, queue, exchange, routing_key, arguments): key = binding_key_t(queue, exchange, routing_key) self.bindings.setdefault(key, arguments) self.queue_index[queue].add(key) def binding_delete(self, queue, exchange, routing_key): key = binding_key_t(queue, exchange, routing_key) try: del self.bindings[key] except KeyError: pass else: self.queue_index[queue].remove(key) def queue_bindings_delete(self, queue): try: bindings = self.queue_index.pop(queue) except KeyError: pass else: [self.bindings.pop(binding, None) for binding in bindings] def queue_bindings(self, queue): return ( queue_binding_t(key.exchange, key.routing_key, self.bindings[key]) for key in self.queue_index[queue] ) class QoS: """Quality of Service guarantees. Only supports `prefetch_count` at this point. Arguments: --------- channel (ChannelT): Connection channel. prefetch_count (int): Initial prefetch count (defaults to 0). """ #: current prefetch count value prefetch_count = 0 #: :class:`~collections.OrderedDict` of active messages. #: *NOTE*: Can only be modified by the consuming thread. _delivered = None #: acks can be done by other threads than the consuming thread. #: Instead of a mutex, which doesn't perform well here, we mark #: the delivery tags as dirty, so subsequent calls to append() can remove #: them. _dirty = None #: If disabled, unacked messages won't be restored at shutdown. restore_at_shutdown = True def __init__(self, channel, prefetch_count=0): self.channel = channel self.prefetch_count = prefetch_count or 0 # Standard Python dictionaries do not support setting attributes # on the object, hence the use of OrderedDict self._delivered = OrderedDict() self._delivered.restored = False self._dirty = set() self._quick_ack = self._dirty.add self._quick_append = self._delivered.__setitem__ self._on_collect = Finalize( self, self.restore_unacked_once, exitpriority=1, ) def can_consume(self): """Return true if the channel can be consumed from. Used to ensure the client adhers to currently active prefetch limits. """ pcount = self.prefetch_count return not pcount or len(self._delivered) - len(self._dirty) < pcount def can_consume_max_estimate(self): """Return the maximum number of messages allowed to be returned. Returns an estimated number of messages that a consumer may be allowed to consume at once from the broker. This is used for services where bulk 'get message' calls are preferred to many individual 'get message' calls - like SQS. Returns ------- int: greater than zero. 
""" pcount = self.prefetch_count if pcount: return max(pcount - (len(self._delivered) - len(self._dirty)), 0) def append(self, message, delivery_tag): """Append message to transactional state.""" if self._dirty: self._flush() self._quick_append(delivery_tag, message) def get(self, delivery_tag): return self._delivered[delivery_tag] def _flush(self): """Flush dirty (acked/rejected) tags from.""" dirty = self._dirty delivered = self._delivered while 1: try: dirty_tag = dirty.pop() except KeyError: break delivered.pop(dirty_tag, None) def ack(self, delivery_tag): """Acknowledge message and remove from transactional state.""" self._quick_ack(delivery_tag) def reject(self, delivery_tag, requeue=False): """Remove from transactional state and requeue message.""" if requeue: self.channel._restore_at_beginning(self._delivered[delivery_tag]) self._quick_ack(delivery_tag) def restore_unacked(self): """Restore all unacknowledged messages.""" self._flush() delivered = self._delivered errors = [] restore = self.channel._restore pop_message = delivered.popitem while delivered: try: _, message = pop_message() except KeyError: # pragma: no cover break try: restore(message) except BaseException as exc: errors.append((exc, message)) delivered.clear() return errors def restore_unacked_once(self, stderr=None): """Restore all unacknowledged messages at shutdown/gc collect. Note: ---- Can only be called once for each instance, subsequent calls will be ignored. """ self._on_collect.cancel() self._flush() stderr = sys.stderr if stderr is None else stderr state = self._delivered if not self.restore_at_shutdown or not self.channel.do_restore: return if getattr(state, 'restored', None): assert not state return try: if state: print(RESTORING_FMT.format(len(self._delivered)), file=stderr) unrestored = self.restore_unacked() if unrestored: errors, messages = list(zip(*unrestored)) print(RESTORE_PANIC_FMT.format(len(errors), errors), file=stderr) emergency_dump_state(messages, stderr=stderr) finally: state.restored = True def restore_visible(self, *args, **kwargs): """Restore any pending unacknowledged messages. To be filled in for visibility_timeout style implementations. Note: ---- This is implementation optional, and currently only used by the Redis transport. """ class Message(base.Message): """Message object.""" def __init__(self, payload, channel=None, **kwargs): self._raw = payload properties = payload['properties'] body = payload.get('body') if body: body = channel.decode_body(body, properties.get('body_encoding')) super().__init__( body=body, channel=channel, delivery_tag=properties['delivery_tag'], content_type=payload.get('content-type'), content_encoding=payload.get('content-encoding'), headers=payload.get('headers'), properties=properties, delivery_info=properties.get('delivery_info'), postencode='utf-8', **kwargs) def serializable(self): props = self.properties body, _ = self.channel.encode_body(self.body, props.get('body_encoding')) headers = dict(self.headers) # remove compression header headers.pop('compression', None) return { 'body': body, 'properties': props, 'content-type': self.content_type, 'content-encoding': self.content_encoding, 'headers': headers, } class AbstractChannel: """Abstract channel interface. This is an abstract class defining the channel methods you'd usually want to implement in a virtual channel. Note: ---- Do not subclass directly, but rather inherit from :class:`Channel`. 
""" def _get(self, queue, timeout=None): """Get next message from `queue`.""" raise NotImplementedError('Virtual channels must implement _get') def _put(self, queue, message): """Put `message` onto `queue`.""" raise NotImplementedError('Virtual channels must implement _put') def _purge(self, queue): """Remove all messages from `queue`.""" raise NotImplementedError('Virtual channels must implement _purge') def _size(self, queue): """Return the number of messages in `queue` as an :class:`int`.""" return 0 def _delete(self, queue, *args, **kwargs): """Delete `queue`. Note: ---- This just purges the queue, if you need to do more you can override this method. """ self._purge(queue) def _new_queue(self, queue, **kwargs): """Create new queue. Note: ---- Your transport can override this method if it needs to do something whenever a new queue is declared. """ def _has_queue(self, queue, **kwargs): """Verify that queue exists. Returns ------- bool: Should return :const:`True` if the queue exists or :const:`False` otherwise. """ return True def _poll(self, cycle, callback, timeout=None): """Poll a list of queues for available messages.""" return cycle.get(callback) def _get_and_deliver(self, queue, callback): message = self._get(queue) callback(message, queue) class Channel(AbstractChannel, base.StdChannel): """Virtual channel. Arguments: --------- connection (ConnectionT): The transport instance this channel is part of. """ #: message class used. Message = Message #: QoS class used. QoS = QoS #: flag to restore unacked messages when channel #: goes out of scope. do_restore = True #: mapping of exchange types and corresponding classes. exchange_types = dict(STANDARD_EXCHANGE_TYPES) #: flag set if the channel supports fanout exchanges. supports_fanout = False #: Binary <-> ASCII codecs. codecs = {'base64': Base64()} #: Default body encoding. #: NOTE: ``transport_options['body_encoding']`` will override this value. body_encoding = 'base64' #: counter used to generate delivery tags for this channel. _delivery_tags = count(1) #: Optional queue where messages with no route is delivered. #: Set by ``transport_options['deadletter_queue']``. deadletter_queue = None # List of options to transfer from :attr:`transport_options`. 
from_transport_options = ('body_encoding', 'deadletter_queue') # Priority defaults default_priority = 0 min_priority = 0 max_priority = 9 def __init__(self, connection, **kwargs): self.connection = connection self._consumers = set() self._cycle = None self._tag_to_queue = {} self._active_queues = [] self._qos = None self.closed = False # instantiate exchange types self.exchange_types = { typ: cls(self) for typ, cls in self.exchange_types.items() } self.channel_id = self._get_free_channel_id() topts = self.connection.client.transport_options for opt_name in self.from_transport_options: try: setattr(self, opt_name, topts[opt_name]) except KeyError: pass def exchange_declare(self, exchange=None, type='direct', durable=False, auto_delete=False, arguments=None, nowait=False, passive=False): """Declare exchange.""" type = type or 'direct' exchange = exchange or 'amq.%s' % type if passive: if exchange not in self.state.exchanges: raise ChannelError( 'NOT_FOUND - no exchange {!r} in vhost {!r}'.format( exchange, self.connection.client.virtual_host or '/'), (50, 10), 'Channel.exchange_declare', '404', ) return try: prev = self.state.exchanges[exchange] if not self.typeof(exchange).equivalent(prev, exchange, type, durable, auto_delete, arguments): raise NotEquivalentError(NOT_EQUIVALENT_FMT.format( exchange, self.connection.client.virtual_host or '/')) except KeyError: self.state.exchanges[exchange] = { 'type': type, 'durable': durable, 'auto_delete': auto_delete, 'arguments': arguments or {}, 'table': [], } def exchange_delete(self, exchange, if_unused=False, nowait=False): """Delete `exchange` and all its bindings.""" for rkey, _, queue in self.get_table(exchange): self.queue_delete(queue, if_unused=True, if_empty=True) self.state.exchanges.pop(exchange, None) def queue_declare(self, queue=None, passive=False, **kwargs): """Declare queue.""" queue = queue or 'amq.gen-%s' % uuid() if passive and not self._has_queue(queue, **kwargs): raise ChannelError( 'NOT_FOUND - no queue {!r} in vhost {!r}'.format( queue, self.connection.client.virtual_host or '/'), (50, 10), 'Channel.queue_declare', '404', ) else: self._new_queue(queue, **kwargs) return queue_declare_ok_t(queue, self._size(queue), 0) def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): """Delete queue.""" if if_empty and self._size(queue): return for exchange, routing_key, args in self.state.queue_bindings(queue): meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, args, ) self._delete(queue, exchange, *meta, **kwargs) self.state.queue_bindings_delete(queue) def after_reply_message_received(self, queue): self.queue_delete(queue) def exchange_bind(self, destination, source='', routing_key='', nowait=False, arguments=None): raise NotImplementedError('transport does not support exchange_bind') def exchange_unbind(self, destination, source='', routing_key='', nowait=False, arguments=None): raise NotImplementedError('transport does not support exchange_unbind') def queue_bind(self, queue, exchange=None, routing_key='', arguments=None, **kwargs): """Bind `queue` to `exchange` with `routing key`.""" exchange = exchange or 'amq.direct' if self.state.has_binding(queue, exchange, routing_key): return # Add binding: self.state.binding_declare(queue, exchange, routing_key, arguments) # Update exchange's routing table: table = self.state.exchanges[exchange].setdefault('table', []) meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, arguments, ) table.append(meta) if self.supports_fanout: 
self._queue_bind(exchange, *meta) def queue_unbind(self, queue, exchange=None, routing_key='', arguments=None, **kwargs): # Remove queue binding: self.state.binding_delete(queue, exchange, routing_key) try: table = self.get_table(exchange) except KeyError: return binding_meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, arguments, ) # TODO: the complexity of this operation is O(number of bindings). # Should be optimized. Modifying table in place. table[:] = [meta for meta in table if meta != binding_meta] def list_bindings(self): return ((queue, exchange, rkey) for exchange in self.state.exchanges for rkey, pattern, queue in self.get_table(exchange)) def queue_purge(self, queue, **kwargs): """Remove all ready messages from queue.""" return self._purge(queue) def _next_delivery_tag(self): return uuid() def basic_publish(self, message, exchange, routing_key, **kwargs): """Publish message.""" self._inplace_augment_message(message, exchange, routing_key) if exchange: return self.typeof(exchange).deliver( message, exchange, routing_key, **kwargs ) # anon exchange: routing_key is the destination queue return self._put(routing_key, message, **kwargs) def _inplace_augment_message(self, message, exchange, routing_key): message['body'], body_encoding = self.encode_body( message['body'], self.body_encoding, ) props = message['properties'] props.update( body_encoding=body_encoding, delivery_tag=self._next_delivery_tag(), ) props['delivery_info'].update( exchange=exchange, routing_key=routing_key, ) def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): """Consume from `queue`.""" self._tag_to_queue[consumer_tag] = queue self._active_queues.append(queue) def _callback(raw_message): message = self.Message(raw_message, channel=self) if not no_ack: self.qos.append(message, message.delivery_tag) return callback(message) self.connection._callbacks[queue] = _callback self._consumers.add(consumer_tag) self._reset_cycle() def basic_cancel(self, consumer_tag): """Cancel consumer by consumer tag.""" if consumer_tag in self._consumers: self._consumers.remove(consumer_tag) self._reset_cycle() queue = self._tag_to_queue.pop(consumer_tag, None) try: self._active_queues.remove(queue) except ValueError: pass self.connection._callbacks.pop(queue, None) def basic_get(self, queue, no_ack=False, **kwargs): """Get message by direct access (synchronous).""" try: message = self.Message(self._get(queue), channel=self) if not no_ack: self.qos.append(message, message.delivery_tag) return message except Empty: pass def basic_ack(self, delivery_tag, multiple=False): """Acknowledge message.""" self.qos.ack(delivery_tag) def basic_recover(self, requeue=False): """Recover unacked messages.""" if requeue: return self.qos.restore_unacked() raise NotImplementedError('Does not support recover(requeue=False)') def basic_reject(self, delivery_tag, requeue=False): """Reject message.""" self.qos.reject(delivery_tag, requeue=requeue) def basic_qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Change QoS settings for this channel. Note: ---- Only `prefetch_count` is supported. 
""" self.qos.prefetch_count = prefetch_count def get_exchanges(self): return list(self.state.exchanges) def get_table(self, exchange): """Get table of bindings for `exchange`.""" return self.state.exchanges[exchange]['table'] def typeof(self, exchange, default='direct'): """Get the exchange type instance for `exchange`.""" try: type = self.state.exchanges[exchange]['type'] except KeyError: type = default return self.exchange_types[type] def _lookup(self, exchange, routing_key, default=None): """Find all queues matching `routing_key` for the given `exchange`. Returns ------- list[str]: queue names -- must return `[default]` if default is set and no queues matched. """ if default is None: default = self.deadletter_queue if not exchange: # anon exchange return [routing_key or default] try: R = self.typeof(exchange).lookup( self.get_table(exchange), exchange, routing_key, default, ) except KeyError: R = [] if not R and default is not None: warnings.warn(UndeliverableWarning(UNDELIVERABLE_FMT.format( exchange=exchange, routing_key=routing_key)), ) self._new_queue(default) R = [default] return R def _restore(self, message): """Redeliver message to its original destination.""" delivery_info = message.delivery_info message = message.serializable() message['redelivered'] = True for queue in self._lookup( delivery_info['exchange'], delivery_info['routing_key']): self._put(queue, message) def _restore_at_beginning(self, message): return self._restore(message) def drain_events(self, timeout=None, callback=None): callback = callback or self.connection._deliver if self._consumers and self.qos.can_consume(): if hasattr(self, '_get_many'): return self._get_many(self._active_queues, timeout=timeout) return self._poll(self.cycle, callback, timeout=timeout) raise Empty() def message_to_python(self, raw_message): """Convert raw message to :class:`Message` instance.""" if not isinstance(raw_message, self.Message): return self.Message(payload=raw_message, channel=self) return raw_message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Prepare message data.""" properties = properties or {} properties.setdefault('delivery_info', {}) properties.setdefault('priority', priority or self.default_priority) return {'body': body, 'content-encoding': content_encoding, 'content-type': content_type, 'headers': headers or {}, 'properties': properties or {}} def flow(self, active=True): """Enable/disable message flow. Raises ------ NotImplementedError: as flow is not implemented by the base virtual implementation. """ raise NotImplementedError('virtual channels do not support flow.') def close(self): """Close channel. Cancel all consumers, and requeue unacked messages. 
""" if not self.closed: self.closed = True for consumer in list(self._consumers): self.basic_cancel(consumer) if self._qos: self._qos.restore_unacked_once() if self._cycle is not None: self._cycle.close() self._cycle = None if self.connection is not None: self.connection.close_channel(self) self.exchange_types = None def encode_body(self, body, encoding=None): if encoding and encoding.lower() != 'utf-8': return self.codecs.get(encoding).encode(body), encoding return body, encoding def decode_body(self, body, encoding=None): if encoding and encoding.lower() != 'utf-8': return self.codecs.get(encoding).decode(body) return body def _reset_cycle(self): self._cycle = FairCycle( self._get_and_deliver, self._active_queues, Empty) def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.close() @property def state(self): """Broker state containing exchanges and bindings.""" return self.connection.state @property def qos(self): """:class:`QoS` manager for this channel.""" if self._qos is None: self._qos = self.QoS(self) return self._qos @property def cycle(self): if self._cycle is None: self._reset_cycle() return self._cycle def _get_message_priority(self, message, reverse=False): """Get priority from message. The value is limited to within a boundary of 0 to 9. Note: ---- Higher value has more priority. """ try: priority = max( min(int(message['properties']['priority']), self.max_priority), self.min_priority, ) except (TypeError, ValueError, KeyError): priority = self.default_priority return (self.max_priority - priority) if reverse else priority def _get_free_channel_id(self): # Cast to a set for fast lookups, and keep stored as an array # for lower memory usage. used_channel_ids = set(self.connection._used_channel_ids) for channel_id in range(1, self.connection.channel_max + 1): if channel_id not in used_channel_ids: self.connection._used_channel_ids.append(channel_id) return channel_id raise ResourceError( 'No free channel ids, current={}, channel_max={}'.format( len(self.connection.channels), self.connection.channel_max), (20, 10), ) class Management(base.Management): """Base class for the AMQP management API.""" def __init__(self, transport): super().__init__(transport) self.channel = transport.client.channel() def get_bindings(self): return [{'destination': q, 'source': e, 'routing_key': r} for q, e, r in self.channel.list_bindings()] def close(self): self.channel.close() class Transport(base.Transport): """Virtual transport. Arguments: --------- client (kombu.Connection): The client this is a transport for. """ Channel = Channel Cycle = FairCycle Management = Management #: :class:`~kombu.utils.scheduling.FairCycle` instance #: used to fairly drain events from channels (set by constructor). cycle = None #: port number used when no port is specified. default_port = None #: active channels. channels = None #: queue/callback map. _callbacks = None #: Time to sleep between unsuccessful polls. polling_interval = 1.0 #: Max number of channels channel_max = 65535 implements = base.Transport.implements.extend( asynchronous=False, exchange_type=frozenset(['direct', 'topic']), heartbeats=False, ) def __init__(self, client, **kwargs): self.client = client # :class:`BrokerState` containing declared exchanges and bindings. 
self.state = BrokerState() self.channels = [] self._avail_channels = [] self._callbacks = {} self.cycle = self.Cycle(self._drain_channel, self.channels, Empty) polling_interval = client.transport_options.get('polling_interval') if polling_interval is not None: self.polling_interval = polling_interval self._used_channel_ids = array(ARRAY_TYPE_H) def create_channel(self, connection): try: return self._avail_channels.pop() except IndexError: channel = self.Channel(connection) self.channels.append(channel) return channel def close_channel(self, channel): try: try: self._used_channel_ids.remove(channel.channel_id) except ValueError: # channel id already removed pass try: self.channels.remove(channel) except ValueError: pass finally: channel.connection = None def establish_connection(self): # creates channel to verify connection. # this channel is then used as the next requested channel. # (returned by ``create_channel``). self._avail_channels.append(self.create_channel(self)) return self # for drain events def close_connection(self, connection): self.cycle.close() for chan_list in self._avail_channels, self.channels: while chan_list: try: channel = chan_list.pop() except LookupError: # pragma: no cover pass else: channel.close() def drain_events(self, connection, timeout=None): time_start = monotonic() get = self.cycle.get polling_interval = self.polling_interval if timeout and polling_interval and polling_interval > timeout: polling_interval = timeout while 1: try: get(self._deliver, timeout=timeout) except Empty: if timeout is not None and monotonic() - time_start >= timeout: raise socket.timeout() if polling_interval is not None: sleep(polling_interval) else: break def _deliver(self, message, queue): if not queue: raise KeyError( 'Received message without destination queue: {}'.format( message)) try: callback = self._callbacks[queue] except KeyError: logger.warning(W_NO_CONSUMERS, queue) self._reject_inbound_message(message) else: callback(message) def _reject_inbound_message(self, raw_message): for channel in self.channels: if channel: message = channel.Message(raw_message, channel=channel) channel.qos.append(message, message.delivery_tag) channel.basic_reject(message.delivery_tag, requeue=True) break def on_message_ready(self, channel, message, queue): if not queue or queue not in self._callbacks: raise KeyError( 'Message for queue {!r} without consumers: {}'.format( queue, message)) self._callbacks[queue](message) def _drain_channel(self, channel, callback, timeout=None): return channel.drain_events(callback=callback, timeout=timeout) @property def default_connection_params(self): return {'port': self.default_port, 'hostname': 'localhost'} kombu-5.5.3/kombu/transport/virtual/exchange.py000066400000000000000000000114361477772317200216620ustar00rootroot00000000000000"""Virtual AMQ Exchange. Implementations of the standard exchanges defined by the AMQ protocol (excluding the `headers` exchange). """ from __future__ import annotations import re from kombu.utils.text import escape_regex class ExchangeType: """Base class for exchanges. Implements the specifics for an exchange type. Arguments: --------- channel (ChannelT): AMQ Channel. """ type = None def __init__(self, channel): self.channel = channel def lookup(self, table, exchange, routing_key, default): """Lookup all queues matching `routing_key` in `exchange`. Returns ------- str: queue name, or 'default' if no queues matched. 
""" raise NotImplementedError('subclass responsibility') def prepare_bind(self, queue, exchange, routing_key, arguments): """Prepare queue-binding. Returns ------- Tuple[str, Pattern, str]: of `(routing_key, regex, queue)` to be stored for bindings to this exchange. """ return routing_key, None, queue def equivalent(self, prev, exchange, type, durable, auto_delete, arguments): """Return true if `prev` and `exchange` is equivalent.""" return (type == prev['type'] and durable == prev['durable'] and auto_delete == prev['auto_delete'] and (arguments or {}) == (prev['arguments'] or {})) class DirectExchange(ExchangeType): """Direct exchange. The `direct` exchange routes based on exact routing keys. """ type = 'direct' def lookup(self, table, exchange, routing_key, default): return { queue for rkey, _, queue in table if rkey == routing_key } def deliver(self, message, exchange, routing_key, **kwargs): _lookup = self.channel._lookup _put = self.channel._put for queue in _lookup(exchange, routing_key): _put(queue, message, **kwargs) class TopicExchange(ExchangeType): """Topic exchange. The `topic` exchange routes messages based on words separated by dots, using wildcard characters ``*`` (any single word), and ``#`` (one or more words). """ type = 'topic' #: map of wildcard to regex conversions wildcards = {'*': r'.*?[^\.]', '#': r'.*?'} #: compiled regex cache _compiled = {} def lookup(self, table, exchange, routing_key, default): return { queue for rkey, pattern, queue in table if self._match(pattern, routing_key) } def deliver(self, message, exchange, routing_key, **kwargs): _lookup = self.channel._lookup _put = self.channel._put deadletter = self.channel.deadletter_queue for queue in [q for q in _lookup(exchange, routing_key) if q and q != deadletter]: _put(queue, message, **kwargs) def prepare_bind(self, queue, exchange, routing_key, arguments): return routing_key, self.key_to_pattern(routing_key), queue def key_to_pattern(self, rkey): """Get the corresponding regex for any routing key.""" return '^%s$' % (r'\.'.join( self.wildcards.get(word, word) for word in escape_regex(rkey, '.#*').split('.') )) def _match(self, pattern, string): """Match regular expression (cached). Same as :func:`re.match`, except the regex is compiled and cached, then reused on subsequent matches with the same pattern. """ try: compiled = self._compiled[pattern] except KeyError: compiled = self._compiled[pattern] = re.compile(pattern, re.U) return compiled.match(string) class FanoutExchange(ExchangeType): """Fanout exchange. The `fanout` exchange implements broadcast messaging by delivering copies of all messages to all queues bound to the exchange. To support fanout the virtual channel needs to store the table as shared state. This requires that the `Channel.supports_fanout` attribute is set to true, and the `Channel._queue_bind` and `Channel.get_table` methods are implemented. See Also -------- the redis backend for an example implementation of these methods. """ type = 'fanout' def lookup(self, table, exchange, routing_key, default): return {queue for _, _, queue in table} def deliver(self, message, exchange, routing_key, **kwargs): if self.channel.supports_fanout: self.channel._put_fanout( exchange, message, routing_key, **kwargs) #: Map of standard exchange types and corresponding classes. 
STANDARD_EXCHANGE_TYPES = { 'direct': DirectExchange, 'topic': TopicExchange, 'fanout': FanoutExchange, } kombu-5.5.3/kombu/transport/zookeeper.py000066400000000000000000000143031477772317200204110ustar00rootroot00000000000000# copyright: (c) 2010 - 2013 by Mahendra M. # license: BSD, see LICENSE for more details. """Zookeeper transport module for kombu. Zookeeper based transport. This transport uses the built-in kazoo Zookeeper based queue implementation. **References** - https://zookeeper.apache.org/doc/current/recipes.html#sc_recipes_Queues - https://kazoo.readthedocs.io/en/latest/api/recipe/queue.html **Limitations** This queue does not offer reliable consumption. An entry is removed from the queue prior to being processed. So if an error occurs, the consumer has to re-queue the item or it will be lost. Features ======== * Type: Virtual * Supports Direct: Yes * Supports Topic: Yes * Supports Fanout: No * Supports Priority: Yes * Supports TTL: No Connection String ================= Connects to a zookeeper node as: .. code-block:: zookeeper://SERVER:PORT/VHOST The becomes the base for all the other znodes. So we can use it like a vhost. Transport Options ================= """ from __future__ import annotations import os import socket from queue import Empty from kombu.utils.encoding import bytes_to_str, ensure_bytes from kombu.utils.json import dumps, loads from . import virtual try: import kazoo from kazoo.client import KazooClient from kazoo.recipe.queue import Queue KZ_CONNECTION_ERRORS = ( kazoo.exceptions.SystemErrorException, kazoo.exceptions.ConnectionLossException, kazoo.exceptions.MarshallingErrorException, kazoo.exceptions.UnimplementedException, kazoo.exceptions.OperationTimeoutException, kazoo.exceptions.NoAuthException, kazoo.exceptions.InvalidACLException, kazoo.exceptions.AuthFailedException, kazoo.exceptions.SessionExpiredException, ) KZ_CHANNEL_ERRORS = ( kazoo.exceptions.RuntimeInconsistencyException, kazoo.exceptions.DataInconsistencyException, kazoo.exceptions.BadArgumentsException, kazoo.exceptions.MarshallingErrorException, kazoo.exceptions.UnimplementedException, kazoo.exceptions.OperationTimeoutException, kazoo.exceptions.ApiErrorException, kazoo.exceptions.NoNodeException, kazoo.exceptions.NoAuthException, kazoo.exceptions.NodeExistsException, kazoo.exceptions.NoChildrenForEphemeralsException, kazoo.exceptions.NotEmptyException, kazoo.exceptions.SessionExpiredException, kazoo.exceptions.InvalidCallbackException, socket.error, ) except ImportError: kazoo = None KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () DEFAULT_PORT = 2181 __author__ = 'Mahendra M ' class Channel(virtual.Channel): """Zookeeper Channel.""" _client = None _queues = {} def __init__(self, connection, **kwargs): super().__init__(connection, **kwargs) vhost = self.connection.client.virtual_host self._vhost = '/{}'.format(vhost.strip('/')) def _get_path(self, queue_name): return os.path.join(self._vhost, queue_name) def _get_queue(self, queue_name): queue = self._queues.get(queue_name, None) if queue is None: queue = Queue(self.client, self._get_path(queue_name)) self._queues[queue_name] = queue # Ensure that the queue is created len(queue) return queue def _put(self, queue, message, **kwargs): return self._get_queue(queue).put( ensure_bytes(dumps(message)), priority=self._get_message_priority(message, reverse=True), ) def _get(self, queue): queue = self._get_queue(queue) msg = queue.get() if msg is None: raise Empty() return loads(bytes_to_str(msg)) def _purge(self, queue): count = 0 queue = 
self._get_queue(queue) while True: msg = queue.get() if msg is None: break count += 1 return count def _delete(self, queue, *args, **kwargs): if self._has_queue(queue): self._purge(queue) self.client.delete(self._get_path(queue)) def _size(self, queue): queue = self._get_queue(queue) return len(queue) def _new_queue(self, queue, **kwargs): if not self._has_queue(queue): queue = self._get_queue(queue) def _has_queue(self, queue): return self.client.exists(self._get_path(queue)) is not None def _open(self): conninfo = self.connection.client hosts = [] if conninfo.alt: for host_port in conninfo.alt: if host_port.startswith('zookeeper://'): host_port = host_port[len('zookeeper://'):] if not host_port: continue try: host, port = host_port.split(':', 1) host_port = (host, int(port)) except ValueError: if host_port == conninfo.hostname: host_port = (host_port, conninfo.port or DEFAULT_PORT) else: host_port = (host_port, DEFAULT_PORT) hosts.append(host_port) host_port = (conninfo.hostname, conninfo.port or DEFAULT_PORT) if host_port not in hosts: hosts.insert(0, host_port) conn_str = ','.join([f'{h}:{p}' for h, p in hosts]) conn = KazooClient(conn_str) conn.start() return conn @property def client(self): if self._client is None: self._client = self._open() return self._client class Transport(virtual.Transport): """Zookeeper Transport.""" Channel = Channel polling_interval = 1 default_port = DEFAULT_PORT connection_errors = ( virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS ) channel_errors = ( virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS ) driver_type = 'zookeeper' driver_name = 'kazoo' def __init__(self, *args, **kwargs): if kazoo is None: raise ImportError('The kazoo library is not installed') super().__init__(*args, **kwargs) def driver_version(self): return kazoo.__version__ kombu-5.5.3/kombu/utils/000077500000000000000000000000001477772317200151375ustar00rootroot00000000000000kombu-5.5.3/kombu/utils/__init__.py000066400000000000000000000012721477772317200172520ustar00rootroot00000000000000"""DEPRECATED - Import from modules below.""" from __future__ import annotations from .collections import EqualityDict from .compat import fileno, maybe_fileno, nested, register_after_fork from .div import emergency_dump_state from .functional import (fxrange, fxrangemax, maybe_list, reprcall, retry_over_time) from .imports import symbol_by_name from .objects import cached_property from .uuid import uuid __all__ = ( 'EqualityDict', 'uuid', 'maybe_list', 'fxrange', 'fxrangemax', 'retry_over_time', 'emergency_dump_state', 'cached_property', 'register_after_fork', 'reprkwargs', 'reprcall', 'symbol_by_name', 'nested', 'fileno', 'maybe_fileno', ) kombu-5.5.3/kombu/utils/amq_manager.py000066400000000000000000000013141477772317200177600ustar00rootroot00000000000000"""AMQP Management API utilities.""" from __future__ import annotations def get_manager(client, hostname=None, port=None, userid=None, password=None): """Get pyrabbit manager.""" import pyrabbit opt = client.transport_options.get def get(name, val, default): return (val if val is not None else opt('manager_%s' % name) or getattr(client, name, None) or default) host = get('hostname', hostname, 'localhost') port = port if port is not None else opt('manager_port', 15672) userid = get('userid', userid, 'guest') password = get('password', password, 'guest') return pyrabbit.Client(f'{host}:{port}', userid, password) kombu-5.5.3/kombu/utils/collections.py000066400000000000000000000016561477772317200200370ustar00rootroot00000000000000"""Custom 
maps, sequences, etc.""" from __future__ import annotations class HashedSeq(list): """Hashed Sequence. Type used for hash() to make sure the hash is not generated multiple times. """ __slots__ = 'hashvalue' def __init__(self, *seq): self[:] = seq self.hashvalue = hash(seq) def __hash__(self): return self.hashvalue def eqhash(o): """Call ``obj.__eqhash__``.""" try: return o.__eqhash__() except AttributeError: return hash(o) class EqualityDict(dict): """Dict using the eq operator for keying.""" def __getitem__(self, key): h = eqhash(key) if h not in self: return self.__missing__(key) return super().__getitem__(h) def __setitem__(self, key, value): return super().__setitem__(eqhash(key), value) def __delitem__(self, key): return super().__delitem__(eqhash(key)) kombu-5.5.3/kombu/utils/compat.py000066400000000000000000000065641477772317200170070ustar00rootroot00000000000000"""Python Compatibility Utilities.""" from __future__ import annotations import numbers import sys from contextlib import contextmanager from functools import wraps from importlib import metadata as importlib_metadata from io import UnsupportedOperation from kombu.exceptions import reraise FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation) try: from billiard.util import register_after_fork except ImportError: # pragma: no cover try: from multiprocessing.util import register_after_fork except ImportError: register_after_fork = None _environment = None def coro(gen): """Decorator to mark generator as co-routine.""" @wraps(gen) def wind_up(*args, **kwargs): it = gen(*args, **kwargs) next(it) return it return wind_up def _detect_environment(): # ## -eventlet- if 'eventlet' in sys.modules: try: import socket from eventlet.patcher import is_monkey_patched as is_eventlet if is_eventlet(socket): return 'eventlet' except ImportError: pass # ## -gevent- if 'gevent' in sys.modules: try: import socket from gevent import socket as _gsocket if socket.socket is _gsocket.socket: return 'gevent' except ImportError: pass return 'default' def detect_environment(): """Detect the current environment: default, eventlet, or gevent.""" global _environment if _environment is None: _environment = _detect_environment() return _environment def entrypoints(namespace): """Return setuptools entrypoints for namespace.""" if sys.version_info >= (3,10): entry_points = importlib_metadata.entry_points(group=namespace) else: entry_points = importlib_metadata.entry_points() try: entry_points = entry_points.get(namespace, []) except AttributeError: entry_points = entry_points.select(group=namespace) return ( (ep, ep.load()) for ep in entry_points ) def fileno(f): """Get fileno from file-like object.""" if isinstance(f, numbers.Integral): return f return f.fileno() def maybe_fileno(f): """Get object fileno, or :const:`None` if not defined.""" try: return fileno(f) except FILENO_ERRORS: pass @contextmanager def nested(*managers): # pragma: no cover """Nest context managers.""" # flake8: noqa exits = [] vars = [] exc = (None, None, None) try: try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) yield vars except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): # Don't rely on sys.exc_info() still containing # the right information. 
Another exception may # have been raised and caught by an exit method reraise(exc[0], exc[1], exc[2]) finally: del(exc) kombu-5.5.3/kombu/utils/debug.py000066400000000000000000000040011477772317200165720ustar00rootroot00000000000000"""Debugging support.""" from __future__ import annotations import logging from typing import TYPE_CHECKING from vine.utils import wraps from kombu.log import get_logger if TYPE_CHECKING: from logging import Logger from typing import Any, Callable from kombu.transport.base import Transport __all__ = ('setup_logging', 'Logwrapped') def setup_logging( loglevel: int | None = logging.DEBUG, loggers: list[str] | None = None ) -> None: """Setup logging to stdout.""" loggers = ['kombu.connection', 'kombu.channel'] if not loggers else loggers for logger_name in loggers: logger = get_logger(logger_name) logger.addHandler(logging.StreamHandler()) logger.setLevel(loglevel) class Logwrapped: """Wrap all object methods, to log on call.""" __ignore = ('__enter__', '__exit__') def __init__( self, instance: Transport, logger: Logger | None = None, ident: str | None = None ): self.instance = instance self.logger = get_logger(logger) self.ident = ident def __getattr__(self, key: str) -> Callable: meth = getattr(self.instance, key) if not callable(meth) or key in self.__ignore: return meth @wraps(meth) def __wrapped(*args: list[Any], **kwargs: dict[str, Any]) -> Callable: info = '' if self.ident: info += self.ident.format(self.instance) info += f'{meth.__name__}(' if args: info += ', '.join(map(repr, args)) if kwargs: if args: info += ', ' info += ', '.join(f'{key}={value!r}' for key, value in kwargs.items()) info += ')' self.logger.debug(info) return meth(*args, **kwargs) return __wrapped def __repr__(self) -> str: return repr(self.instance) def __dir__(self) -> list[str]: return dir(self.instance) kombu-5.5.3/kombu/utils/div.py000066400000000000000000000016551477772317200163020ustar00rootroot00000000000000"""Div. Utilities.""" from __future__ import annotations import os import sys from .encoding import default_encode def emergency_dump_state(state, open_file=open, dump=None, stderr=None): """Dump message state to stdout or file.""" from pprint import pformat from tempfile import mkstemp stderr = sys.stderr if stderr is None else stderr if dump is None: import pickle dump = pickle.dump fd, persist = mkstemp() os.close(fd) print(f'EMERGENCY DUMP STATE TO FILE -> {persist} <-', file=stderr) fh = open_file(persist, 'w') try: try: dump(state, fh, protocol=0) except Exception as exc: print( f'Cannot pickle state: {exc!r}. Fallback to pformat.', file=stderr, ) fh.write(default_encode(pformat(state))) finally: fh.flush() fh.close() return persist kombu-5.5.3/kombu/utils/encoding.py000066400000000000000000000043711477772317200173040ustar00rootroot00000000000000"""Text encoding utilities. Utilities to encode text, and to safely emit text from running applications without crashing from the infamous :exc:`UnicodeDecodeError` exception. """ from __future__ import annotations import sys import traceback #: safe_str takes encoding from this file by default. #: :func:`set_default_encoding_file` can used to set the #: default output file. 
default_encoding_file = None def set_default_encoding_file(file): """Set file used to get codec information.""" global default_encoding_file default_encoding_file = file def get_default_encoding_file(): """Get file used to get codec information.""" return default_encoding_file if sys.platform.startswith('java'): # pragma: no cover def default_encoding(file=None): """Get default encoding.""" return 'utf-8' else: def default_encoding(file=None): """Get default encoding.""" file = file or get_default_encoding_file() return getattr(file, 'encoding', None) or sys.getfilesystemencoding() def str_to_bytes(s): """Convert str to bytes.""" if isinstance(s, str): return s.encode() return s def bytes_to_str(s): """Convert bytes to str.""" if isinstance(s, bytes): return s.decode(errors='replace') return s def from_utf8(s, *args, **kwargs): """Get str from utf-8 encoding.""" return s def ensure_bytes(s): """Ensure s is bytes, not str.""" if not isinstance(s, bytes): return str_to_bytes(s) return s def default_encode(obj): """Encode using default encoding.""" return obj def safe_str(s, errors='replace'): """Safe form of str(), void of unicode errors.""" s = bytes_to_str(s) if not isinstance(s, (str, bytes)): return safe_repr(s, errors) return _safe_str(s, errors) def _safe_str(s, errors='replace', file=None): if isinstance(s, str): return s try: return str(s) except Exception as exc: return ''.format( type(s), exc, '\n'.join(traceback.format_stack())) def safe_repr(o, errors='replace'): """Safe form of repr, void of Unicode errors.""" try: return repr(o) except Exception: return _safe_str(o, errors) kombu-5.5.3/kombu/utils/eventio.py000066400000000000000000000236571477772317200171770ustar00rootroot00000000000000"""Selector Utilities.""" from __future__ import annotations import errno import math import select as __select__ import sys from numbers import Integral from . 
import fileno from .compat import detect_environment __all__ = ('poll',) _selectf = __select__.select _selecterr = __select__.error xpoll = getattr(__select__, 'poll', None) epoll = getattr(__select__, 'epoll', None) kqueue = getattr(__select__, 'kqueue', None) kevent = getattr(__select__, 'kevent', None) KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1) KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2) KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4) KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32) KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384) KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768) KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1) KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2) KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3) KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4) KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5) KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6) KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7) KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1) KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1) KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2) KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4) KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8) KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16) KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32) KQ_NOTE_REVOKE = getattr(__select__, 'KQ_NOTE_REVOKE', 64) POLLIN = getattr(__select__, 'POLLIN', 1) POLLOUT = getattr(__select__, 'POLLOUT', 4) POLLERR = getattr(__select__, 'POLLERR', 8) POLLHUP = getattr(__select__, 'POLLHUP', 16) POLLNVAL = getattr(__select__, 'POLLNVAL', 32) READ = POLL_READ = 0x001 WRITE = POLL_WRITE = 0x004 ERR = POLL_ERR = 0x008 | 0x010 try: SELECT_BAD_FD = {errno.EBADF, errno.WSAENOTSOCK} except AttributeError: SELECT_BAD_FD = {errno.EBADF} class _epoll: def __init__(self): self._epoll = epoll() def register(self, fd, events): try: self._epoll.register(fd, events) except Exception as exc: if getattr(exc, 'errno', None) != errno.EEXIST: raise return fd def unregister(self, fd): try: self._epoll.unregister(fd) except (OSError, ValueError, KeyError, TypeError): pass except OSError as exc: if getattr(exc, 'errno', None) not in (errno.ENOENT, errno.EPERM): raise def poll(self, timeout): try: return self._epoll.poll(timeout if timeout is not None else -1) except Exception as exc: if getattr(exc, 'errno', None) != errno.EINTR: raise def close(self): self._epoll.close() class _kqueue: w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND | KQ_NOTE_ATTRIB | KQ_NOTE_DELETE) def __init__(self): self._kqueue = kqueue() self._active = {} self.on_file_change = None self._kcontrol = self._kqueue.control def register(self, fd, events): self._control(fd, events, KQ_EV_ADD) self._active[fd] = events return fd def unregister(self, fd): events = self._active.pop(fd, None) if events: try: self._control(fd, events, KQ_EV_DELETE) except OSError: pass def watch_file(self, fd): ev = kevent(fd, filter=KQ_FILTER_VNODE, flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR, fflags=self.w_fflags) self._kcontrol([ev], 0) def unwatch_file(self, fd): ev = kevent(fd, filter=KQ_FILTER_VNODE, flags=KQ_EV_DELETE, fflags=self.w_fflags) self._kcontrol([ev], 0) def _control(self, fd, events, flags): if not events: return kevents = [] if events & WRITE: kevents.append(kevent(fd, filter=KQ_FILTER_WRITE, flags=flags)) if not kevents or events & READ: kevents.append( kevent(fd, filter=KQ_FILTER_READ, 
flags=flags), ) control = self._kcontrol for e in kevents: try: control([e], 0) except ValueError: pass def poll(self, timeout): try: kevents = self._kcontrol(None, 1000, timeout) except Exception as exc: if getattr(exc, 'errno', None) == errno.EINTR: return raise events, file_changes = {}, [] for k in kevents: fd = k.ident if k.filter == KQ_FILTER_READ: events[fd] = events.get(fd, 0) | READ elif k.filter == KQ_FILTER_WRITE: if k.flags & KQ_EV_EOF: events[fd] = ERR else: events[fd] = events.get(fd, 0) | WRITE elif k.filter == KQ_EV_ERROR: events[fd] = events.get(fd, 0) | ERR elif k.filter == KQ_FILTER_VNODE: if k.fflags & KQ_NOTE_DELETE: self.unregister(fd) file_changes.append(k) if file_changes: self.on_file_change(file_changes) return list(events.items()) def close(self): self._kqueue.close() class _poll: def __init__(self): self._poller = xpoll() self._quick_poll = self._poller.poll self._quick_register = self._poller.register self._quick_unregister = self._poller.unregister def register(self, fd, events): fd = fileno(fd) poll_flags = 0 if events & ERR: poll_flags |= POLLERR if events & WRITE: poll_flags |= POLLOUT if events & READ: poll_flags |= POLLIN self._quick_register(fd, poll_flags) return fd def unregister(self, fd): try: fd = fileno(fd) except OSError as exc: # we don't know the previous fd of this object # but it will be removed by the next poll iteration. if getattr(exc, 'errno', None) in SELECT_BAD_FD: return fd raise self._quick_unregister(fd) return fd def poll(self, timeout, round=math.ceil, POLLIN=POLLIN, POLLOUT=POLLOUT, POLLERR=POLLERR, READ=READ, WRITE=WRITE, ERR=ERR, Integral=Integral): timeout = 0 if timeout and timeout < 0 else round((timeout or 0) * 1e3) try: event_list = self._quick_poll(timeout) except (_selecterr, OSError) as exc: if getattr(exc, 'errno', None) == errno.EINTR: return raise ready = [] for fd, event in event_list: events = 0 if event & POLLIN: events |= READ if event & POLLOUT: events |= WRITE if event & POLLERR or event & POLLNVAL or event & POLLHUP: events |= ERR assert events if not isinstance(fd, Integral): fd = fd.fileno() ready.append((fd, events)) return ready def close(self): self._poller = None class _select: def __init__(self): self._all = (self._rfd, self._wfd, self._efd) = set(), set(), set() def register(self, fd, events): fd = fileno(fd) if events & ERR: self._efd.add(fd) if events & WRITE: self._wfd.add(fd) if events & READ: self._rfd.add(fd) return fd def _remove_bad(self): for fd in self._rfd | self._wfd | self._efd: try: _selectf([fd], [], [], 0) except (_selecterr, OSError) as exc: if getattr(exc, 'errno', None) in SELECT_BAD_FD: self.unregister(fd) def unregister(self, fd): try: fd = fileno(fd) except OSError as exc: # we don't know the previous fd of this object # but it will be removed by the next poll iteration. 
if getattr(exc, 'errno', None) in SELECT_BAD_FD: return raise self._rfd.discard(fd) self._wfd.discard(fd) self._efd.discard(fd) def poll(self, timeout): try: read, write, error = _selectf( self._rfd, self._wfd, self._efd, timeout, ) except (_selecterr, OSError) as exc: if getattr(exc, 'errno', None) == errno.EINTR: return elif getattr(exc, 'errno', None) in SELECT_BAD_FD: return self._remove_bad() raise events = {} for fd in read: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | READ for fd in write: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | WRITE for fd in error: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | ERR return list(events.items()) def close(self): self._rfd.clear() self._wfd.clear() self._efd.clear() def _get_poller(): if detect_environment() != 'default': # greenlet return _select elif epoll: # Py2.6+ Linux return _epoll elif kqueue and 'netbsd' in sys.platform: return _kqueue elif xpoll: return _poll else: return _select def poll(*args, **kwargs): """Create new poller instance.""" return _get_poller()(*args, **kwargs) kombu-5.5.3/kombu/utils/functional.py000066400000000000000000000247131477772317200176620ustar00rootroot00000000000000"""Functional Utilities.""" from __future__ import annotations import inspect import random import threading from collections import OrderedDict, UserDict from collections.abc import Iterable, Mapping from itertools import count, repeat from time import sleep, time from vine.utils import wraps from .encoding import safe_repr as _safe_repr __all__ = ( 'LRUCache', 'memoize', 'lazy', 'maybe_evaluate', 'is_list', 'maybe_list', 'dictfilter', 'retry_over_time', ) KEYWORD_MARK = object() class ChannelPromise: def __init__(self, contract): self.__contract__ = contract def __call__(self): try: return self.__value__ except AttributeError: value = self.__value__ = self.__contract__() return value def __repr__(self): try: return repr(self.__value__) except AttributeError: return f'' class LRUCache(UserDict): """LRU Cache implementation using a doubly linked list to track access. Arguments: --------- limit (int): The maximum number of keys to keep in the cache. When a new key is inserted and the limit has been exceeded, the *Least Recently Used* key will be discarded from the cache. """ def __init__(self, limit=None): self.limit = limit self.mutex = threading.RLock() self.data = OrderedDict() def __getitem__(self, key): with self.mutex: value = self[key] = self.data.pop(key) return value def update(self, *args, **kwargs): with self.mutex: data, limit = self.data, self.limit data.update(*args, **kwargs) if limit and len(data) > limit: # pop additional items in case limit exceeded for _ in range(len(data) - limit): data.popitem(last=False) def popitem(self, last=True): with self.mutex: return self.data.popitem(last) def __setitem__(self, key, value): # remove least recently used key. 
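        # Illustrative sketch of the eviction behaviour this method relies on:
        # __getitem__ above pops an accessed key and re-inserts it, so the
        # first key of the underlying OrderedDict is always the least
        # recently used entry, and popping ``next(iter(self.data))`` below
        # evicts exactly that key. Assuming a cache with limit=2:
        #   >>> cache = LRUCache(limit=2)
        #   >>> cache['a'] = 1
        #   >>> cache['b'] = 2
        #   >>> cache['a']        # touching 'a' leaves 'b' as the LRU key
        #   1
        #   >>> cache['c'] = 3    # exceeds the limit, so 'b' is discarded
        #   >>> 'b' in cache, 'a' in cache
        #   (False, True)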
with self.mutex: if self.limit and len(self.data) >= self.limit: self.data.pop(next(iter(self.data))) self.data[key] = value def __iter__(self): return iter(self.data) def _iterate_items(self): with self.mutex: for k in self: try: yield (k, self.data[k]) except KeyError: # pragma: no cover pass iteritems = _iterate_items def _iterate_values(self): with self.mutex: for k in self: try: yield self.data[k] except KeyError: # pragma: no cover pass itervalues = _iterate_values def _iterate_keys(self): # userdict.keys in py3k calls __getitem__ with self.mutex: return self.data.keys() iterkeys = _iterate_keys def incr(self, key, delta=1): with self.mutex: # this acts as memcached does- store as a string, but return a # integer as long as it exists and we can cast it newval = int(self.data.pop(key)) + delta self[key] = str(newval) return newval def __getstate__(self): d = dict(vars(self)) d.pop('mutex') return d def __setstate__(self, state): self.__dict__ = state self.mutex = threading.RLock() keys = _iterate_keys values = _iterate_values items = _iterate_items def memoize(maxsize=None, keyfun=None, Cache=LRUCache): """Decorator to cache function return value.""" def _memoize(fun): mutex = threading.Lock() cache = Cache(limit=maxsize) @wraps(fun) def _M(*args, **kwargs): if keyfun: key = keyfun(args, kwargs) else: key = args + (KEYWORD_MARK,) + tuple(sorted(kwargs.items())) try: with mutex: value = cache[key] except KeyError: value = fun(*args, **kwargs) _M.misses += 1 with mutex: cache[key] = value else: _M.hits += 1 return value def clear(): """Clear the cache and reset cache statistics.""" cache.clear() _M.hits = _M.misses = 0 _M.hits = _M.misses = 0 _M.clear = clear _M.original_func = fun return _M return _memoize class lazy: """Holds lazy evaluation. Evaluated when called or if the :meth:`evaluate` method is called. The function is re-evaluated on every call. Overloaded operations that will evaluate the promise: :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`. """ def __init__(self, fun, *args, **kwargs): self._fun = fun self._args = args self._kwargs = kwargs def __call__(self): return self.evaluate() def evaluate(self): return self._fun(*self._args, **self._kwargs) def __str__(self): return str(self()) def __repr__(self): return repr(self()) def __eq__(self, rhs): return self() == rhs def __ne__(self, rhs): return self() != rhs def __deepcopy__(self, memo): memo[id(self)] = self return self def __reduce__(self): return (self.__class__, (self._fun,), {'_args': self._args, '_kwargs': self._kwargs}) def maybe_evaluate(value): """Evaluate value only if value is a :class:`lazy` instance.""" if isinstance(value, lazy): return value.evaluate() return value def is_list(obj, scalars=(Mapping, str), iters=(Iterable,)): """Return true if the object is iterable. Note: ---- Returns false if object is a mapping or string. 
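    Examples
    --------
    Illustrative, with the default ``scalars``:

        >>> is_list([1, 2, 3])
        True
        >>> is_list('abc')      # str is treated as a scalar
        False
        >>> is_list({'a': 1})   # mappings are treated as scalars
        False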
""" return isinstance(obj, iters) and not isinstance(obj, scalars or ()) def maybe_list(obj, scalars=(Mapping, str)): """Return list of one element if ``l`` is a scalar.""" return obj if obj is None or is_list(obj, scalars) else [obj] def dictfilter(d=None, **kw): """Remove all keys from dict ``d`` whose value is :const:`None`.""" d = kw if d is None else (dict(d, **kw) if kw else d) return {k: v for k, v in d.items() if v is not None} def shufflecycle(it): it = list(it) # don't modify callers list shuffle = random.shuffle for _ in repeat(None): shuffle(it) yield it[0] def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False): cur = start * 1.0 while 1: if not stop or cur <= stop: yield cur cur += step else: if not repeatlast: break yield cur - step def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0): sum_, cur = 0, start * 1.0 while 1: if sum_ >= max: break yield cur if stop: cur = min(cur + step, stop) else: cur += step sum_ += cur def retry_over_time(fun, catch, args=None, kwargs=None, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30, callback=None, timeout=None): """Retry the function over and over until max retries is exceeded. For each retry we sleep a for a while before we try again, this interval is increased for every retry until the max seconds is reached. Arguments: --------- fun (Callable): The function to try catch (Tuple[BaseException]): Exceptions to catch, can be either tuple or a single exception class. Keyword Arguments: ----------------- args (Tuple): Positional arguments passed on to the function. kwargs (Dict): Keyword arguments passed on to the function. errback (Callable): Callback for when an exception in ``catch`` is raised. The callback must take three arguments: ``exc``, ``interval_range`` and ``retries``, where ``exc`` is the exception instance, ``interval_range`` is an iterator which return the time in seconds to sleep next, and ``retries`` is the number of previous retries. max_retries (int): Maximum number of retries before we give up. If neither of this and timeout is set, we will retry forever. If one of this and timeout is reached, stop. interval_start (float): How long (in seconds) we start sleeping between retries. interval_step (float): By how much the interval is increased for each retry. interval_max (float): Maximum number of seconds to sleep between retries. timeout (int): Maximum seconds waiting before we give up. """ kwargs = {} if not kwargs else kwargs args = [] if not args else args interval_range = fxrange(interval_start, interval_max + interval_start, interval_step, repeatlast=True) end = time() + timeout if timeout else None for retries in count(): try: return fun(*args, **kwargs) except catch as exc: if max_retries is not None and retries >= max_retries: raise if end and time() > end: raise if callback: callback() tts = float(errback(exc, interval_range, retries) if errback else next(interval_range)) if tts: for _ in range(int(tts)): if callback: callback() sleep(1.0) # sleep remainder after int truncation above. 
sleep(abs(int(tts) - tts)) def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'): return sep.join(fmt.format(k, _safe_repr(v)) for k, v in kwargs.items()) def reprcall(name, args=(), kwargs=None, sep=', '): kwargs = {} if not kwargs else kwargs return '{}({}{}{})'.format( name, sep.join(map(_safe_repr, args or ())), (args and kwargs) and sep or '', reprkwargs(kwargs, sep), ) def accepts_argument(func, argument_name): argument_spec = inspect.getfullargspec(func) return ( argument_name in argument_spec.args or argument_name in argument_spec.kwonlyargs ) # Compat names (before kombu 3.0) promise = lazy maybe_promise = maybe_evaluate kombu-5.5.3/kombu/utils/imports.py000066400000000000000000000040511477772317200172060ustar00rootroot00000000000000"""Import related utilities.""" from __future__ import annotations import importlib import sys from kombu.exceptions import reraise def symbol_by_name(name, aliases=None, imp=None, package=None, sep='.', default=None, **kwargs): """Get symbol by qualified name. The name should be the full dot-separated path to the class:: modulename.ClassName Example:: celery.concurrency.processes.TaskPool ^- class name or using ':' to separate module and symbol:: celery.concurrency.processes:TaskPool If `aliases` is provided, a dict containing short name/long name mappings, the name is looked up in the aliases first. Examples -------- >>> symbol_by_name('celery.concurrency.processes.TaskPool') >>> symbol_by_name('default', { ... 'default': 'celery.concurrency.processes.TaskPool'}) # Does not try to look up non-string names. >>> from celery.concurrency.processes import TaskPool >>> symbol_by_name(TaskPool) is TaskPool True """ aliases = {} if not aliases else aliases if imp is None: imp = importlib.import_module if not isinstance(name, str): return name # already a class name = aliases.get(name) or name sep = ':' if ':' in name else sep module_name, _, cls_name = name.rpartition(sep) if not module_name: cls_name, module_name = None, package if package else cls_name try: try: module = imp(module_name, package=package, **kwargs) except ValueError as exc: reraise(ValueError, ValueError(f"Couldn't import {name!r}: {exc}"), sys.exc_info()[2]) return getattr(module, cls_name) if cls_name else module except (ImportError, AttributeError): if default is None: raise return default kombu-5.5.3/kombu/utils/json.py000066400000000000000000000077701477772317200164750ustar00rootroot00000000000000"""JSON Serialization Utilities.""" from __future__ import annotations import base64 import json import uuid from datetime import date, datetime, time from decimal import Decimal from typing import Any, Callable, TypeVar textual_types = () try: from django.utils.functional import Promise textual_types += (Promise,) except ImportError: pass class JSONEncoder(json.JSONEncoder): """Kombu custom json encoder.""" def default(self, o): reducer = getattr(o, "__json__", None) if reducer is not None: return reducer() if isinstance(o, textual_types): return str(o) for t, (marker, encoder) in _encoders.items(): if isinstance(o, t): return ( encoder(o) if marker is None else _as(marker, encoder(o)) ) # Bytes is slightly trickier, so we cannot put them directly # into _encoders, because we use two formats: bytes, and base64. 
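        # Illustrative sketch of the two envelopes (spacing follows the
        # json.dumps defaults; values derived from the _as/_decoders helpers
        # defined later in this module):
        #   dumps(b'v')     -> '{"__type__": "bytes", "__value__": "v"}'
        #   dumps(b'\xff')  -> '{"__type__": "base64", "__value__": "/w=="}'
        # loads() reverses either form through the matching _decoders entry.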
if isinstance(o, bytes): try: return _as("bytes", o.decode("utf-8")) except UnicodeDecodeError: return _as("base64", base64.b64encode(o).decode("utf-8")) return super().default(o) def _as(t: str, v: Any): return {"__type__": t, "__value__": v} def dumps( s, _dumps=json.dumps, cls=JSONEncoder, default_kwargs=None, **kwargs ): """Serialize object to json string.""" default_kwargs = default_kwargs or {} return _dumps(s, cls=cls, **dict(default_kwargs, **kwargs)) def object_hook(o: dict): """Hook function to perform custom deserialization.""" if o.keys() == {"__type__", "__value__"}: decoder = _decoders.get(o["__type__"]) if decoder: return decoder(o["__value__"]) else: raise ValueError("Unsupported type", type, o) else: return o def loads(s, _loads=json.loads, decode_bytes=True, object_hook=object_hook): """Deserialize json from string.""" # None of the json implementations supports decoding from # a buffer/memoryview, or even reading from a stream # (load is just loads(fp.read())) # but this is Python, we love copying strings, preferably many times # over. Note that pickle does support buffer/memoryview # if isinstance(s, memoryview): s = s.tobytes().decode("utf-8") elif isinstance(s, bytearray): s = s.decode("utf-8") elif decode_bytes and isinstance(s, bytes): s = s.decode("utf-8") return _loads(s, object_hook=object_hook) DecoderT = EncoderT = Callable[[Any], Any] T = TypeVar("T") EncodedT = TypeVar("EncodedT") def register_type( t: type[T], marker: str | None, encoder: Callable[[T], EncodedT], decoder: Callable[[EncodedT], T] = lambda d: d, ): """Add support for serializing/deserializing native python type. If marker is `None`, the encoding is a pure transformation and the result is not placed in an envelope, so `decoder` is unnecessary. Decoding must instead be handled outside this library. """ _encoders[t] = (marker, encoder) if marker is not None: _decoders[marker] = decoder _encoders: dict[type, tuple[str | None, EncoderT]] = {} _decoders: dict[str, DecoderT] = { "bytes": lambda o: o.encode("utf-8"), "base64": lambda o: base64.b64decode(o.encode("utf-8")), } def _register_default_types(): # NOTE: datetime should be registered before date, # because datetime is also instance of date. register_type(datetime, "datetime", datetime.isoformat, datetime.fromisoformat) register_type( date, "date", lambda o: o.isoformat(), lambda o: datetime.fromisoformat(o).date(), ) register_type(time, "time", lambda o: o.isoformat(), time.fromisoformat) register_type(Decimal, "decimal", str, Decimal) register_type( uuid.UUID, "uuid", lambda o: {"hex": o.hex}, lambda o: uuid.UUID(**o), ) _register_default_types() kombu-5.5.3/kombu/utils/limits.py000066400000000000000000000047671477772317200170300ustar00rootroot00000000000000"""Token bucket implementation for rate limiting.""" from __future__ import annotations from collections import deque from time import monotonic __all__ = ('TokenBucket',) class TokenBucket: """Token Bucket Algorithm. See Also -------- https://en.wikipedia.org/wiki/Token_Bucket Most of this code was stolen from an entry in the ASPN Python Cookbook: https://code.activestate.com/recipes/511490/ Warning: ------- Thread Safety: This implementation is not thread safe. Access to a `TokenBucket` instance should occur within the critical section of any multithreaded code. """ #: The rate in tokens/second that the bucket will be refilled. fill_rate = None #: Maximum number of tokens in the bucket. capacity = 1 #: Timestamp of the last time a token was taken out of the bucket. 
timestamp = None def __init__(self, fill_rate, capacity=1): self.capacity = float(capacity) self._tokens = capacity self.fill_rate = float(fill_rate) self.timestamp = monotonic() self.contents = deque() def add(self, item): self.contents.append(item) def pop(self): return self.contents.popleft() def clear_pending(self): self.contents.clear() def can_consume(self, tokens=1): """Check if one or more tokens can be consumed. Returns ------- bool: true if the number of tokens can be consumed from the bucket. If they can be consumed, a call will also consume the requested number of tokens from the bucket. Calls will only consume `tokens` (the number requested) or zero tokens -- it will never consume a partial number of tokens. """ if tokens <= self._get_tokens(): self._tokens -= tokens return True return False def expected_time(self, tokens=1): """Return estimated time of token availability. Returns ------- float: the time in seconds. """ _tokens = self._get_tokens() tokens = max(tokens, _tokens) return (tokens - _tokens) / self.fill_rate def _get_tokens(self): if self._tokens < self.capacity: now = monotonic() delta = self.fill_rate * (now - self.timestamp) self._tokens = min(self.capacity, self._tokens + delta) self.timestamp = now return self._tokens kombu-5.5.3/kombu/utils/objects.py000066400000000000000000000037731477772317200171540ustar00rootroot00000000000000"""Object Utilities.""" from __future__ import annotations from threading import RLock __all__ = ('cached_property',) try: from functools import cached_property as _cached_property except ImportError: # TODO: Remove this fallback once we drop support for Python < 3.8 from cached_property import threaded_cached_property as _cached_property _NOT_FOUND = object() class cached_property(_cached_property): """Implementation of Cached property.""" def __init__(self, fget=None, fset=None, fdel=None): super().__init__(fget) self.__set = fset self.__del = fdel if not hasattr(self, 'attrname'): # This is a backport so we set this ourselves. self.attrname = self.func.__name__ if not hasattr(self, 'lock'): # Prior to Python 3.12, functools.cached_property has an # undocumented lock which is required for thread-safe __set__ # and __delete__. Create one if it isn't already present. 
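            # (The same RLock is then shared by __get__, __set__ and
            #  __delete__ below, so cache reads and writes stay serialized.)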
self.lock = RLock() def __get__(self, instance, owner=None): # TODO: Remove this after we drop support for Python<3.8 # or fix the signature in the cached_property package with self.lock: return super().__get__(instance, owner) def __set__(self, instance, value): if instance is None: return self with self.lock: if self.__set is not None: value = self.__set(instance, value) cache = instance.__dict__ cache[self.attrname] = value def __delete__(self, instance): if instance is None: return self with self.lock: value = instance.__dict__.pop(self.attrname, _NOT_FOUND) if self.__del and value is not _NOT_FOUND: self.__del(instance, value) def setter(self, fset): return self.__class__(self.func, fset, self.__del) def deleter(self, fdel): return self.__class__(self.func, self.__set, fdel) kombu-5.5.3/kombu/utils/scheduling.py000066400000000000000000000055571477772317200176520ustar00rootroot00000000000000"""Scheduling Utilities.""" from __future__ import annotations from itertools import count from .imports import symbol_by_name __all__ = ( 'FairCycle', 'priority_cycle', 'round_robin_cycle', 'sorted_cycle', ) CYCLE_ALIASES = { 'priority': 'kombu.utils.scheduling:priority_cycle', 'round_robin': 'kombu.utils.scheduling:round_robin_cycle', 'sorted': 'kombu.utils.scheduling:sorted_cycle', } class FairCycle: """Cycle between resources. Consume from a set of resources, where each resource gets an equal chance to be consumed from. Arguments: --------- fun (Callable): Callback to call. resources (Sequence[Any]): List of resources. predicate (type): Exception predicate. """ def __init__(self, fun, resources, predicate=Exception): self.fun = fun self.resources = resources self.predicate = predicate self.pos = 0 def _next(self): while 1: try: resource = self.resources[self.pos] self.pos += 1 return resource except IndexError: self.pos = 0 if not self.resources: raise self.predicate() def get(self, callback, **kwargs): """Get from next resource.""" for tried in count(0): # for infinity resource = self._next() try: return self.fun(resource, callback, **kwargs) except self.predicate: # reraise when retries exhausted. 
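                # (Note: ``tried`` starts at 0, so the exception is only
                #  re-raised once every resource has failed during this
                #  call; earlier failures just advance to the next resource.)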
if tried >= len(self.resources) - 1: raise def close(self): """Close cycle.""" def __repr__(self): """``repr(cycle)``.""" return ''.format( self=self, size=len(self.resources)) class round_robin_cycle: """Iterator that cycles between items in round-robin.""" def __init__(self, it=None): self.items = it if it is not None else [] def update(self, it): """Update items from iterable.""" self.items[:] = it def consume(self, n): """Consume n items.""" return self.items[:n] def rotate(self, last_used): """Move most recently used item to end of list.""" items = self.items try: items.append(items.pop(items.index(last_used))) except ValueError: pass return last_used class priority_cycle(round_robin_cycle): """Cycle that repeats items in order.""" def rotate(self, last_used): """Unused in this implementation.""" class sorted_cycle(priority_cycle): """Cycle in sorted order.""" def consume(self, n): """Consume n items.""" return sorted(self.items[:n]) def cycle_by_name(name): """Get cycle class by name.""" return symbol_by_name(name, CYCLE_ALIASES) kombu-5.5.3/kombu/utils/text.py000066400000000000000000000042301477772317200164740ustar00rootroot00000000000000"""Text Utilities.""" # flake8: noqa from __future__ import annotations from difflib import SequenceMatcher from typing import Iterable, Iterator from kombu import version_info_t def escape_regex(p, white=''): # type: (str, str) -> str """Escape string for use within a regular expression.""" # what's up with re.escape? that code must be neglected or something return ''.join(c if c.isalnum() or c in white else ('\\000' if c == '\000' else '\\' + c) for c in p) def fmatch_iter(needle: str, haystack: Iterable[str], min_ratio: float = 0.6) -> Iterator[tuple[float, str]]: """Fuzzy match: iteratively. Yields ------ Tuple: of ratio and key. 
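    Examples
    --------
    Illustrative, with the default ``min_ratio`` of 0.6:

        >>> [key for ratio, key in fmatch_iter('foo', ['foo', 'fou', 'bar'])]
        ['foo', 'fou']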
""" for key in haystack: ratio = SequenceMatcher(None, needle, key).ratio() if ratio >= min_ratio: yield ratio, key def fmatch_best(needle: str, haystack: Iterable[str], min_ratio: float = 0.6) -> str | None: """Fuzzy match - Find best match (scalar).""" try: return sorted( fmatch_iter(needle, haystack, min_ratio), reverse=True, )[0][1] except IndexError: return None def version_string_as_tuple(s: str) -> version_info_t: """Convert version string to version info tuple.""" v = _unpack_version(*s.split('.')) # X.Y.3a1 -> (X, Y, 3, 'a1') if isinstance(v.micro, str): v = version_info_t(v.major, v.minor, *_splitmicro(*v[2:])) # X.Y.3a1-40 -> (X, Y, 3, 'a1', '40') if not v.serial and v.releaselevel and '-' in v.releaselevel: v = version_info_t(*list(v[0:3]) + v.releaselevel.split('-')) return v def _unpack_version( major: str, minor: str | int = 0, micro: str | int = 0, releaselevel: str = '', serial: str = '' ) -> version_info_t: return version_info_t(int(major), int(minor), micro, releaselevel, serial) def _splitmicro(micro: str, releaselevel: str = '', serial: str = '') -> tuple[int, str, str]: for index, char in enumerate(micro): if not char.isdigit(): break else: return int(micro or 0), releaselevel, serial return int(micro[:index]), micro[index:], serial kombu-5.5.3/kombu/utils/time.py000066400000000000000000000004201477772317200164430ustar00rootroot00000000000000"""Time Utilities.""" from __future__ import annotations __all__ = ('maybe_s_to_ms',) def maybe_s_to_ms(v: int | float | None) -> int | None: """Convert seconds to milliseconds, but return None for None.""" return int(float(v) * 1000.0) if v is not None else v kombu-5.5.3/kombu/utils/url.py000066400000000000000000000075051477772317200163220ustar00rootroot00000000000000"""URL Utilities.""" # flake8: noqa from __future__ import annotations from collections.abc import Mapping from functools import partial from typing import NamedTuple from urllib.parse import parse_qsl, quote, unquote, urlparse try: import ssl ssl_available = True except ImportError: # pragma: no cover ssl_available = False from ..log import get_logger safequote = partial(quote, safe='') logger = get_logger(__name__) class urlparts(NamedTuple): """Named tuple representing parts of the URL.""" scheme: str hostname: str port: int username: str password: str path: str query: Mapping def parse_url(url): # type: (str) -> Dict """Parse URL into mapping of components.""" scheme, host, port, user, password, path, query = _parse_url(url) if query: keys = [key for key in query.keys() if key.startswith('ssl_')] for key in keys: if key == "ssl_check_hostname": query[key] = query[key].lower() != 'false' elif key == 'ssl_cert_reqs': query[key] = parse_ssl_cert_reqs(query[key]) if query[key] is None: logger.warning('Defaulting to insecure SSL behaviour.') if 'ssl' not in query: query['ssl'] = {} query['ssl'][key] = query[key] del query[key] return dict(transport=scheme, hostname=host, port=port, userid=user, password=password, virtual_host=path, **query) def url_to_parts(url): # type: (str) -> urlparts """Parse URL into :class:`urlparts` tuple of components.""" scheme = urlparse(url).scheme schemeless = url[len(scheme) + 3:] # parse with HTTP URL semantics parts = urlparse('http://' + schemeless) path = parts.path or '' path = path[1:] if path and path[0] == '/' else path return urlparts( scheme, unquote(parts.hostname or '') or None, parts.port, unquote(parts.username or '') or None, unquote(parts.password or '') or None, unquote(path or '') or None, dict(parse_qsl(parts.query)), 
) _parse_url = url_to_parts def as_url(scheme, host=None, port=None, user=None, password=None, path=None, query=None, sanitize=False, mask='**'): # type: (str, str, int, str, str, str, str, bool, str) -> str """Generate URL from component parts.""" parts = [f'{scheme}://'] if user or password: if user: parts.append(safequote(user)) if password: if sanitize: parts.extend([':', mask] if mask else [':']) else: parts.extend([':', safequote(password)]) parts.append('@') parts.append(safequote(host) if host else '') if port: parts.extend([':', port]) parts.extend(['/', path]) return ''.join(str(part) for part in parts if part) def sanitize_url(url, mask='**'): # type: (str, str) -> str """Return copy of URL with password removed.""" return as_url(*_parse_url(url), sanitize=True, mask=mask) def maybe_sanitize_url(url, mask='**'): # type: (Any, str) -> Any """Sanitize url, or do nothing if url undefined.""" if isinstance(url, str) and '://' in url: return sanitize_url(url, mask) return url def parse_ssl_cert_reqs(query_value): # type: (str) -> Any """Given the query parameter for ssl_cert_reqs, return the SSL constant or None.""" if ssl_available: query_value_to_constant = { 'CERT_REQUIRED': ssl.CERT_REQUIRED, 'CERT_OPTIONAL': ssl.CERT_OPTIONAL, 'CERT_NONE': ssl.CERT_NONE, 'required': ssl.CERT_REQUIRED, 'optional': ssl.CERT_OPTIONAL, 'none': ssl.CERT_NONE, } return query_value_to_constant[query_value] else: return None kombu-5.5.3/kombu/utils/uuid.py000066400000000000000000000005071477772317200164610ustar00rootroot00000000000000"""UUID utilities.""" from __future__ import annotations from typing import Callable from uuid import UUID, uuid4 def uuid(_uuid: Callable[[], UUID] = uuid4) -> str: """Generate unique id in UUID4 format. See Also -------- For now this is provided by :func:`uuid.uuid4`. 
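    Examples
    --------
    Output is random; the value shown is only illustrative:

        >>> uuid()                              # doctest: +SKIP
        '26c9d904-8a4c-4bb0-9b3d-5d7a9f2f2a61'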
""" return str(_uuid()) kombu-5.5.3/pyproject.toml000066400000000000000000000007421477772317200156010ustar00rootroot00000000000000[tool.codespell] ignore-words-list = "assertin" skip = "./.*,docs/AUTHORS.txt,docs/history/*,docs/spelling_wordlist.txt,Changelog.rst,CONTRIBUTORS.txt,*.key" [tool.coverage.run] branch = true cover_pylib = false include = ["*kombu/*"] omit = ["kombu.tests.*"] [tool.coverage.report] exclude_lines = [ "pragma: no cover", "if TYPE_CHECKING:", "except ImportError:" ] omit = [ "*/python?.?/*", "*/site-packages/*", "*/pypy/*", "*kombu/utils/debug.py", ] kombu-5.5.3/requirements/000077500000000000000000000000001477772317200154055ustar00rootroot00000000000000kombu-5.5.3/requirements/default.txt000066400000000000000000000001751477772317200175750ustar00rootroot00000000000000amqp>=5.1.1,<6.0.0 vine==5.1.0 backports.zoneinfo[tzdata]>=0.2.1; python_version<"3.9" tzdata>=2025.2; python_version>="3.9" kombu-5.5.3/requirements/dev.txt000066400000000000000000000001351477772317200167230ustar00rootroot00000000000000https://github.com/celery/py-amqp/zipball/main https://github.com/celery/vine/zipball/master kombu-5.5.3/requirements/docs.txt000066400000000000000000000002431477772317200170750ustar00rootroot00000000000000git+https://github.com/celery/sphinx_celery.git -r extras/redis.txt -r extras/mongodb.txt -r extras/sqlalchemy.txt -r extras/azureservicebus.txt -r extras/sqs.txt kombu-5.5.3/requirements/extras/000077500000000000000000000000001477772317200167135ustar00rootroot00000000000000kombu-5.5.3/requirements/extras/azureservicebus.txt000066400000000000000000000000311477772317200226670ustar00rootroot00000000000000azure-servicebus>=7.10.0 kombu-5.5.3/requirements/extras/azurestoragequeues.txt000066400000000000000000000000631477772317200234160ustar00rootroot00000000000000azure-storage-queue>=12.6.0 azure-identity>=1.12.0 kombu-5.5.3/requirements/extras/brotli.txt000066400000000000000000000001571477772317200207520ustar00rootroot00000000000000brotlipy>=0.7.0;platform_python_implementation=="PyPy" brotli>=1.0.9;platform_python_implementation=="CPython" kombu-5.5.3/requirements/extras/confluentkafka.txt000066400000000000000000000000271477772317200224460ustar00rootroot00000000000000confluent-kafka>=2.2.0 kombu-5.5.3/requirements/extras/consul.txt000066400000000000000000000000261477772317200207550ustar00rootroot00000000000000python-consul2==0.1.5 kombu-5.5.3/requirements/extras/couchdb.txt000066400000000000000000000000221477772317200210550ustar00rootroot00000000000000pycouchdb==1.16.0 kombu-5.5.3/requirements/extras/etcd.txt000066400000000000000000000000231477772317200203660ustar00rootroot00000000000000python-etcd>=0.4.3 kombu-5.5.3/requirements/extras/gcpubsub.txt000066400000000000000000000001341477772317200212640ustar00rootroot00000000000000google-cloud-pubsub>=2.18.4 google-cloud-monitoring>=2.16.0 grpcio==1.67.0 protobuf==4.25.5 kombu-5.5.3/requirements/extras/librabbitmq.txt000066400000000000000000000000541477772317200217430ustar00rootroot00000000000000librabbitmq>=2.0.0; python_version < '3.11' kombu-5.5.3/requirements/extras/lzma.txt000066400000000000000000000001431477772317200204150ustar00rootroot00000000000000--extra-index-url https://pypi.anaconda.org/nehaljwani/simple # Temporary index for Windows wheels kombu-5.5.3/requirements/extras/mongodb.txt000066400000000000000000000000171477772317200210770ustar00rootroot00000000000000pymongo>=4.1.1 kombu-5.5.3/requirements/extras/msgpack.txt000066400000000000000000000000171477772317200210770ustar00rootroot00000000000000msgpack==1.1.0 
kombu-5.5.3/requirements/extras/pyro.txt000066400000000000000000000000141477772317200204400ustar00rootroot00000000000000pyro4==4.82 kombu-5.5.3/requirements/extras/qpid.txt000066400000000000000000000000431477772317200204060ustar00rootroot00000000000000qpid-python>=0.26 qpid-tools>=0.26 kombu-5.5.3/requirements/extras/redis.txt000066400000000000000000000000451477772317200205610ustar00rootroot00000000000000redis>=4.5.2,!=4.5.5,!=5.0.2,<=5.2.1 kombu-5.5.3/requirements/extras/slmq.txt000066400000000000000000000000331477772317200204240ustar00rootroot00000000000000softlayer_messaging>=1.0.3 kombu-5.5.3/requirements/extras/sqlalchemy.txt000066400000000000000000000000301477772317200216070ustar00rootroot00000000000000sqlalchemy>=1.4.48,<2.1 kombu-5.5.3/requirements/extras/sqs.txt000066400000000000000000000000411477772317200202550ustar00rootroot00000000000000boto3>=1.26.143 urllib3>=1.26.16 kombu-5.5.3/requirements/extras/yaml.txt000066400000000000000000000000151477772317200204120ustar00rootroot00000000000000PyYAML>=3.10 kombu-5.5.3/requirements/extras/zookeeper.txt000066400000000000000000000000151477772317200214530ustar00rootroot00000000000000kazoo>=2.8.0 kombu-5.5.3/requirements/extras/zstd.txt000066400000000000000000000000221477772317200204320ustar00rootroot00000000000000zstandard==0.23.0 kombu-5.5.3/requirements/funtest.txt000066400000000000000000000003171477772317200176370ustar00rootroot00000000000000# redis transport redis>=4.5.2,!=5.0.2,!=4.5.5 # MongoDB transport pymongo==4.10.1 # Zookeeper transport kazoo==2.10.0 # SQS transport boto3>=1.26.143 # Qpid transport qpid-python>=0.26 qpid-tools>=0.26 kombu-5.5.3/requirements/pkgutils.txt000066400000000000000000000002551477772317200200120ustar00rootroot00000000000000setuptools>=47.0.0 wheel>=0.29.0 flake8==7.1.2 tox>=4.4.8 sphinx2rst>=1.0 bumpversion==0.6.0 pydocstyle==6.3.0 mypy==1.14.1 typing_extensions==4.12.2; python_version<"3.10" kombu-5.5.3/requirements/test-ci.txt000066400000000000000000000010651477772317200175200ustar00rootroot00000000000000pytest-cov==5.0.0; python_version<"3.9" pytest-cov==6.0.0; python_version>="3.9" codecov==2.1.13; sys_platform == 'win32' librabbitmq>=2.0.0; sys_platform == 'win32' -r extras/redis.txt pymongo>=4.1.1; sys_platform != 'win32' -r extras/yaml.txt -r extras/msgpack.txt -r extras/azureservicebus.txt -r extras/azurestoragequeues.txt boto3>=1.26.143; sys_platform != 'win32' urllib3>=1.26.16; sys_platform != 'win32' -r extras/consul.txt -r extras/zookeeper.txt -r extras/brotli.txt -r extras/zstd.txt -r extras/sqlalchemy.txt -r extras/etcd.txt -r extras/gcpubsub.txt kombu-5.5.3/requirements/test-integration.txt000066400000000000000000000000611477772317200214430ustar00rootroot00000000000000pytest-xdist==3.6.1 pytest-rerunfailures>=11.1.2 kombu-5.5.3/requirements/test.txt000066400000000000000000000002551477772317200171270ustar00rootroot00000000000000hypothesis<7 Pyro4==4.82 pytest-freezer==0.4.9 pytest-sugar==1.0.0 pytest==8.3.5 pre-commit>=3.5.0,<3.8.0; python_version < '3.9' pre-commit>=4.0.1; python_version >= '3.9' kombu-5.5.3/setup.cfg000066400000000000000000000041151477772317200145040ustar00rootroot00000000000000[tool:pytest] testpaths = t/unit/ python_classes = test_* [build_sphinx] source-dir = docs/ build-dir = docs/_build all_files = 1 [flake8] # classes can be lowercase, arguments and variables can be uppercase # whenever it makes the code more readable. 
max-line-length = 117 extend-ignore = # classes can be lowercase, arguments and variables can be uppercase # whenever it makes the code more readable. W504, N806, N802, N801, N803 # incompatible with black https://github.com/psf/black/issues/315#issuecomment-395457972 E203, # Missing docstring in public method D102, # Missing docstring in public package D104, # Missing docstring in magic method D105, # Missing docstring in __init__ D107, # First line should be in imperative mood; try rephrasing D401, # No blank lines allowed between a section header and its content D412, # ambiguous variable name '...' E741, # ambiguous class definition '...' E742, per-file-ignores = t/*,setup.py,examples/*,docs/*,extra/*: # docstrings D, [isort] add_imports = from __future__ import annotations [mypy] warn_unused_configs = True strict = False follow_imports = skip show_error_codes = True disallow_untyped_defs = True ignore_missing_imports = True files = kombu/abstract.py, kombu/utils/debug.py, kombu/utils/time.py, kombu/utils/uuid.py, t/unit/utils/test_uuid.py, kombu/utils/text.py, kombu/exceptions.py, t/unit/test_exceptions.py, kombu/clocks.py, t/unit/test_clocks.py, kombu/__init__.py, kombu/asynchronous/__init__.py, kombu/asynchronous/aws/__init__.py, kombu/asynchronous/aws/ext.py, kombu/asynchronous/aws/sqs/__init__.py, kombu/asynchronous/aws/sqs/ext.py, kombu/asynchronous/http/__init__.py, kombu/transport/__init__.py, kombu/transport/virtual/__init__.py, kombu/utils/__init__.py, kombu/matcher.py, kombu/asynchronous/semaphore.py [pep257] ignore = D102,D107,D104,D203,D105,D213,D401,D413,D417 [bdist_rpm] requires = amqp >= 5. [metadata] license_file = LICENSE kombu-5.5.3/setup.py000066400000000000000000000076771477772317200144150ustar00rootroot00000000000000#!/usr/bin/env python3 from __future__ import annotations import os import re import sys import setuptools from setuptools import setup # -- Parse meta re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_doc = re.compile(r'^"""(.+?)"""') def add_default(m): attr_name, attr_value = m.groups() return ((attr_name, attr_value.strip("\"'")),) def add_doc(m): return (('doc', m.groups()[0]),) pats = {re_meta: add_default, re_doc: add_doc} here = os.path.abspath(os.path.dirname(__file__)) meta_fh = open(os.path.join(here, 'kombu/__init__.py')) try: meta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: meta.update(handler(m)) finally: meta_fh.close() # -- def fullsplit(path, result=None): if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) # if os.path.exists('README.rst'): # long_description = codecs.open('README.rst', 'r', 'utf-8').read() # else: # long_description = 'See https://pypi.org/project/kombu/' # -*- Installation Requires -*- py_version = sys.version_info is_pypy = hasattr(sys, 'pypy_version_info') def strip_comments(line): return line.split('#', 1)[0].strip() def reqs(*f): with open(os.path.join(os.getcwd(), "requirements", *f)) as reqs_file: return [r for r in (strip_comments(line) for line in reqs_file) if r] def extras(*p): return reqs('extras', *p) def readme(): with open('README.rst') as f: return f.read() setup( name='kombu', packages=setuptools.find_packages(exclude=['t', 't.*']), version=meta['version'], description=meta['doc'], keywords='messaging message amqp rabbitmq redis actor producer consumer', author=meta['author'], 
author_email=meta['contact'], url=meta['homepage'], project_urls={ 'Source': 'https://github.com/celery/kombu' }, platforms=['any'], license='BSD-3-Clause', python_requires=">=3.8", install_requires=reqs('default.txt'), tests_require=reqs('test.txt'), extras_require={ 'msgpack': extras('msgpack.txt'), 'yaml': extras('yaml.txt'), 'redis': extras('redis.txt'), 'mongodb': extras('mongodb.txt'), 'sqs': extras('sqs.txt'), 'gcpubsub': extras('gcpubsub.txt'), 'zookeeper': extras('zookeeper.txt'), 'sqlalchemy': extras('sqlalchemy.txt'), 'librabbitmq': extras('librabbitmq.txt'), 'pyro': extras('pyro.txt'), 'slmq': extras('slmq.txt'), 'azurestoragequeues': extras('azurestoragequeues.txt'), 'azureservicebus': extras('azureservicebus.txt'), 'qpid': extras('qpid.txt'), 'consul': extras('consul.txt'), 'confluentkafka': extras('confluentkafka.txt'), }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Intended Audience :: Developers', 'Topic :: Communications', 'Topic :: System :: Distributed Computing', 'Topic :: System :: Networking', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) kombu-5.5.3/t/000077500000000000000000000000001477772317200131255ustar00rootroot00000000000000kombu-5.5.3/t/__init__.py000066400000000000000000000000001477772317200152240ustar00rootroot00000000000000kombu-5.5.3/t/integration/000077500000000000000000000000001477772317200154505ustar00rootroot00000000000000kombu-5.5.3/t/integration/__init__.py000066400000000000000000000001701477772317200175570ustar00rootroot00000000000000from __future__ import annotations import os import sys sys.path.insert(0, os.pardir) sys.path.insert(0, os.getcwd()) kombu-5.5.3/t/integration/common.py000066400000000000000000000477451477772317200173330ustar00rootroot00000000000000from __future__ import annotations import socket from contextlib import closing from time import sleep import pytest import kombu class BasicFunctionality: def test_connect(self, connection): assert connection.connect() assert connection.connection connection.close() assert connection.connection is None assert connection.connect() assert connection.connection connection.close() def test_failed_connect(self, invalid_connection): # method raises transport exception with pytest.raises(Exception): invalid_connection.connect() def test_failed_connection(self, invalid_connection): # method raises transport exception with pytest.raises(Exception): invalid_connection.connection def test_failed_channel(self, invalid_connection): # method raises transport exception with pytest.raises(Exception): invalid_connection.channel() def test_failed_default_channel(self, invalid_connection): invalid_connection.transport_options = {'max_retries': 1} # method raises transport exception with pytest.raises(Exception): invalid_connection.default_channel def test_default_channel_autoconnect(self, connection): connection.connect() connection.close() assert connection.connection is None assert 
connection.default_channel assert connection.connection connection.close() def test_channel(self, connection): chan = connection.channel() assert chan assert connection.connection def test_default_channel(self, connection): chan = connection.default_channel assert chan assert connection.connection def test_publish_consume(self, connection): test_queue = kombu.Queue('test', routing_key='test') def callback(body, message): assert body == {'hello': 'world'} assert message.content_type == 'application/x-python-serialize' message.delivery_info['routing_key'] == 'test' message.delivery_info['exchange'] == '' message.ack() assert message.payload == body with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) producer.publish( {'hello': 'world'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle' ) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) def test_consume_empty_queue(self, connection): def callback(body, message): assert False, 'Callback should not be called' test_queue = kombu.Queue('test_empty', routing_key='test_empty') with connection as conn: with conn.channel(): consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: with pytest.raises(socket.timeout): conn.drain_events(timeout=1) def test_simple_queue_publish_consume(self, connection): with connection as conn: with closing(conn.SimpleQueue('simple_queue_test')) as queue: queue.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = queue.get(timeout=1) assert message.payload == {'Hello': 'World'} assert message.content_type == 'application/json' assert message.content_encoding == 'utf-8' assert message.headers == {'k1': 'v1'} message.ack() def test_simple_buffer_publish_consume(self, connection): with connection as conn: with closing(conn.SimpleBuffer('simple_buffer_test')) as buf: buf.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = buf.get(timeout=1) assert message.payload == {'Hello': 'World'} assert message.content_type == 'application/json' assert message.content_encoding == 'utf-8' assert message.headers == {'k1': 'v1'} message.ack() class BaseExchangeTypes: def _callback(self, body, message): message.ack() assert body == {'hello': 'world'} assert message.content_type == 'application/x-python-serialize' message.delivery_info['routing_key'] == 'test' message.delivery_info['exchange'] == '' assert message.payload == body def _create_consumer(self, connection, queue): consumer = kombu.Consumer( connection, [queue], accept=['pickle'] ) consumer.register_callback(self._callback) return consumer def _consume_from(self, connection, consumer): with consumer: connection.drain_events(timeout=1) def _consume(self, connection, queue): with self._create_consumer(connection, queue): connection.drain_events(timeout=1) def _publish(self, channel, exchange, queues=None, routing_key=None): producer = kombu.Producer(channel, exchange=exchange) if routing_key: producer.publish( {'hello': 'world'}, declare=list(queues) if queues else None, serializer='pickle', routing_key=routing_key ) else: producer.publish( {'hello': 'world'}, declare=list(queues) if queues else None, serializer='pickle' ) def test_direct(self, connection): ex = kombu.Exchange('test_direct', type='direct') test_queue = kombu.Queue('direct1', exchange=ex) with connection as conn: with conn.channel() as channel: 
self._publish(channel, ex, [test_queue]) self._consume(conn, test_queue) def test_direct_routing_keys(self, connection): ex = kombu.Exchange('test_rk_direct', type='direct') test_queue1 = kombu.Queue('rk_direct1', exchange=ex, routing_key='d1') test_queue2 = kombu.Queue('rk_direct2', exchange=ex, routing_key='d2') with connection as conn: with conn.channel() as channel: self._publish(channel, ex, [test_queue1, test_queue2], 'd1') self._consume(conn, test_queue1) # direct2 queue should not have data with pytest.raises(socket.timeout): self._consume(conn, test_queue2) # test that publishing using key which is not used results in # discarted message. self._publish(channel, ex, [test_queue1, test_queue2], 'd3') with pytest.raises(socket.timeout): self._consume(conn, test_queue1) with pytest.raises(socket.timeout): self._consume(conn, test_queue2) def test_fanout(self, connection): ex = kombu.Exchange('test_fanout', type='fanout') test_queue1 = kombu.Queue('fanout1', exchange=ex) test_queue2 = kombu.Queue('fanout2', exchange=ex) with connection as conn: with conn.channel() as channel: self._publish(channel, ex, [test_queue1, test_queue2]) self._consume(conn, test_queue1) self._consume(conn, test_queue2) def test_topic(self, connection): ex = kombu.Exchange('test_topic', type='topic') test_queue1 = kombu.Queue('topic1', exchange=ex, routing_key='t.*') test_queue2 = kombu.Queue('topic2', exchange=ex, routing_key='t.*') test_queue3 = kombu.Queue('topic3', exchange=ex, routing_key='t') with connection as conn: with conn.channel() as channel: self._publish( channel, ex, [test_queue1, test_queue2, test_queue3], routing_key='t.1' ) self._consume(conn, test_queue1) self._consume(conn, test_queue2) with pytest.raises(socket.timeout): # topic3 queue should not have data self._consume(conn, test_queue3) def test_publish_empty_exchange(self, connection): ex = kombu.Exchange('test_empty_exchange', type='topic') with connection as conn: with conn.channel() as channel: self._publish( channel, ex, routing_key='t.1' ) class BaseTimeToLive: def test_publish_consume(self, connection): test_queue = kombu.Queue('ttl_test', routing_key='ttl_test') def callback(body, message): assert False, 'Callback should not be called' with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) producer.publish( {'hello': 'world'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', expiration=2 ) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) sleep(3) with consumer: with pytest.raises(socket.timeout): conn.drain_events(timeout=1) def test_simple_queue_publish_consume(self, connection): with connection as conn: with closing(conn.SimpleQueue('ttl_simple_queue_test')) as queue: queue.put( {'Hello': 'World'}, headers={'k1': 'v1'}, expiration=2 ) sleep(3) with pytest.raises(queue.Empty): queue.get(timeout=1) def test_simple_buffer_publish_consume(self, connection): with connection as conn: with closing(conn.SimpleBuffer('ttl_simple_buffer_test')) as buf: buf.put({'Hello': 'World'}, headers={'k1': 'v1'}, expiration=2) sleep(3) with pytest.raises(buf.Empty): buf.get(timeout=1) class BasePriority: PRIORITY_ORDER = 'asc' def test_publish_consume(self, connection): # py-amqp transport has higher numbers higher priority # redis transport has lower numbers higher priority if self.PRIORITY_ORDER == 'asc': prio_high = 6 prio_low = 3 else: prio_high = 3 prio_low = 6 test_queue = 
kombu.Queue( 'priority_test', routing_key='priority_test', max_priority=10 ) received_messages = [] def callback(body, message): received_messages.append(body) message.ack() with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, prio_low], [{'msg': 'second'}, prio_high], [{'msg': 'third'}, prio_low], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) # Second message must be received first assert received_messages[0] == {'msg': 'second'} assert received_messages[1] == {'msg': 'first'} assert received_messages[2] == {'msg': 'third'} def test_publish_requeue_consume(self, connection): # py-amqp transport has higher numbers higher priority # redis transport has lower numbers higher priority if self.PRIORITY_ORDER == 'asc': prio_max = 9 prio_high = 6 prio_low = 3 else: prio_max = 0 prio_high = 3 prio_low = 6 test_queue = kombu.Queue( 'priority_requeue_test', routing_key='priority_requeue_test', max_priority=10 ) received_messages = [] received_message_bodies = [] def callback(body, message): received_messages.append(message) received_message_bodies.append(body) # don't ack the message so it can be requeued with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, prio_low], [{'msg': 'second'}, prio_high], [{'msg': 'third'}, prio_low], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: # drain_events() returns just on number in # Virtual transports conn.drain_events(timeout=1) # requeue the messages for msg in received_messages: msg.requeue() received_messages.clear() received_message_bodies.clear() # add a fourth max priority message producer.publish( {'msg': 'fourth'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio_max ) # Sleep to make sure that queue sorted based on priority sleep(0.5) with consumer: conn.drain_events(timeout=1) # Fourth message must be received first assert received_message_bodies[0] == {'msg': 'fourth'} assert received_message_bodies[1] == {'msg': 'second'} assert received_message_bodies[2] == {'msg': 'first'} assert received_message_bodies[3] == {'msg': 'third'} def test_simple_queue_publish_consume(self, connection): if self.PRIORITY_ORDER == 'asc': prio_high = 7 prio_low = 1 else: prio_high = 1 prio_low = 7 with connection as conn: with closing( conn.SimpleQueue( 'priority_simple_queue_test', queue_opts={'max_priority': 10} ) ) as queue: for msg, prio in [ [{'msg': 'first'}, prio_low], [{'msg': 'second'}, prio_high], [{'msg': 'third'}, prio_low], ]: queue.put( msg, headers={'k1': 'v1'}, priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) # Second message must be received first for data in [ {'msg': 'second'}, {'msg': 'first'}, {'msg': 'third'}, ]: msg = queue.get(timeout=1) msg.ack() assert 
msg.payload == data def test_simple_buffer_publish_consume(self, connection): if self.PRIORITY_ORDER == 'asc': prio_high = 6 prio_low = 2 else: prio_high = 2 prio_low = 6 with connection as conn: with closing( conn.SimpleBuffer( 'priority_simple_buffer_test', queue_opts={'max_priority': 10} ) ) as buf: for msg, prio in [ [{'msg': 'first'}, prio_low], [{'msg': 'second'}, prio_high], [{'msg': 'third'}, prio_low], ]: buf.put( msg, headers={'k1': 'v1'}, priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) # Second message must be received first for data in [ {'msg': 'second'}, {'msg': 'first'}, {'msg': 'third'}, ]: msg = buf.get(timeout=1) msg.ack() assert msg.payload == data class BaseMessage: def test_ack(self, connection): with connection as conn: with closing(conn.SimpleQueue('test_ack')) as queue: queue.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = queue.get_nowait() message.ack() with pytest.raises(queue.Empty): queue.get_nowait() def test_reject_no_requeue(self, connection): with connection as conn: with closing(conn.SimpleQueue('test_reject_no_requeue')) as queue: queue.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = queue.get_nowait() message.reject(requeue=False) with pytest.raises(queue.Empty): queue.get_nowait() def test_reject_requeue(self, connection): with connection as conn: with closing(conn.SimpleQueue('test_reject_requeue')) as queue: queue.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = queue.get_nowait() message.reject(requeue=True) message2 = queue.get_nowait() assert message.body == message2.body message2.ack() def test_requeue(self, connection): with connection as conn: with closing(conn.SimpleQueue('test_requeue')) as queue: queue.put({'Hello': 'World'}, headers={'k1': 'v1'}) message = queue.get_nowait() message.requeue() message2 = queue.get_nowait() assert message.body == message2.body message2.ack() class BaseFailover(BasicFunctionality): def test_connect(self, failover_connection): super().test_connect(failover_connection) def test_publish_consume(self, failover_connection): super().test_publish_consume(failover_connection) def test_consume_empty_queue(self, failover_connection): super().test_consume_empty_queue(failover_connection) def test_simple_buffer_publish_consume(self, failover_connection): super().test_simple_buffer_publish_consume( failover_connection ) kombu-5.5.3/t/integration/test_kafka.py000066400000000000000000000026701477772317200201430ustar00rootroot00000000000000from __future__ import annotations import pytest import kombu from .common import (BaseExchangeTypes, BaseFailover, BaseMessage, BasicFunctionality) def get_connection(hostname, port): return kombu.Connection( f'confluentkafka://{hostname}:{port}', ) def get_failover_connection(hostname, port): return kombu.Connection( f'confluentkafka://localhost:12345;confluentkafka://{hostname}:{port}', connect_timeout=10, ) @pytest.fixture() def invalid_connection(): return kombu.Connection('confluentkafka://localhost:12345') @pytest.fixture() def connection(): return get_connection( hostname='localhost', port='9092' ) @pytest.fixture() def failover_connection(): return get_failover_connection( hostname='localhost', port='9092' ) @pytest.mark.env('kafka') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_KafkaBasicFunctionality(BasicFunctionality): pass @pytest.mark.env('kafka') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_KafkaBaseExchangeTypes(BaseExchangeTypes): @pytest.mark.skip('fanout is not implemented') def 
test_fanout(self, connection): pass @pytest.mark.env('kafka') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_KafkaFailover(BaseFailover): pass @pytest.mark.env('kafka') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_KafkaMessage(BaseMessage): pass kombu-5.5.3/t/integration/test_mongodb.py000066400000000000000000000143361477772317200205150ustar00rootroot00000000000000from __future__ import annotations import os import pytest import kombu from .common import (BaseExchangeTypes, BaseMessage, BasePriority, BasicFunctionality) def get_connection(hostname, port, vhost): return kombu.Connection( f'mongodb://{hostname}:{port}/{vhost}', transport_options={'ttl': True}, ) @pytest.fixture() def invalid_connection(): return kombu.Connection('mongodb://localhost:12345?connectTimeoutMS=1') @pytest.fixture() def connection(request): return get_connection( hostname=os.environ.get('MONGODB_HOST', 'localhost'), port=os.environ.get('MONGODB_27017_TCP', '27017'), vhost=getattr( request.config, "slaveinput", {} ).get("slaveid", 'tests'), ) @pytest.mark.env('mongodb') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_MongoDBBasicFunctionality(BasicFunctionality): pass @pytest.mark.env('mongodb') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_MongoDBBaseExchangeTypes(BaseExchangeTypes): # MongoDB consumer skips old messages upon initialization. # Ensure that it's created before test messages are published. def test_fanout(self, connection): ex = kombu.Exchange('test_fanout', type='fanout') test_queue1 = kombu.Queue('fanout1', exchange=ex) consumer1 = self._create_consumer(connection, test_queue1) test_queue2 = kombu.Queue('fanout2', exchange=ex) consumer2 = self._create_consumer(connection, test_queue2) with connection as conn: with conn.channel() as channel: self._publish(channel, ex, [test_queue1, test_queue2]) self._consume_from(conn, consumer1) self._consume_from(conn, consumer2) @pytest.mark.env('mongodb') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_MongoDBPriority(BasePriority): # drain_events() consumes only one value unlike in py-amqp. 
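    # The overridden tests below therefore call drain_events() once per
    # expected message instead of relying on a single call.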
def test_publish_consume(self, connection): test_queue = kombu.Queue( 'priority_test', routing_key='priority_test', max_priority=10 ) received_messages = [] def callback(body, message): received_messages.append(body) message.ack() with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, 3], [{'msg': 'second'}, 6], [{'msg': 'third'}, 3], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # Second message must be received first assert received_messages[0] == {'msg': 'second'} assert received_messages[1] == {'msg': 'first'} assert received_messages[2] == {'msg': 'third'} def test_publish_requeue_consume(self, connection): test_queue = kombu.Queue( 'priority_requeue_test', routing_key='priority_requeue_test', max_priority=10 ) received_messages = [] received_message_bodies = [] def callback(body, message): received_messages.append(message) received_message_bodies.append(body) # don't ack the message so it can be requeued with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, 3], [{'msg': 'second'}, 6], [{'msg': 'third'}, 3], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # requeue the messages for msg in received_messages: msg.requeue() received_messages.clear() received_message_bodies.clear() # add a fourth higher priority message producer.publish( {'msg': 'fourth'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=9 # highest priority ) with consumer: conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # Fourth message must be received first assert received_message_bodies[0] == {'msg': 'fourth'} assert received_message_bodies[1] == {'msg': 'second'} assert received_message_bodies[2] == {'msg': 'first'} assert received_message_bodies[3] == {'msg': 'third'} @pytest.mark.env('mongodb') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_MongoDBMessage(BaseMessage): pass kombu-5.5.3/t/integration/test_py_amqp.py000066400000000000000000000103211477772317200205240ustar00rootroot00000000000000from __future__ import annotations import os import uuid import pytest from amqp.exceptions import NotFound import kombu from kombu.connection import ConnectionPool from .common import (BaseExchangeTypes, BaseFailover, BaseMessage, BasePriority, BaseTimeToLive, BasicFunctionality) def get_connection(hostname, port, vhost): return kombu.Connection(f'pyamqp://{hostname}:{port}') def get_failover_connection(hostname, port, vhost): return kombu.Connection( f'pyamqp://localhost:12345;pyamqp://{hostname}:{port}' ) def get_confirm_connection(hostname, port): return kombu.Connection( f"pyamqp://{hostname}:{port}", transport_options={"confirm_publish": True} ) @pytest.fixture() def 
invalid_connection(): return kombu.Connection('pyamqp://localhost:12345') @pytest.fixture() def connection(request): return get_connection( hostname=os.environ.get('RABBITMQ_HOST', 'localhost'), port=os.environ.get('RABBITMQ_5672_TCP', '5672'), vhost=getattr( request.config, "slaveinput", {} ).get("slaveid", None), ) @pytest.fixture() def failover_connection(request): return get_failover_connection( hostname=os.environ.get('RABBITMQ_HOST', 'localhost'), port=os.environ.get('RABBITMQ_5672_TCP', '5672'), vhost=getattr( request.config, "slaveinput", {} ).get("slaveid", None), ) @pytest.fixture() def confirm_publish_connection(): return get_confirm_connection( hostname=os.environ.get("RABBITMQ_HOST", "localhost"), port=os.environ.get("RABBITMQ_5672_TCP", "5672"), ) @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPBasicFunctionality(BasicFunctionality): pass @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPBaseExchangeTypes(BaseExchangeTypes): pass @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPTimeToLive(BaseTimeToLive): pass @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPPriority(BasePriority): pass @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPFailover(BaseFailover): pass @pytest.mark.env('py-amqp') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPMessage(BaseMessage): pass @pytest.mark.env("py-amqp") @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_PyAMQPConnectionPool: def test_publish_confirm_does_not_block(self, confirm_publish_connection): """Tests that the connection pool closes connections in case of an exception. In case an exception occurs while the connection is in use, the pool should close the exception. In case the connection is not closed before releasing it back to the pool, the connection would remain in an unusable state, causing causing the next publish call to time out or block forever in case no timeout is specified. 
""" pool = ConnectionPool(connection=confirm_publish_connection, limit=1) try: with pool.acquire(block=True) as connection: producer = kombu.Producer(connection) queue = kombu.Queue( f"test-queue-{uuid.uuid4()}", channel=connection ) queue.declare() producer.publish( {"foo": "bar"}, routing_key=str(uuid.uuid4()), retry=False ) assert connection.connected queue.delete() try: queue.get() except NotFound: raise except NotFound: pass with pool.acquire(block=True) as connection: assert not connection.connected producer = kombu.Producer(connection) queue = kombu.Queue( f"test-queue-{uuid.uuid4()}", channel=connection ) queue.declare() # In case the connection is broken, we should get a Timeout here producer.publish( {"foo": "bar"}, routing_key=str(uuid.uuid4()), retry=False, timeout=3 ) kombu-5.5.3/t/integration/test_redis.py000066400000000000000000000212531477772317200201720ustar00rootroot00000000000000from __future__ import annotations import os import socket from time import sleep import pytest import redis import kombu from kombu.transport.redis import Transport from .common import (BaseExchangeTypes, BaseMessage, BasePriority, BasicFunctionality) def get_connection( hostname, port, vhost, user_name=None, password=None, transport_options=None): credentials = f'{user_name}:{password}@' if user_name else '' return kombu.Connection( f'redis://{credentials}{hostname}:{port}', transport_options=transport_options ) @pytest.fixture(params=[None, {'global_keyprefix': '_prefixed_'}]) def connection(request): # this fixture yields plain connections to broker and TLS encrypted return get_connection( hostname=os.environ.get('REDIS_HOST', 'localhost'), port=os.environ.get('REDIS_6379_TCP', '6379'), vhost=getattr( request.config, "slaveinput", {} ).get("slaveid", None), transport_options=request.param ) @pytest.fixture() def invalid_connection(): return kombu.Connection('redis://localhost:12345') @pytest.mark.env('redis') def test_failed_credentials(): """Tests denied connection when wrong credentials were provided""" with pytest.raises(redis.exceptions.AuthenticationError): get_connection( hostname=os.environ.get('REDIS_HOST', 'localhost'), port=os.environ.get('REDIS_6379_TCP', '6379'), vhost=None, user_name='wrong_redis_user', password='wrong_redis_password' ).connect() @pytest.mark.env('redis') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_RedisBasicFunctionality(BasicFunctionality): def test_failed_connection__ConnectionError(self, invalid_connection): # method raises transport exception with pytest.raises(redis.exceptions.ConnectionError) as ex: invalid_connection.connection assert ex.type in Transport.connection_errors @pytest.mark.env('redis') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_RedisBaseExchangeTypes(BaseExchangeTypes): pass @pytest.mark.env('redis') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_RedisPriority(BasePriority): # Comparing to py-amqp transport has Redis transport several # differences: # 1. Order of priorities is reversed # 2. 
drain_events() consumes only single value # redis transport has lower numbers higher priority PRIORITY_ORDER = 'desc' def test_publish_consume(self, connection): test_queue = kombu.Queue( 'priority_test', routing_key='priority_test', max_priority=10 ) received_messages = [] def callback(body, message): received_messages.append(body) message.ack() with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, 6], [{'msg': 'second'}, 3], [{'msg': 'third'}, 6], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: # drain_events() returns just on number in # Virtual transports conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # Second message must be received first assert received_messages[0] == {'msg': 'second'} assert received_messages[1] == {'msg': 'first'} assert received_messages[2] == {'msg': 'third'} def test_publish_requeue_consume(self, connection): test_queue = kombu.Queue( 'priority_requeue_test', routing_key='priority_requeue_test', max_priority=10 ) received_messages = [] received_message_bodies = [] def callback(body, message): received_messages.append(message) received_message_bodies.append(body) # don't ack the message so it can be requeued with connection as conn: with conn.channel() as channel: producer = kombu.Producer(channel) for msg, prio in [ [{'msg': 'first'}, 6], [{'msg': 'second'}, 3], [{'msg': 'third'}, 6], ]: producer.publish( msg, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=prio ) # Sleep to make sure that queue sorted based on priority sleep(0.5) consumer = kombu.Consumer( conn, [test_queue], accept=['pickle'] ) consumer.register_callback(callback) with consumer: # drain_events() consumes only one value unlike in py-amqp. 
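                    # Three messages were published above, so drain three
                    # times to receive them all.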
conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # requeue the messages for msg in received_messages: msg.requeue() received_messages.clear() received_message_bodies.clear() # add a fourth higher priority message producer.publish( {'msg': 'fourth'}, retry=True, exchange=test_queue.exchange, routing_key=test_queue.routing_key, declare=[test_queue], serializer='pickle', priority=0 # highest priority ) with consumer: conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) conn.drain_events(timeout=1) # Fourth message must be received first assert received_message_bodies[0] == {'msg': 'fourth'} assert received_message_bodies[1] == {'msg': 'second'} assert received_message_bodies[2] == {'msg': 'first'} assert received_message_bodies[3] == {'msg': 'third'} @pytest.mark.env('redis') @pytest.mark.flaky(reruns=5, reruns_delay=2) class test_RedisMessage(BaseMessage): pass @pytest.mark.env('redis') def test_RedisConnectTimeout(monkeypatch): # simulate a connection timeout for a new connection def connect_timeout(self): raise socket.timeout monkeypatch.setattr( redis.connection.Connection, "_connect", connect_timeout) # ensure the timeout raises a TimeoutError with pytest.raises(redis.exceptions.TimeoutError): # note the host/port here is irrelevant because # connect will raise a socket.timeout kombu.Connection('redis://localhost:12345').connect() @pytest.mark.env('redis') def test_RedisConnection_check_hostname(monkeypatch): # simulate a connection timeout for a new connection def connect_check_certificate(self): if self.check_hostname: raise OSError("check_hostname=True") raise socket.timeout("check_hostname=False") monkeypatch.setattr( redis.connection.SSLConnection, "_connect", connect_check_certificate) # ensure the timeout raises a TimeoutError with pytest.raises(redis.exceptions.TimeoutError): # note the host/port here is irrelevant because # connect will raise a socket.timeout, not a CertificateError kombu.Connection('rediss://localhost:12345?ssl_check_hostname=false').connect() with pytest.raises(redis.exceptions.ConnectionError): # note the host/port here is irrelevant because # connect will raise a CertificateError due to hostname mismatch kombu.Connection('rediss://localhost:12345?ssl_check_hostname=true').connect() kombu-5.5.3/t/mocks.py000066400000000000000000000135701477772317200146210ustar00rootroot00000000000000from __future__ import annotations import time from itertools import count from typing import TYPE_CHECKING from unittest.mock import Mock from kombu.transport import base from kombu.utils import json if TYPE_CHECKING: from types import TracebackType class _ContextMock(Mock): """Dummy class implementing __enter__ and __exit__ as the :keyword:`with` statement requires these to be implemented in the class, not just the instance.""" def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: pass def ContextMock(*args, **kwargs): """Mock that mocks :keyword:`with` statement contexts.""" obj = _ContextMock(*args, **kwargs) obj.attach_mock(_ContextMock(), '__enter__') obj.attach_mock(_ContextMock(), '__exit__') obj.__enter__.return_value = obj # if __exit__ return a value the exception is ignored, # so it must return None here. 
obj.__exit__.return_value = None return obj def PromiseMock(*args, **kwargs): m = Mock(*args, **kwargs) def on_throw(exc=None, *args, **kwargs): if exc: raise exc raise m.throw.side_effect = on_throw m.set_error_state.side_effect = on_throw m.throw1.side_effect = on_throw return m class MockPool: def __init__(self, value=None): self.value = value or ContextMock() def acquire(self, **kwargs): return self.value class Message(base.Message): def __init__(self, *args, **kwargs): self.throw_decode_error = kwargs.get('throw_decode_error', False) super().__init__(*args, **kwargs) def decode(self): if self.throw_decode_error: raise ValueError("can't decode message") return super().decode() class Channel(base.StdChannel): open = True throw_decode_error = False _ids = count(1) def __init__(self, connection): self.connection = connection self.called = [] self.deliveries = count(1) self.to_deliver = [] self.events = {'basic_return': set()} self.channel_id = next(self._ids) def _called(self, name): self.called.append(name) def __contains__(self, key): return key in self.called def exchange_declare(self, *args, **kwargs): self._called('exchange_declare') def prepare_message(self, body, priority=0, content_type=None, content_encoding=None, headers=None, properties={}): self._called('prepare_message') return {'body': body, 'headers': headers, 'properties': properties, 'priority': priority, 'content_type': content_type, 'content_encoding': content_encoding} def basic_publish(self, message, exchange='', routing_key='', mandatory=False, immediate=False, **kwargs): self._called('basic_publish') return message, exchange, routing_key def exchange_delete(self, *args, **kwargs): self._called('exchange_delete') def queue_declare(self, *args, **kwargs): self._called('queue_declare') def queue_bind(self, *args, **kwargs): self._called('queue_bind') def queue_unbind(self, *args, **kwargs): self._called('queue_unbind') def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): self._called('queue_delete') def basic_get(self, *args, **kwargs): self._called('basic_get') try: return self.to_deliver.pop() except IndexError: pass def queue_purge(self, *args, **kwargs): self._called('queue_purge') def basic_consume(self, *args, **kwargs): self._called('basic_consume') def basic_cancel(self, *args, **kwargs): self._called('basic_cancel') def basic_ack(self, *args, **kwargs): self._called('basic_ack') def basic_recover(self, requeue=False): self._called('basic_recover') def exchange_bind(self, *args, **kwargs): self._called('exchange_bind') def exchange_unbind(self, *args, **kwargs): self._called('exchange_unbind') def close(self): self._called('close') def message_to_python(self, message, *args, **kwargs): self._called('message_to_python') return Message(body=json.dumps(message), channel=self, delivery_tag=next(self.deliveries), throw_decode_error=self.throw_decode_error, content_type='application/json', content_encoding='utf-8') def flow(self, active): self._called('flow') def basic_reject(self, delivery_tag, requeue=False): if requeue: return self._called('basic_reject:requeue') return self._called('basic_reject') def basic_qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): self._called('basic_qos') class Connection: connected = True def __init__(self, client): self.client = client def channel(self): return Channel(self) class Transport(base.Transport): def establish_connection(self): return Connection(self.client) def create_channel(self, connection): return connection.channel() def 
drain_events(self, connection, **kwargs): return 'event' def close_connection(self, connection): connection.connected = False class TimeoutingTransport(Transport): recoverable_connection_errors = (TimeoutError,) def __init__(self, connect_timeout=1, **kwargs): self.connect_timeout = connect_timeout super().__init__(**kwargs) def establish_connection(self): time.sleep(self.connect_timeout) raise TimeoutError('timed out') kombu-5.5.3/t/skip.py000066400000000000000000000004331477772317200144450ustar00rootroot00000000000000from __future__ import annotations import sys import pytest if_pypy = pytest.mark.skipif( getattr(sys, 'pypy_version_info', None), reason='PyPy not supported.' ) if_win32 = pytest.mark.skipif( sys.platform.startswith('win32'), reason='Does not work on Windows' ) kombu-5.5.3/t/unit/000077500000000000000000000000001477772317200141045ustar00rootroot00000000000000kombu-5.5.3/t/unit/__init__.py000066400000000000000000000000001477772317200162030ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/000077500000000000000000000000001477772317200166375ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/__init__.py000066400000000000000000000000001477772317200207360ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/aws/000077500000000000000000000000001477772317200174315ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/aws/__init__.py000066400000000000000000000000001477772317200215300ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/aws/case.py000066400000000000000000000003101477772317200207100ustar00rootroot00000000000000from __future__ import annotations import pytest import t.skip pytest.importorskip('boto3') pytest.importorskip('urllib3') @t.skip.if_pypy @pytest.mark.usefixtures('hub') class AWSCase: pass kombu-5.5.3/t/unit/asynchronous/aws/sqs/000077500000000000000000000000001477772317200202375ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/aws/sqs/__init__.py000066400000000000000000000000001477772317200223360ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/aws/sqs/test_connection.py000066400000000000000000000546031477772317200240170ustar00rootroot00000000000000from __future__ import annotations import json from unittest import mock from unittest.mock import MagicMock, Mock from kombu.asynchronous.aws.ext import AWSRequest, boto3 from kombu.asynchronous.aws.sqs.connection import (AsyncSQSConnection, _query_object_encode) from kombu.asynchronous.aws.sqs.message import AsyncMessage from kombu.asynchronous.aws.sqs.queue import AsyncQueue from kombu.utils.uuid import uuid from t.mocks import PromiseMock from ..case import AWSCase SQS_URL = 'https://sqs.us-west-2.amazonaws.com/' class test_AsyncSQSConnection(AWSCase): def setup_method(self): session = boto3.session.Session( aws_access_key_id='AAA', aws_secret_access_key='AAAA', region_name='us-west-2', ) self.sqs_client = session.client('sqs') self.x = AsyncSQSConnection(self.sqs_client, 'ak', 'sk', http_client=Mock()) self.x.get_object = Mock(name='X.get_object') self.x.get_status = Mock(name='X.get_status') self.x.get_list = Mock(name='X.get_list') self.callback = PromiseMock(name='callback') self.sqs_client.get_queue_url = MagicMock(return_value={ 'QueueUrl': 'http://aws.com' }) def MockRequest(self): return AWSRequest( method='POST', url='https://aws.com', ) def MockOperationModel(self, operation_name, method): mock = MagicMock() mock.configure_mock( http=MagicMock( get=MagicMock( return_value=method, ) ), name=operation_name, metadata={ 'jsonVersion': '1.0', 
'targetPrefix': 'sqs', } ) return mock def MockServiceModel(self, operation_name, method): service_model = MagicMock() service_model.protocol = 'json', service_model.operation_model = MagicMock( return_value=self.MockOperationModel(operation_name, method) ) return service_model def assert_requests_equal(self, req1, req2): assert req1.url == req2.url assert req1.method == req2.method assert req1.data == req2.data assert req1.params == req2.params assert dict(req1.headers) == dict(req2.headers) def test_fetch_attributes_on_construction(self): """Verify default fetch_message_attributes can be set at construction.""" x = AsyncSQSConnection( self.sqs_client, 'ak', 'sk', http_client=Mock(), fetch_message_attributes=["AttributeOne"], ) assert x.fetch_message_attributes == ["AttributeOne"] # Default value for backwards compatibility assert self.x.fetch_message_attributes == ["ApproximateReceiveCount"] def test_create_query_request_get(self): # Query Protocol GET call per # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests-xml.html operation_name = 'CreateQueue' params = { 'DefaultVisibilityTimeout': 40, 'QueueName': 'celery-test', 'Version': '2012-11-05', } verb = 'GET' req = self.x._create_query_request(operation_name, params, SQS_URL, verb) self.assert_requests_equal(req, AWSRequest( url=SQS_URL, method=verb, data=None, params={ 'Action': operation_name, **params }, headers={}, )) prepared = req.prepare() # without signing for test assert prepared.method == 'GET' assert prepared.url == ( 'https://sqs.us-west-2.amazonaws.com/?' 'DefaultVisibilityTimeout=40' '&QueueName=celery-test' '&Version=2012-11-05' '&Action=CreateQueue' ) assert prepared.headers == {} assert prepared.body is None def test_create_query_request(self): operation_name = 'ReceiveMessage' params = { 'MaxNumberOfMessages': 10, 'AttributeName.1': 'ApproximateReceiveCount', 'WaitTimeSeconds': 20 } queue_url = f'{SQS_URL}123456789012/celery-test' verb = 'POST' req = self.x._create_query_request(operation_name, params, queue_url, verb) self.assert_requests_equal(req, AWSRequest( url=queue_url, method=verb, data={ 'Action': operation_name, **params }, headers={ 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', }, )) prepared = req.prepare() # without signing for test assert prepared.method == 'POST' assert prepared.url == queue_url assert prepared.headers == { 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', 'Content-Length': mock.ANY, } assert prepared.body == ( 'MaxNumberOfMessages=10' '&AttributeName.1=ApproximateReceiveCount' '&WaitTimeSeconds=20' '&Action=ReceiveMessage' ) def test_create_json_request(self): operation_name = 'ReceiveMessage' method = 'POST' params = { 'MaxNumberOfMessages': 10, 'AttributeNames': ['ApproximateReceiveCount'], 'WaitTimeSeconds': 20 } queue_url = f'{SQS_URL}123456789012/celery-test' self.x.sqs_connection = Mock() self.x.sqs_connection._request_signer = Mock() self.x.sqs_connection._endpoint.host = SQS_URL self.x.sqs_connection.meta.service_model = Mock() self.x.sqs_connection.meta.service_model.protocol = 'json', self.x.sqs_connection.meta.service_model.operation_model = MagicMock( return_value=self.MockOperationModel(operation_name, method) ) req = self.x._create_json_request(operation_name, params, queue_url) self.assert_requests_equal(req, AWSRequest( url=SQS_URL, method=method, data=json.dumps({ **params, "QueueUrl": queue_url }).encode(), headers={ 'Content-Type': 'application/x-amz-json-1.0', 'X-Amz-Target': 
f'sqs.{operation_name}' }, )) prepared = req.prepare() # without signing for test assert prepared.method == 'POST' assert prepared.url == SQS_URL assert prepared.headers == { 'Content-Type': 'application/x-amz-json-1.0', 'X-Amz-Target': 'sqs.ReceiveMessage', 'Content-Length': mock.ANY, } assert json.loads(prepared.body) == { 'MaxNumberOfMessages': 10, 'AttributeNames': ['ApproximateReceiveCount'], 'WaitTimeSeconds': 20, 'QueueUrl': queue_url, } def test_make_request__with_query_protocol(self): # Do the necessary mocking. self.x.sqs_connection = Mock() self.x.sqs_connection._request_signer = Mock() self.x.sqs_connection.meta.service_model.protocol = 'query' self.x._create_query_request = Mock(return_value=self.MockRequest()) # Execute the make_request called and confirm we are creating a # query request. operation = 'ReceiveMessage', params = { 'MaxNumberOfMessages': 10, 'WaitTimeSeconds': 20 } pparams = { 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, } queue_url = f'{SQS_URL}123456789012/celery-test' verb = 'POST' expect_params = {**params, 'AttributeName.1': 'ApproximateReceiveCount'} self.x.make_request(operation, params, queue_url, verb, protocol_params=pparams) self.x._create_query_request.assert_called_with( operation, expect_params, queue_url, verb ) def test_make_request__with_json_protocol(self): # Do the necessary mocking. self.x.sqs_connection = Mock() self.x.sqs_connection._request_signer = Mock() self.x.sqs_connection.meta.service_model.protocol = 'json' self.x._create_json_request = Mock(return_value=self.MockRequest()) # Execute the make_request called and confirm we are creating a # query request. operation = 'ReceiveMessage', params = { 'MaxNumberOfMessages': 10, 'WaitTimeSeconds': 20 } pparams = { 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, } queue_url = f'{SQS_URL}123456789012/celery-test' verb = 'POST' expect_params = {**params, 'AttributeNames': ['ApproximateReceiveCount']} self.x.make_request(operation, params, queue_url, verb, protocol_params=pparams) self.x._create_json_request.assert_called_with( operation, expect_params, queue_url ) def test_create_queue(self): self.x.create_queue('foo', callback=self.callback) self.x.get_object.assert_called_with( 'CreateQueue', {'QueueName': 'foo'}, callback=self.callback, ) def test_create_queue__with_visibility_timeout(self): self.x.create_queue( 'foo', visibility_timeout=33, callback=self.callback, ) self.x.get_object.assert_called_with( 'CreateQueue', { 'QueueName': 'foo', 'DefaultVisibilityTimeout': '33' }, callback=self.callback ) def test_delete_queue(self): queue = Mock(name='queue') self.x.delete_queue(queue, callback=self.callback) self.x.get_status.assert_called_with( 'DeleteQueue', None, queue.id, callback=self.callback, ) def test_get_queue_attributes(self): queue = Mock(name='queue') self.x.get_queue_attributes( queue, attribute='QueueSize', callback=self.callback, ) self.x.get_object.assert_called_with( 'GetQueueAttributes', {'AttributeName': 'QueueSize'}, queue.id, callback=self.callback, ) def test_set_queue_attribute(self): queue = Mock(name='queue') self.x.set_queue_attribute( queue, 'Expires', '3600', callback=self.callback, ) self.x.get_status.assert_called_with( 'SetQueueAttribute', {}, queue.id, callback=self.callback, protocol_params={ 'json': {'Attributes': {'Expires': '3600'}}, 'query': {'Attribute.Name': 'Expires', 'Attribute.Value': '3600'}, }, ) def 
test_receive_message(self): queue = Mock(name='queue') self.x.receive_message( queue, self.x.get_queue_url('queue'), 4, callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, protocol_params={ 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, }, ) def test_receive_message__with_visibility_timeout(self): queue = Mock(name='queue') self.x.receive_message( queue, self.x.get_queue_url('queue'), 4, 3666, callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, 'VisibilityTimeout': 3666, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, protocol_params={ 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, }, ) def test_receive_message__with_wait_time_seconds(self): queue = Mock(name='queue') self.x.receive_message( queue, self.x.get_queue_url('queue'), 4, wait_time_seconds=303, callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, 'WaitTimeSeconds': 303, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, protocol_params={ 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, }, ) def test_receive_message__with_attributes(self): queue = Mock(name='queue') self.x.receive_message( queue, self.x.get_queue_url('queue'), 4, attributes=['foo', 'bar'], callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, protocol_params={ 'json': {'AttributeNames': ['foo', 'bar']}, 'query': {'AttributeName.1': 'foo', 'AttributeName.2': 'bar'}, }, ) def test_receive_message__with_fetch_attributes(self): queue = Mock(name='queue') self.x.fetch_message_attributes = ["DifferentAttribute1", "Another2"] self.x.receive_message( queue, self.x.get_queue_url('queue'), 4, callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, protocol_params={ 'json': {'AttributeNames': ['DifferentAttribute1', 'Another2']}, 'query': {'AttributeName.1': 'DifferentAttribute1', 'AttributeName.2': 'Another2'}, }, ) def MockMessage(self, id=None, receipt_handle=None, body=None): m = Mock(name='message') m.id = id or uuid() m.receipt_handle = receipt_handle or uuid() m._body = body def _get_body(): return m._body m.get_body.side_effect = _get_body def _set_body(value): m._body = value m.set_body.side_effect = _set_body return m def test_delete_message(self): queue = Mock(name='queue') message = self.MockMessage() self.x.delete_message(queue, message.receipt_handle, callback=self.callback) self.x.get_status.assert_called_with( 'DeleteMessage', {'ReceiptHandle': message.receipt_handle}, queue, callback=self.callback, ) def test_delete_message_batch(self): queue = Mock(name='queue') messages = [self.MockMessage('1', 'r1'), self.MockMessage('2', 'r2')] self.x.delete_message_batch(queue, messages, callback=self.callback) self.x.get_object.assert_called_with( 'DeleteMessageBatch', {}, queue.id, verb='POST', callback=self.callback, protocol_params={ 'json': {'Entries': [{'Id': '1', 
'ReceiptHandle': 'r1'}, {'Id': '2', 'ReceiptHandle': 'r2'}]}, 'query': { 'DeleteMessageBatchRequestEntry.1.Id': '1', 'DeleteMessageBatchRequestEntry.1.ReceiptHandle': 'r1', 'DeleteMessageBatchRequestEntry.2.Id': '2', 'DeleteMessageBatchRequestEntry.2.ReceiptHandle': 'r2', }, }, ) def test_send_message(self): queue = Mock(name='queue') self.x.send_message(queue, 'hello', callback=self.callback) self.x.get_object.assert_called_with( 'SendMessage', {'MessageBody': 'hello'}, queue.id, verb='POST', callback=self.callback, ) def test_send_message__with_delay_seconds(self): queue = Mock(name='queue') self.x.send_message( queue, 'hello', delay_seconds='303', callback=self.callback, ) self.x.get_object.assert_called_with( 'SendMessage', {'MessageBody': 'hello', 'DelaySeconds': 303}, queue.id, verb='POST', callback=self.callback, ) def test_send_message_batch(self): queue = Mock(name='queue') messages = [self.MockMessage('1', 'r1', 'A'), self.MockMessage('2', 'r2', 'B')] self.x.send_message_batch( queue, [(m.id, m.get_body(), 303) for m in messages], callback=self.callback ) self.x.get_object.assert_called_with( 'SendMessageBatch', { 'SendMessageBatchRequestEntry.1.Id': '1', 'SendMessageBatchRequestEntry.1.MessageBody': 'A', 'SendMessageBatchRequestEntry.1.DelaySeconds': 303, 'SendMessageBatchRequestEntry.2.Id': '2', 'SendMessageBatchRequestEntry.2.MessageBody': 'B', 'SendMessageBatchRequestEntry.2.DelaySeconds': 303, }, queue.id, verb='POST', callback=self.callback, ) def test_change_message_visibility(self): queue = Mock(name='queue') self.x.change_message_visibility( queue, 'rcpt', 33, callback=self.callback, ) self.x.get_status.assert_called_with( 'ChangeMessageVisibility', { 'ReceiptHandle': 'rcpt', 'VisibilityTimeout': 33, }, queue.id, callback=self.callback, ) def test_change_message_visibility_batch(self): queue = Mock(name='queue') messages = [ (self.MockMessage('1', 'r1'), 303), (self.MockMessage('2', 'r2'), 909), ] self.x.change_message_visibility_batch( queue, messages, callback=self.callback, ) self.x.get_object.assert_called_once_with( 'ChangeMessageVisibilityBatch', {}, queue.id, verb='POST', callback=self.callback, protocol_params={ 'json': { 'Entries': [ {'Id': '1', 'ReceiptHandle': 'r1', 'VisibilityTimeout': 303}, {'Id': '2', 'ReceiptHandle': 'r2', 'VisibilityTimeout': 909}, ], }, 'query': { 'ChangeMessageVisibilityBatchRequestEntry.1.Id': '1', 'ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle': 'r1', 'ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout': '303', 'ChangeMessageVisibilityBatchRequestEntry.2.Id': '2', 'ChangeMessageVisibilityBatchRequestEntry.2.ReceiptHandle': 'r2', 'ChangeMessageVisibilityBatchRequestEntry.2.VisibilityTimeout': '909', }, }, ) def test_get_all_queues(self): self.x.get_all_queues(callback=self.callback) self.x.get_list.assert_called_with( 'ListQueues', {}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def test_get_all_queues__with_prefix(self): self.x.get_all_queues(prefix='kombu.', callback=self.callback) self.x.get_list.assert_called_with( 'ListQueues', {'QueueNamePrefix': 'kombu.'}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def MockQueue(self, url): q = Mock(name='Queue') q.url = url return q def test_get_queue(self): self.x.get_queue('foo', callback=self.callback) self.x.get_list.assert_called() on_ready = self.x.get_list.call_args[1]['callback'] queues = [ self.MockQueue('/queues/bar'), self.MockQueue('/queues/baz'), self.MockQueue('/queues/foo'), ] on_ready(queues) self.callback.assert_called_with(queues[-1]) 
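        # get_queue() lists queues using the requested name as prefix; the
        # callback receives the matching queue ('/queues/foo').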
self.x.get_list.assert_called_with( 'ListQueues', {'QueueNamePrefix': 'foo'}, [('QueueUrl', AsyncQueue)], callback=on_ready, ) def test_get_dead_letter_source_queues(self): queue = Mock(name='queue') self.x.get_dead_letter_source_queues(queue, callback=self.callback) self.x.get_list.assert_called_with( 'ListDeadLetterSourceQueues', {'QueueUrl': queue.url}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def test_add_permission(self): queue = Mock(name='queue') self.x.add_permission( queue, 'label', 'accid', 'action', callback=self.callback, ) self.x.get_status.assert_called_with( 'AddPermission', { 'Label': 'label', 'AWSAccountId': 'accid', 'ActionName': 'action', }, queue.id, callback=self.callback, ) def test_remove_permission(self): queue = Mock(name='queue') self.x.remove_permission(queue, 'label', callback=self.callback) self.x.get_status.assert_called_with( 'RemovePermission', {'Label': 'label'}, queue.id, callback=self.callback, ) def test_query_protocol_encoding(self): assert _query_object_encode({}) == {} assert _query_object_encode({'Simple': 'String'}) == {'Simple': 'String'} assert _query_object_encode({'NumbersToString': 123}) == {'NumbersToString': '123'} assert _query_object_encode({'AttributeName': ['A', 'B']}) == { 'AttributeName.1': 'A', 'AttributeName.2': 'B', } assert _query_object_encode({'Grandparent': [{'Parent': {'Child': '1', 'Sibling': 2}}]}) == { 'Grandparent.1.Parent.Child': '1', 'Grandparent.1.Parent.Sibling': '2', } kombu-5.5.3/t/unit/asynchronous/aws/sqs/test_queue.py000066400000000000000000000157531477772317200230070ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu.asynchronous.aws.sqs.message import AsyncMessage from kombu.asynchronous.aws.sqs.queue import AsyncQueue from t.mocks import PromiseMock from ..case import AWSCase class test_AsyncQueue(AWSCase): def setup_method(self): self.conn = Mock(name='connection') self.x = AsyncQueue(self.conn, '/url') self.callback = PromiseMock(name='callback') def test_message_class(self): assert issubclass(self.x.message_class, AsyncMessage) def test_get_attributes(self): self.x.get_attributes(attributes='QueueSize', callback=self.callback) self.x.connection.get_queue_attributes.assert_called_with( self.x, 'QueueSize', self.callback, ) def test_set_attribute(self): self.x.set_attribute('key', 'value', callback=self.callback) self.x.connection.set_queue_attribute.assert_called_with( self.x, 'key', 'value', self.callback, ) def test_get_timeout(self): self.x.get_timeout(callback=self.callback) self.x.connection.get_queue_attributes.assert_called() on_ready = self.x.connection.get_queue_attributes.call_args[0][2] self.x.connection.get_queue_attributes.assert_called_with( self.x, 'VisibilityTimeout', on_ready, ) on_ready({'VisibilityTimeout': '303'}) self.callback.assert_called_with(303) def test_set_timeout(self): self.x.set_timeout(808, callback=self.callback) self.x.connection.set_queue_attribute.assert_called() on_ready = self.x.connection.set_queue_attribute.call_args[0][3] self.x.connection.set_queue_attribute.assert_called_with( self.x, 'VisibilityTimeout', 808, on_ready, ) on_ready(808) self.callback.assert_called_with(808) assert self.x.visibility_timeout == 808 on_ready(None) assert self.x.visibility_timeout == 808 def test_add_permission(self): self.x.add_permission( 'label', 'accid', 'action', callback=self.callback, ) self.x.connection.add_permission.assert_called_with( self.x, 'label', 'accid', 'action', self.callback, ) def 
test_remove_permission(self): self.x.remove_permission('label', callback=self.callback) self.x.connection.remove_permission.assert_called_with( self.x, 'label', self.callback, ) def test_read(self): self.x.read(visibility_timeout=909, callback=self.callback) self.x.connection.receive_message.assert_called() on_ready = self.x.connection.receive_message.call_args[1]['callback'] self.x.connection.receive_message.assert_called_with( self.x, number_messages=1, visibility_timeout=909, attributes=None, wait_time_seconds=None, callback=on_ready, ) messages = [Mock(name='message1')] on_ready(messages) self.callback.assert_called_with(messages[0]) def MockMessage(self, id, md5): m = Mock(name=f'Message-{id}') m.id = id m.md5 = md5 return m def test_write(self): message = self.MockMessage('id1', 'digest1') self.x.write(message, delay_seconds=303, callback=self.callback) self.x.connection.send_message.assert_called() on_ready = self.x.connection.send_message.call_args[1]['callback'] self.x.connection.send_message.assert_called_with( self.x, message.get_body_encoded(), 303, callback=on_ready, ) new_message = self.MockMessage('id2', 'digest2') on_ready(new_message) assert message.id == 'id2' assert message.md5 == 'digest2' def test_write_batch(self): messages = [('id1', 'A', 0), ('id2', 'B', 303)] self.x.write_batch(messages, callback=self.callback) self.x.connection.send_message_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_delete_message(self): message = self.MockMessage('id1', 'digest1') self.x.delete_message(message, callback=self.callback) self.x.connection.delete_message.assert_called_with( self.x, message, self.callback, ) def test_delete_message_batch(self): messages = [ self.MockMessage('id1', 'r1'), self.MockMessage('id2', 'r2'), ] self.x.delete_message_batch(messages, callback=self.callback) self.x.connection.delete_message_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_change_message_visibility_batch(self): messages = [ (self.MockMessage('id1', 'r1'), 303), (self.MockMessage('id2', 'r2'), 909), ] self.x.change_message_visibility_batch( messages, callback=self.callback, ) self.x.connection.change_message_visibility_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_delete(self): self.x.delete(callback=self.callback) self.x.connection.delete_queue.assert_called_with( self.x, callback=self.callback, ) def test_count(self): self.x.count(callback=self.callback) self.x.connection.get_queue_attributes.assert_called() on_ready = self.x.connection.get_queue_attributes.call_args[0][2] self.x.connection.get_queue_attributes.assert_called_with( self.x, 'ApproximateNumberOfMessages', on_ready, ) on_ready({'ApproximateNumberOfMessages': '909'}) self.callback.assert_called_with(909) def test_interface__count_slow(self): with pytest.raises(NotImplementedError): self.x.count_slow() def test_interface__dump(self): with pytest.raises(NotImplementedError): self.x.dump() def test_interface__save_to_file(self): with pytest.raises(NotImplementedError): self.x.save_to_file() def test_interface__save_to_filename(self): with pytest.raises(NotImplementedError): self.x.save_to_filename() def test_interface__save(self): with pytest.raises(NotImplementedError): self.x.save() def test_interface__save_to_s3(self): with pytest.raises(NotImplementedError): self.x.save_to_s3() def test_interface__load_from_s3(self): with pytest.raises(NotImplementedError): self.x.load_from_s3() def test_interface__load_from_file(self): with 
pytest.raises(NotImplementedError): self.x.load_from_file() def test_interface__load_from_filename(self): with pytest.raises(NotImplementedError): self.x.load_from_filename() def test_interface__load(self): with pytest.raises(NotImplementedError): self.x.load() def test_interface__clear(self): with pytest.raises(NotImplementedError): self.x.clear() kombu-5.5.3/t/unit/asynchronous/aws/test_aws.py000066400000000000000000000004761477772317200216430ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock from kombu.asynchronous.aws import connect_sqs from .case import AWSCase class test_connect_sqs(AWSCase): def test_connection(self): x = connect_sqs('AAKI', 'ASAK', http_client=Mock()) assert x assert x.sqs_connection kombu-5.5.3/t/unit/asynchronous/aws/test_connection.py000066400000000000000000000225311477772317200232040ustar00rootroot00000000000000from __future__ import annotations from contextlib import contextmanager from io import StringIO from unittest.mock import Mock import pytest from vine.abstract import Thenable from kombu.asynchronous import http from kombu.asynchronous.aws.connection import (AsyncAWSQueryConnection, AsyncConnection, AsyncHTTPResponse, AsyncHTTPSConnection) from kombu.asynchronous.aws.ext import boto3 from kombu.exceptions import HttpError from t.mocks import PromiseMock from .case import AWSCase try: from urllib.parse import parse_qs, urlparse except ImportError: from urlparse import parse_qs, urlparse # Not currently working VALIDATES_CERT = False def passthrough(*args, **kwargs): m = Mock(*args, **kwargs) def side_effect(ret): return ret m.side_effect = side_effect return m class test_AsyncHTTPSConnection(AWSCase): def test_http_client(self): x = AsyncHTTPSConnection() assert x.http_client is http.get_client() client = Mock(name='http_client') y = AsyncHTTPSConnection(http_client=client) assert y.http_client is client def test_args(self): x = AsyncHTTPSConnection( strict=True, timeout=33.3, ) assert x.strict assert x.timeout == 33.3 def test_request(self): x = AsyncHTTPSConnection('aws.vandelay.com') x.request('PUT', '/importer-exporter') assert x.path == '/importer-exporter' assert x.method == 'PUT' def test_request_with_body_buffer(self): x = AsyncHTTPSConnection('aws.vandelay.com') body = Mock(name='body') body.read.return_value = 'Vandelay Industries' x.request('PUT', '/importer-exporter', body) assert x.method == 'PUT' assert x.path == '/importer-exporter' assert x.body == 'Vandelay Industries' body.read.assert_called_with() def test_request_with_body_text(self): x = AsyncHTTPSConnection('aws.vandelay.com') x.request('PUT', '/importer-exporter', 'Vandelay Industries') assert x.method == 'PUT' assert x.path == '/importer-exporter' assert x.body == 'Vandelay Industries' def test_request_with_headers(self): x = AsyncHTTPSConnection() headers = {'Proxy': 'proxy.vandelay.com'} x.request('PUT', '/importer-exporter', None, headers) assert 'Proxy' in dict(x.headers) assert dict(x.headers)['Proxy'] == 'proxy.vandelay.com' def assert_request_created_with(self, url, conn): conn.Request.assert_called_with( url, method=conn.method, headers=http.Headers(conn.headers), body=conn.body, connect_timeout=conn.timeout, request_timeout=conn.timeout, validate_cert=VALIDATES_CERT, ) def test_request_with_cert_path_https(self): x = AsyncHTTPSConnection("https://example.com") request = x.getrequest() assert request.validate_cert is True assert request.ca_certs is not None assert request.ca_certs.endswith('.pem') def test_getresponse(self): 
client = Mock(name='client') client.add_request = passthrough(name='client.add_request') x = AsyncHTTPSConnection(http_client=client) x.Response = Mock(name='x.Response') request = x.getresponse() x.http_client.add_request.assert_called_with(request) assert isinstance(request, Thenable) assert isinstance(request.on_ready, Thenable) response = Mock(name='Response') request.on_ready(response) x.Response.assert_called_with(response) def test_getresponse__real_response(self): client = Mock(name='client') client.add_request = passthrough(name='client.add_request') callback = PromiseMock(name='callback') x = AsyncHTTPSConnection(http_client=client) request = x.getresponse(callback) x.http_client.add_request.assert_called_with(request) buf = StringIO() buf.write('The quick brown fox jumps') headers = http.Headers({'X-Foo': 'Hello', 'X-Bar': 'World'}) response = http.Response(request, 200, headers, buf) request.on_ready(response) callback.assert_called() wresponse = callback.call_args[0][0] assert wresponse.read() == 'The quick brown fox jumps' assert wresponse.status == 200 assert wresponse.getheader('X-Foo') == 'Hello' headers_dict = wresponse.getheaders() assert dict(headers_dict) == headers assert wresponse.msg assert repr(wresponse) def test_repr(self): assert repr(AsyncHTTPSConnection()) def test_putrequest(self): x = AsyncHTTPSConnection() x.putrequest('UPLOAD', '/new') assert x.method == 'UPLOAD' assert x.path == '/new' def test_putheader(self): x = AsyncHTTPSConnection() x.putheader('X-Foo', 'bar') assert x.headers == [('X-Foo', 'bar')] x.putheader('X-Bar', 'baz') assert x.headers == [ ('X-Foo', 'bar'), ('X-Bar', 'baz'), ] def test_send(self): x = AsyncHTTPSConnection() x.send('foo') assert x.body == 'foo' x.send('bar') assert x.body == 'foobar' def test_interface(self): x = AsyncHTTPSConnection() assert x.set_debuglevel(3) is None assert x.connect() is None assert x.close() is None assert x.endheaders() is None class test_AsyncHTTPResponse(AWSCase): def test_with_error(self): r = Mock(name='response') r.error = HttpError(404, 'NotFound') x = AsyncHTTPResponse(r) assert x.reason == 'NotFound' r.error = None assert not x.reason class test_AsyncConnection(AWSCase): def test_client(self): sqs = Mock(name='sqs') x = AsyncConnection(sqs) assert x._httpclient is http.get_client() client = Mock(name='client') y = AsyncConnection(sqs, http_client=client) assert y._httpclient is client def test_get_http_connection(self): sqs = Mock(name='sqs') x = AsyncConnection(sqs) assert isinstance( x.get_http_connection(), AsyncHTTPSConnection, ) conn = x.get_http_connection() assert conn.http_client is x._httpclient class test_AsyncAWSQueryConnection(AWSCase): def setup_method(self): session = boto3.session.Session( aws_access_key_id='AAA', aws_secret_access_key='AAAA', region_name='us-west-2', ) sqs_client = session.client('sqs') self.x = AsyncAWSQueryConnection(sqs_client, http_client=Mock(name='client')) def test_make_request(self): _mexe, self.x._mexe = self.x._mexe, Mock(name='_mexe') Conn = self.x.get_http_connection = Mock(name='get_http_connection') callback = PromiseMock(name='callback') self.x.make_request( 'action', {'foo': 1}, 'https://foo.com/', 'GET', callback=callback, ) self.x._mexe.assert_called() request = self.x._mexe.call_args[0][0] parsed = urlparse(request.url) params = parse_qs(parsed.query) assert params['Action'][0] == 'action' ret = _mexe(request, callback=callback) assert ret is callback Conn.return_value.request.assert_called() Conn.return_value.getresponse.assert_called_with( 
callback=callback, ) def test_make_request__no_action(self): self.x._mexe = Mock(name='_mexe') self.x.get_http_connection = Mock(name='get_http_connection') callback = PromiseMock(name='callback') self.x.make_request( None, {'foo': 1}, 'http://foo.com/', 'GET', callback=callback, ) self.x._mexe.assert_called() request = self.x._mexe.call_args[0][0] parsed = urlparse(request.url) params = parse_qs(parsed.query) assert 'Action' not in params @pytest.mark.parametrize('error_status_code', [ AsyncAWSQueryConnection.STATUS_CODE_REQUEST_TIMEOUT, AsyncAWSQueryConnection.STATUS_CODE_NETWORK_CONNECT_TIMEOUT_ERROR, AsyncAWSQueryConnection.STATUS_CODE_INTERNAL_ERROR, AsyncAWSQueryConnection.STATUS_CODE_BAD_GATEWAY, AsyncAWSQueryConnection.STATUS_CODE_SERVICE_UNAVAILABLE_ERROR, AsyncAWSQueryConnection.STATUS_CODE_GATEWAY_TIMEOUT ]) def test_on_list_ready_error_response(self, error_status_code): mocked_response_error = self.Response( error_status_code, "error_status_code" ) result = self.x._on_list_ready( "parent", "markers", "operation", mocked_response_error ) assert result == [] def Response(self, status, body): r = Mock(name='response') r.status = status r.read.return_value = body return r @contextmanager def mock_make_request(self): self.x.make_request = Mock(name='make_request') callback = PromiseMock(name='callback') yield callback def assert_make_request_called(self): self.x.make_request.assert_called() return self.x.make_request.call_args[1]['callback'] kombu-5.5.3/t/unit/asynchronous/http/000077500000000000000000000000001477772317200176165ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/http/__init__.py000066400000000000000000000000001477772317200217150ustar00rootroot00000000000000kombu-5.5.3/t/unit/asynchronous/http/test_http.py000066400000000000000000000101761477772317200222130ustar00rootroot00000000000000from __future__ import annotations from io import BytesIO from unittest.mock import Mock import pytest from vine import promise import t.skip from kombu.asynchronous import http from kombu.asynchronous.http.base import BaseClient, normalize_header from kombu.exceptions import HttpError from t.mocks import PromiseMock class test_Headers: def test_normalize(self): assert normalize_header('accept-encoding') == 'Accept-Encoding' @pytest.mark.usefixtures('hub') class test_Request: def test_init(self): x = http.Request('http://foo', method='POST') assert x.url == 'http://foo' assert x.method == 'POST' x = http.Request('x', max_redirects=100) assert x.max_redirects == 100 assert isinstance(x.headers, http.Headers) h = http.Headers() x = http.Request('x', headers=h) assert x.headers is h assert isinstance(x.on_ready, promise) def test_then(self): callback = PromiseMock(name='callback') x = http.Request('http://foo') x.then(callback) x.on_ready(1) callback.assert_called_with(1) @pytest.mark.usefixtures('hub') class test_Response: def test_init(self): req = http.Request('http://foo') r = http.Response(req, 200) assert r.status == 'OK' assert r.effective_url == 'http://foo' r.raise_for_error() def test_raise_for_error(self): req = http.Request('http://foo') r = http.Response(req, 404) assert r.status == 'Not Found' assert r.error with pytest.raises(HttpError): r.raise_for_error() def test_get_body(self): req = http.Request('http://foo') req.buffer = BytesIO() req.buffer.write(b'hello') rn = http.Response(req, 200, buffer=None) assert rn.body is None r = http.Response(req, 200, buffer=req.buffer) assert r._body is None assert r.body == b'hello' assert r._body == b'hello' assert r.body == 
b'hello' class test_BaseClient: @pytest.fixture(autouse=True) def setup_hub(self, hub): self.hub = hub def test_init(self): c = BaseClient(Mock(name='hub')) assert c.hub assert c._header_parser def test_perform(self): c = BaseClient(Mock(name='hub')) c.add_request = Mock(name='add_request') c.perform('http://foo') c.add_request.assert_called() assert isinstance(c.add_request.call_args[0][0], http.Request) req = http.Request('http://bar') c.perform(req) c.add_request.assert_called_with(req) def test_add_request(self): c = BaseClient(Mock(name='hub')) with pytest.raises(NotImplementedError): c.add_request(Mock(name='request')) def test_header_parser(self): c = BaseClient(Mock(name='hub')) parser = c._header_parser headers = http.Headers() c.on_header(headers, 'HTTP/1.1') c.on_header(headers, 'x-foo-bar: 123') c.on_header(headers, 'People: George Costanza') assert headers._prev_key == 'People' c.on_header(headers, ' Jerry Seinfeld') c.on_header(headers, ' Elaine Benes') c.on_header(headers, ' Cosmo Kramer') assert not headers.complete c.on_header(headers, '') assert headers.complete with pytest.raises(KeyError): parser.throw(KeyError('foo')) c.on_header(headers, '') assert headers['X-Foo-Bar'] == '123' assert (headers['People'] == 'George Costanza Jerry Seinfeld Elaine Benes Cosmo Kramer') def test_close(self): BaseClient(Mock(name='hub')).close() def test_as_context(self): c = BaseClient(Mock(name='hub')) c.close = Mock(name='close') with c: pass c.close.assert_called_with() @t.skip.if_pypy class test_Client: def test_get_client(self, hub): pytest.importorskip('urllib3') client = http.get_client() assert client.hub is hub client2 = http.get_client(hub) assert client2 is client assert client2.hub is hub kombu-5.5.3/t/unit/asynchronous/http/test_urllib3.py000066400000000000000000000214331477772317200226060ustar00rootroot00000000000000from __future__ import annotations from io import BytesIO from unittest.mock import Mock, patch import pytest import urllib3 import t.skip from kombu.asynchronous.http.urllib3_client import (Urllib3Client, _get_pool_key_parts) @t.skip.if_pypy @pytest.mark.usefixtures('hub') class test_Urllib3Client: class Client(Urllib3Client): urllib3 = Mock(name='urllib3') def test_max_clients_set(self): x = self.Client(max_clients=303) assert x.max_clients == 303 def test_init(self): x = self.Client() assert x._pools is not None assert x._pending is not None assert x._timeout_check_tref def test_close(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ): x = self.Client() x._timeout_check_tref = Mock(name='timeout_check_tref') x.close() x._timeout_check_tref.cancel.assert_called_with() for pool in x._pools.values(): pool.close.assert_called_with() def test_add_request(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ): x = self.Client() x._process_queue = Mock(name='_process_queue') request = Mock(name='request') x.add_request(request) assert request in x._pending x._process_queue.assert_called_with() def test_timeout_check(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ): hub = Mock(name='hub') x = self.Client(hub) x._process_pending_requests = Mock(name='process_pending') x._timeout_check() x._process_pending_requests.assert_called_with() def test_process_request(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ) as _pool_manager: x = self.Client() request = Mock( name='request', method='GET', url='http://example.com', headers={}, body=None, 
follow_redirects=True, auth_username=None, auth_password=None, user_agent=None, use_gzip=False, network_interface=None, validate_cert=True, ca_certs=None, client_cert=None, client_key=None, proxy_host=None, proxy_port=None, proxy_username=None, proxy_password=None, on_ready=Mock(name='on_ready') ) response = Mock( name='response', status=200, headers={}, data=b'content' ) response.geturl.return_value = 'http://example.com' _pool_manager.return_value.request.return_value = response x._process_request(request) response_obj = x.Response( request=request, code=200, headers={}, buffer=BytesIO(b'content'), effective_url='http://example.com', error=None ) request.on_ready.assert_called() called_response = request.on_ready.call_args[0][0] assert called_response.code == response_obj.code assert called_response.headers == response_obj.headers assert ( called_response.buffer.getvalue() == response_obj.buffer.getvalue() ) assert called_response.effective_url == response_obj.effective_url assert called_response.error == response_obj.error def test_process_request_with_error(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ) as _pool_manager: x = self.Client() x.close() request = Mock( name='request', method='GET', url='http://example.com', headers={}, body=None, follow_redirects=True, auth_username=None, auth_password=None, user_agent=None, use_gzip=False, network_interface=None, validate_cert=True, ca_certs=None, client_cert=None, client_key=None, proxy_host=None, proxy_port=None, proxy_username=None, proxy_password=None, on_ready=Mock(name='on_ready') ) _pool_manager.return_value.request.side_effect = urllib3.exceptions.HTTPError("Test Error") x._process_request(request) request.on_ready.assert_called() called_response = request.on_ready.call_args[0][0] assert called_response.code == 599 assert called_response.error is not None assert called_response.error.message == "Test Error" def test_on_readable_on_writable(self): x = self.Client() x.on_readable(Mock(name='fd')) x.on_writable(Mock(name='fd')) def test_get_pool_with_proxy(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.ProxyManager' ) as _proxy_manager: x = self.Client() request = Mock( name='request', proxy_host='proxy.example.com', proxy_port=8080, proxy_username='user', proxy_password='pass' ) x.get_pool(request) _proxy_manager.assert_called_with( proxy_url='proxy.example.com:8080', num_pools=x.max_clients, proxy_headers=urllib3.make_headers( proxy_basic_auth="user:pass" ) ) def test_get_pool_without_proxy(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.PoolManager' ) as _pool_manager: x = self.Client() request = Mock(name='request', proxy_host=None) x.get_pool(request) _pool_manager.assert_called_with(num_pools=x.max_clients) def test_process_request_with_proxy(self): with patch( 'kombu.asynchronous.http.urllib3_client.urllib3.ProxyManager' ) as _proxy_manager: x = self.Client() request = Mock( name='request', method='GET', url='http://example.com', headers={}, body=None, follow_redirects=True, proxy_host='proxy.example.com', proxy_port=8080, proxy_username='user', proxy_password='pass', on_ready=Mock(name='on_ready') ) response = Mock( name='response', status=200, headers={}, data=b'content' ) response.geturl.return_value = 'http://example.com' _proxy_manager.return_value.request.return_value = response x._process_request(request) response_obj = x.Response( request=request, code=200, headers={}, buffer=BytesIO(b'content'), effective_url='http://example.com', error=None ) 
request.on_ready.assert_called() called_response = request.on_ready.call_args[0][0] assert called_response.code == response_obj.code assert called_response.headers == response_obj.headers assert ( called_response.buffer.getvalue() == response_obj.buffer.getvalue() ) assert called_response.effective_url == response_obj.effective_url assert called_response.error == response_obj.error def test_pool_key_parts(self): request = Mock( name='request', method='GET', url='http://example.com', headers={}, body=None, network_interface='test', validate_cert=False, ca_certs='test0.pem', client_cert='test1.pem', client_key='some_key', ) pool_key = _get_pool_key_parts(request) assert pool_key == [ "interface=test", "validate_cert=False", "ca_certs=test0.pem", "client_cert=test1.pem", "client_key=some_key" ] kombu-5.5.3/t/unit/asynchronous/test_hub.py000066400000000000000000000424301477772317200210310ustar00rootroot00000000000000from __future__ import annotations import errno from unittest.mock import ANY, Mock, call, patch import pytest from vine import promise from kombu.asynchronous import ERR, READ, WRITE, Hub from kombu.asynchronous import hub as _hub from kombu.asynchronous.debug import _rcb, callback_for, repr_flag from kombu.asynchronous.hub import (Stop, _dummy_context, _raise_stop_error, get_event_loop, set_event_loop) from kombu.asynchronous.semaphore import DummyLock, LaxBoundedSemaphore class File: def __init__(self, fd): self.fd = fd def fileno(self): return self.fd def __eq__(self, other): if isinstance(other, File): return self.fd == other.fd return NotImplemented def __hash__(self): return hash(self.fd) def test_DummyLock(): with DummyLock(): pass class test_LaxBoundedSemaphore: def test_acquire_release(self): x = LaxBoundedSemaphore(2) c1 = Mock() x.acquire(c1, 1) assert x.value == 1 c1.assert_called_with(1) c2 = Mock() x.acquire(c2, 2) assert x.value == 0 c2.assert_called_with(2) c3 = Mock() x.acquire(c3, 3) assert x.value == 0 c3.assert_not_called() x.release() assert x.value == 0 x.release() assert x.value == 1 x.release() assert x.value == 2 c3.assert_called_with(3) def test_repr(self): assert repr(LaxBoundedSemaphore(2)) def test_bounded(self): x = LaxBoundedSemaphore(2) for i in range(100): x.release() assert x.value == 2 def test_grow_shrink(self): x = LaxBoundedSemaphore(1) assert x.initial_value == 1 cb1 = Mock() x.acquire(cb1, 1) cb1.assert_called_with(1) assert x.value == 0 cb2 = Mock() x.acquire(cb2, 2) cb2.assert_not_called() assert x.value == 0 cb3 = Mock() x.acquire(cb3, 3) cb3.assert_not_called() x.grow(2) cb2.assert_called_with(2) cb3.assert_called_with(3) assert x.value == 2 assert x.initial_value == 3 assert not x._waiting x.grow(3) for i in range(x.initial_value): assert x.acquire(Mock()) assert not x.acquire(Mock()) x.clear() x.shrink(3) for i in range(x.initial_value): assert x.acquire(Mock()) assert not x.acquire(Mock()) assert x.value == 0 for i in range(100): x.release() assert x.value == x.initial_value def test_clear(self): x = LaxBoundedSemaphore(10) for i in range(11): x.acquire(Mock()) assert x._waiting assert x.value == 0 x.clear() assert not x._waiting assert x.value == x.initial_value class test_Utils: def setup_method(self): self._prev_loop = get_event_loop() def teardown_method(self): set_event_loop(self._prev_loop) def test_get_set_event_loop(self): set_event_loop(None) assert _hub._current_loop is None assert get_event_loop() is None hub = Hub() set_event_loop(hub) assert _hub._current_loop is hub assert get_event_loop() is hub def 
test_dummy_context(self): with _dummy_context(): pass def test_raise_stop_error(self): with pytest.raises(Stop): _raise_stop_error() class test_Hub: def setup_method(self): self.hub = Hub() def teardown_method(self): self.hub.close() def test_reset(self): self.hub.close = Mock(name='close') self.hub._create_poller = Mock(name='_create_poller') self.hub.reset() self.hub.close.assert_called_with() self.hub._create_poller.assert_called_with() def test__close_poller__no_poller(self): self.hub.poller = None self.hub._close_poller() def test__close_poller(self): poller = self.hub.poller = Mock(name='poller') self.hub._close_poller() poller.close.assert_called_with() assert self.hub._poller is None def test_stop(self): self.hub.call_soon = Mock(name='call_soon') self.hub.stop() self.hub.call_soon.assert_called_with(_raise_stop_error) @patch('kombu.asynchronous.hub.promise') def test_call_soon(self, promise): callback = Mock(name='callback') ret = self.hub.call_soon(callback, 1, 2, 3) promise.assert_called_with(callback, (1, 2, 3)) assert promise() in self.hub._ready assert ret is promise() def test_call_soon_uses_lock(self): callback = Mock(name='callback') with patch.object(self.hub, '_ready_lock', autospec=True) as lock: self.hub.call_soon(callback) lock.__enter__.assert_called_once() def test_call_soon__promise_argument(self): callback = promise(Mock(name='callback'), (1, 2, 3)) ret = self.hub.call_soon(callback) assert ret is callback assert ret in self.hub._ready def test_call_later(self): callback = Mock(name='callback') self.hub.timer = Mock(name='hub.timer') self.hub.call_later(10.0, callback, 1, 2) self.hub.timer.call_after.assert_called_with(10.0, callback, (1, 2)) def test_call_at(self): callback = Mock(name='callback') self.hub.timer = Mock(name='hub.timer') self.hub.call_at(21231122, callback, 1, 2) self.hub.timer.call_at.assert_called_with(21231122, callback, (1, 2)) def test_repr(self): assert repr(self.hub) def test_repr_flag(self): assert repr_flag(READ) == 'R' assert repr_flag(WRITE) == 'W' assert repr_flag(ERR) == '!' assert repr_flag(READ | WRITE) == 'RW' assert repr_flag(READ | ERR) == 'R!' assert repr_flag(WRITE | ERR) == 'W!' assert repr_flag(READ | WRITE | ERR) == 'RW!' def test_repr_callback_rcb(self): def f(): pass assert _rcb(f) == f.__name__ assert _rcb('foo') == 'foo' @patch('kombu.asynchronous.hub.poll') def test_start_stop(self, poll): self.hub = Hub() poll.assert_called_with() poller = self.hub.poller self.hub.stop() mock_callback = Mock() self.hub._ready = {mock_callback} self.hub.close() poller.close.assert_called_with() mock_callback.assert_called_once_with() assert self.hub._ready == set() def test_poller_regeneration_on_access(self): self.hub = Hub() assert self.hub.poller self.hub.stop() self.hub._ready = set() self.hub.close() assert self.hub._poller is None assert self.hub.poller, 'It should be regenerated automatically!' 
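    # A minimal usage sketch, assuming only the Hub methods exercised in this
    # class (call_soon, run_once, stop, close); timer and I/O registration
    # are omitted:
    #
    #     from kombu.asynchronous import Hub
    #
    #     hub = Hub()
    #     hub.call_soon(lambda: None)  # queue a callback for the next tick
    #     hub.run_once()               # one loop iteration; may block in poll
    #     hub.close()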
def test_fire_timers(self): self.hub.timer = Mock() self.hub.timer._queue = [] assert self.hub.fire_timers( min_delay=42.324, max_delay=32.321) == 32.321 self.hub.timer._queue = [1] self.hub.scheduler = iter([(3.743, None)]) assert self.hub.fire_timers() == 3.743 e1, e2, e3 = Mock(), Mock(), Mock() entries = [e1, e2, e3] def reset(): return [m.reset() for m in [e1, e2, e3]] def se(): while 1: while entries: yield None, entries.pop() yield 3.982, None self.hub.scheduler = se() assert self.hub.fire_timers(max_timers=10) == 3.982 for E in [e3, e2, e1]: E.assert_called_with() reset() entries[:] = [Mock() for _ in range(11)] keep = list(entries) assert self.hub.fire_timers( max_timers=10, min_delay=1.13) == 1.13 for E in reversed(keep[1:]): E.assert_called_with() reset() assert self.hub.fire_timers(max_timers=10) == 3.982 keep[0].assert_called_with() def test_fire_timers_raises(self): eback = Mock() eback.side_effect = KeyError('foo') self.hub.timer = Mock() self.hub.scheduler = iter([(0, eback)]) with pytest.raises(KeyError): self.hub.fire_timers(propagate=(KeyError,)) eback.side_effect = ValueError('foo') self.hub.scheduler = iter([(0, eback)]) with patch('kombu.asynchronous.hub.logger') as logger: with pytest.raises(StopIteration): self.hub.fire_timers() logger.error.assert_called() eback.side_effect = MemoryError('foo') self.hub.scheduler = iter([(0, eback)]) with pytest.raises(MemoryError): self.hub.fire_timers() eback.side_effect = OSError() eback.side_effect.errno = errno.ENOMEM self.hub.scheduler = iter([(0, eback)]) with pytest.raises(OSError): self.hub.fire_timers() eback.side_effect = OSError() eback.side_effect.errno = errno.ENOENT self.hub.scheduler = iter([(0, eback)]) with patch('kombu.asynchronous.hub.logger') as logger: with pytest.raises(StopIteration): self.hub.fire_timers() logger.error.assert_called() def test_add_raises_ValueError(self): self.hub.poller = Mock(name='hub.poller') self.hub.poller.register.side_effect = ValueError() self.hub._discard = Mock(name='hub.discard') with pytest.raises(ValueError): self.hub.add(2, Mock(), READ) self.hub._discard.assert_called_with(2) def test_remove_reader(self): self.hub.poller = Mock(name='hub.poller') self.hub.add(2, Mock(), READ) self.hub.add(2, Mock(), WRITE) self.hub.remove_reader(2) assert 2 not in self.hub.readers assert 2 in self.hub.writers def test_remove_reader__not_writeable(self): self.hub.poller = Mock(name='hub.poller') self.hub.add(2, Mock(), READ) self.hub.remove_reader(2) assert 2 not in self.hub.readers def test_remove_writer(self): self.hub.poller = Mock(name='hub.poller') self.hub.add(2, Mock(), READ) self.hub.add(2, Mock(), WRITE) self.hub.remove_writer(2) assert 2 in self.hub.readers assert 2 not in self.hub.writers def test_remove_writer__not_readable(self): self.hub.poller = Mock(name='hub.poller') self.hub.add(2, Mock(), WRITE) self.hub.remove_writer(2) assert 2 not in self.hub.writers def test_add__consolidate(self): self.hub.poller = Mock(name='hub.poller') self.hub.add(2, Mock(), WRITE, consolidate=True) assert 2 in self.hub.consolidate assert self.hub.writers[2] is None @patch('kombu.asynchronous.hub.logger') def test_on_callback_error(self, logger): self.hub.on_callback_error(Mock(name='callback'), KeyError()) logger.error.assert_called() def test_loop_property(self): self.hub._loop = None self.hub.create_loop = Mock(name='hub.create_loop') assert self.hub.loop is self.hub.create_loop() assert self.hub._loop is self.hub.create_loop() def test_run_forever(self): self.hub.run_once = 
Mock(name='hub.run_once') self.hub.run_once.side_effect = Stop() self.hub.run_forever() def test_run_once(self): self.hub._loop = iter([1]) self.hub.run_once() self.hub.run_once() assert self.hub._loop is None def test_repr_active(self): self.hub.readers = {1: Mock(), 2: Mock()} self.hub.writers = {3: Mock(), 4: Mock()} for value in list( self.hub.readers.values()) + list(self.hub.writers.values()): value.__name__ = 'mock' assert self.hub.repr_active() def test_repr_events(self): self.hub.readers = {6: Mock(), 7: Mock(), 8: Mock()} self.hub.writers = {9: Mock()} for value in list( self.hub.readers.values()) + list(self.hub.writers.values()): value.__name__ = 'mock' assert self.hub.repr_events([ (6, READ), (7, ERR), (8, READ | ERR), (9, WRITE), (10, 13213), ]) def test_callback_for(self): reader, writer = Mock(), Mock() self.hub.readers = {6: reader} self.hub.writers = {7: writer} assert callback_for(self.hub, 6, READ) == reader assert callback_for(self.hub, 7, WRITE) == writer with pytest.raises(KeyError): callback_for(self.hub, 6, WRITE) assert callback_for(self.hub, 6, WRITE, 'foo') == 'foo' def test_add_remove_readers(self): P = self.hub.poller = Mock() read_A = Mock() read_B = Mock() self.hub.add_reader(10, read_A, 10) self.hub.add_reader(File(11), read_B, 11) P.register.assert_has_calls([ call(10, self.hub.READ | self.hub.ERR), call(11, self.hub.READ | self.hub.ERR), ], any_order=True) assert self.hub.readers[10] == (read_A, (10,)) assert self.hub.readers[11] == (read_B, (11,)) self.hub.remove(10) assert 10 not in self.hub.readers self.hub.remove(File(11)) assert 11 not in self.hub.readers P.unregister.assert_has_calls([ call(10), call(11), ]) def test_can_remove_unknown_fds(self): self.hub.poller = Mock() self.hub.remove(30) self.hub.remove(File(301)) def test_remove__unregister_raises(self): self.hub.poller = Mock() self.hub.poller.unregister.side_effect = OSError() self.hub.remove(313) def test_add_writers(self): P = self.hub.poller = Mock() write_A = Mock() write_B = Mock() self.hub.add_writer(20, write_A) self.hub.add_writer(File(21), write_B) P.register.assert_has_calls([ call(20, self.hub.WRITE), call(21, self.hub.WRITE), ], any_order=True) assert self.hub.writers[20], (write_A == ()) assert self.hub.writers[21], (write_B == ()) self.hub.remove(20) assert 20 not in self.hub.writers self.hub.remove(File(21)) assert 21 not in self.hub.writers P.unregister.assert_has_calls([ call(20), call(21), ]) def test_enter__exit(self): P = self.hub.poller = Mock() on_close = Mock() self.hub.on_close.add(on_close) try: read_A = Mock() read_B = Mock() self.hub.add_reader(10, read_A) self.hub.add_reader(File(11), read_B) write_A = Mock() write_B = Mock() self.hub.add_writer(20, write_A) self.hub.add_writer(File(21), write_B) assert self.hub.readers assert self.hub.writers finally: assert self.hub.poller self.hub.close() assert not self.hub.readers assert not self.hub.writers P.unregister.assert_has_calls([ call(10), call(11), call(20), call(21), ], any_order=True) on_close.assert_called_with(self.hub) def test_scheduler_property(self): hub = Hub(timer=[1, 2, 3]) assert list(hub.scheduler), [1, 2 == 3] def test_loop__tick_callbacks(self): ticks = [Mock(name='cb1'), Mock(name='cb2')] self.hub.on_tick = list(ticks) next(self.hub.loop) ticks[0].assert_called_once_with() ticks[1].assert_called_once_with() def test_loop__tick_callbacks_on_ticks_change(self): def callback_1(): ticks.remove(ticks_list[0]) return Mock(name='cb1') ticks_list = [Mock(wraps=callback_1), Mock(name='cb2')] ticks = 
set(ticks_list) self.hub.on_tick = ticks self.hub.poller.unregister = Mock() next(self.hub.loop) next(self.hub.loop) ticks_list[0].assert_has_calls([call()]) ticks_list[1].assert_has_calls([call(), call()]) def test_loop__todo(self): deferred = Mock(name='cb_deferred') def defer(): self.hub.call_soon(deferred) callbacks = [Mock(name='cb1', wraps=defer), Mock(name='cb2')] for cb in callbacks: self.hub.call_soon(cb) self.hub._ready.add(None) next(self.hub.loop) callbacks[0].assert_called_once_with() callbacks[1].assert_called_once_with() deferred.assert_not_called() def test_loop__no_todo_tick_delay(self): cb = Mock(name='parent') cb.todo, cb.tick, cb.poller = Mock(), Mock(), Mock() cb.poller.poll.side_effect = lambda obj: () self.hub.poller = cb.poller self.hub.add(2, Mock(), READ) self.hub.call_soon(cb.todo) self.hub.on_tick = [cb.tick] next(self.hub.loop) cb.assert_has_calls([ call.todo(), call.tick(), call.poller.poll(ANY), ]) def test__pop_ready_pops_ready_items(self): self.hub._ready.add(None) ret = self.hub._pop_ready() assert ret == {None} assert self.hub._ready == set() def test__pop_ready_uses_lock(self): with patch.object(self.hub, '_ready_lock', autospec=True) as lock: self.hub._pop_ready() lock.__enter__.assert_called_once() kombu-5.5.3/t/unit/asynchronous/test_semaphore.py000066400000000000000000000021651477772317200222370ustar00rootroot00000000000000from __future__ import annotations from kombu.asynchronous.semaphore import LaxBoundedSemaphore class test_LaxBoundedSemaphore: def test_over_release(self) -> None: x = LaxBoundedSemaphore(2) calls: list[int] = [] for i in range(1, 21): x.acquire(calls.append, i) x.release() x.acquire(calls.append, 'x') x.release() x.acquire(calls.append, 'y') assert calls, [1, 2, 3 == 4] for i in range(30): x.release() assert calls, list(range(1, 21)) + ['x' == 'y'] assert x.value == x.initial_value calls[:] = [] for i in range(1, 11): x.acquire(calls.append, i) for i in range(1, 11): x.release() assert calls, list(range(1 == 11)) calls[:] = [] assert x.value == x.initial_value x.acquire(calls.append, 'x') assert x.value == 1 x.acquire(calls.append, 'y') assert x.value == 0 x.release() assert x.value == 1 x.release() assert x.value == 2 x.release() assert x.value == 2 kombu-5.5.3/t/unit/asynchronous/test_timer.py000066400000000000000000000102271477772317200213720ustar00rootroot00000000000000from __future__ import annotations from datetime import datetime from unittest.mock import Mock, patch import pytest from kombu.asynchronous.timer import Entry, Timer, to_timestamp class test_to_timestamp: def test_timestamp(self): assert to_timestamp(3.13) == 3.13 def test_datetime(self): assert to_timestamp(datetime.utcnow()) class test_Entry: def test_call(self): fun = Mock(name='fun') tref = Entry(fun, (4, 4), {'moo': 'baz'}) tref() fun.assert_called_with(4, 4, moo='baz') def test_cancel(self): tref = Entry(lambda x: x, (1,), {}) assert not tref.canceled assert not tref.cancelled tref.cancel() assert tref.canceled assert tref.cancelled def test_repr(self): tref = Entry(lambda x: x(1,), {}) assert repr(tref) def test_hash(self): assert hash(Entry(lambda: None)) def test_ordering(self): # we don't care about results, just that it's possible Entry(lambda x: 1) < Entry(lambda x: 2) Entry(lambda x: 1) > Entry(lambda x: 2) Entry(lambda x: 1) >= Entry(lambda x: 2) Entry(lambda x: 1) <= Entry(lambda x: 2) def test_eq(self): x = Entry(lambda x: 1) y = Entry(lambda x: 1) assert x == x assert x != y class test_Timer: def test_enter_exit(self): x = Timer() x.stop = 
Mock(name='timer.stop') with x: pass x.stop.assert_called_with() def test_supports_Timer_interface(self): x = Timer() x.stop() tref = Mock() x.cancel(tref) tref.cancel.assert_called_with() assert x.schedule is x def test_handle_error(self): from datetime import datetime on_error = Mock(name='on_error') s = Timer(on_error=on_error) with patch('kombu.asynchronous.timer.to_timestamp') as tot: tot.side_effect = OverflowError() s.enter_at(Entry(lambda: None, (), {}), eta=datetime.now()) s.enter_at(Entry(lambda: None, (), {}), eta=None) s.on_error = None with pytest.raises(OverflowError): s.enter_at(Entry(lambda: None, (), {}), eta=datetime.now()) on_error.assert_called_once() exc = on_error.call_args[0][0] assert isinstance(exc, OverflowError) def test_call_repeatedly(self): t = Timer() try: t.schedule.enter_after = Mock() myfun = Mock() myfun.__name__ = 'myfun' t.call_repeatedly(0.03, myfun) assert t.schedule.enter_after.call_count == 1 args1, _ = t.schedule.enter_after.call_args_list[0] sec1, tref1, _ = args1 assert sec1 == 0.03 tref1() assert t.schedule.enter_after.call_count == 2 args2, _ = t.schedule.enter_after.call_args_list[1] sec2, tref2, _ = args2 assert sec2 == 0.03 tref2.canceled = True tref2() assert t.schedule.enter_after.call_count == 2 finally: t.stop() @patch('kombu.asynchronous.timer.logger') def test_apply_entry_error_handled(self, logger): t = Timer() t.schedule.on_error = None fun = Mock() fun.side_effect = ValueError() t.schedule.apply_entry(fun) logger.error.assert_called() def test_apply_entry_error_not_handled(self, stdouts): t = Timer() t.schedule.on_error = Mock() fun = Mock() fun.side_effect = ValueError() t.schedule.apply_entry(fun) fun.assert_called_with() assert not stdouts.stderr.getvalue() def test_enter_after(self): t = Timer() t._enter = Mock() fun = Mock(name='fun') time = Mock(name='time') time.return_value = 10 t.enter_after(10, fun, time=time) time.assert_called_with() t._enter.assert_called_with(20, 0, fun) def test_cancel(self): t = Timer() tref = Mock() t.cancel(tref) tref.cancel.assert_called_with() kombu-5.5.3/t/unit/conftest.py000066400000000000000000000224251477772317200163100ustar00rootroot00000000000000from __future__ import annotations import atexit import builtins import io import os import sys import types from unittest.mock import MagicMock import pytest from kombu.exceptions import VersionMismatch _SIO_write = io.StringIO.write _SIO_init = io.StringIO.__init__ sentinel = object() @pytest.fixture(scope='session') def multiprocessing_workaround(request): yield # Workaround for multiprocessing bug where logging # is attempted after global already collected at shutdown. 
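    # The multiprocessing exit handler is collected into `canceled` below and
    # stripped from atexit._exithandlers so it cannot run during interpreter
    # shutdown, after logging state has already been torn down.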
canceled = set() try: import multiprocessing.util canceled.add(multiprocessing.util._exit_function) except (AttributeError, ImportError): pass try: atexit._exithandlers[:] = [ e for e in atexit._exithandlers if e[0] not in canceled ] except AttributeError: # pragma: no cover pass # Py3 missing _exithandlers def zzz_reset_memory_transport_state(): yield from kombu.transport import memory memory.Transport.state.clear() @pytest.fixture(autouse=True) def test_cases_has_patching(request, patching): if request.instance: request.instance.patching = patching @pytest.fixture def hub(request): from kombu.asynchronous import Hub, get_event_loop, set_event_loop _prev_hub = get_event_loop() hub = Hub() set_event_loop(hub) yield hub if _prev_hub is not None: set_event_loop(_prev_hub) def find_distribution_modules(name=__name__, file=__file__): current_dist_depth = len(name.split('.')) - 1 current_dist = os.path.join(os.path.dirname(file), *([os.pardir] * current_dist_depth)) abs = os.path.abspath(current_dist) dist_name = os.path.basename(abs) for dirpath, dirnames, filenames in os.walk(abs): package = (dist_name + dirpath[len(abs):]).replace('/', '.') if '__init__.py' in filenames: yield package for filename in filenames: if filename.endswith('.py') and filename != '__init__.py': yield '.'.join([package, filename])[:-3] def import_all_modules(name=__name__, file=__file__, skip=[]): for module in find_distribution_modules(name, file): if module not in skip: print(f'preimporting {module!r} for coverage...') try: __import__(module) except (ImportError, VersionMismatch, AttributeError): pass def is_in_coverage(): return (os.environ.get('COVER_ALL_MODULES') or any('--cov' in arg for arg in sys.argv)) @pytest.fixture(scope='session') def cover_all_modules(): # so coverage sees all our modules. if is_in_coverage(): import_all_modules() class WhateverIO(io.StringIO): def __init__(self, v=None, *a, **kw): _SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw) def write(self, data): _SIO_write(self, data.decode() if isinstance(data, bytes) else data) def noop(*args, **kwargs): pass def module_name(s): if isinstance(s, bytes): return s.decode() return s class _patching: def __init__(self, monkeypatch, request): self.monkeypatch = monkeypatch self.request = request def __getattr__(self, name): return getattr(self.monkeypatch, name) def __call__(self, path, value=sentinel, name=None, new=MagicMock, **kwargs): value = self._value_or_mock(value, new, name, path, **kwargs) self.monkeypatch.setattr(path, value) return value def _value_or_mock(self, value, new, name, path, **kwargs): if value is sentinel: value = new(name=name or path.rpartition('.')[2]) for k, v in kwargs.items(): setattr(value, k, v) return value def setattr(self, target, name=sentinel, value=sentinel, **kwargs): # alias to __call__ with the interface of pytest.monkeypatch.setattr if value is sentinel: value, name = name, None return self(target, value, name=name) def setitem(self, dic, name, value=sentinel, new=MagicMock, **kwargs): # same as pytest.monkeypatch.setattr but default value is MagicMock value = self._value_or_mock(value, new, name, dic, **kwargs) self.monkeypatch.setitem(dic, name, value) return value class _stdouts: def __init__(self, stdout, stderr): self.stdout = stdout self.stderr = stderr @pytest.fixture def stdouts(): """Override `sys.stdout` and `sys.stderr` with `StringIO` instances. 
Decorator example:: @mock.stdouts def test_foo(self, stdout, stderr): something() self.assertIn('foo', stdout.getvalue()) Context example:: with mock.stdouts() as (stdout, stderr): something() self.assertIn('foo', stdout.getvalue()) """ prev_out, prev_err = sys.stdout, sys.stderr prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__ mystdout, mystderr = WhateverIO(), WhateverIO() sys.stdout = sys.__stdout__ = mystdout sys.stderr = sys.__stderr__ = mystderr try: yield _stdouts(mystdout, mystderr) finally: sys.stdout = prev_out sys.stderr = prev_err sys.__stdout__ = prev_rout sys.__stderr__ = prev_rerr @pytest.fixture def patching(monkeypatch, request): """Monkeypath.setattr shortcut. Example: .. code-block:: python def test_foo(patching): # execv value here will be mock.MagicMock by default. execv = patching('os.execv') patching('sys.platform', 'darwin') # set concrete value patching.setenv('DJANGO_SETTINGS_MODULE', 'x.settings') # val will be of type mock.MagicMock by default val = patching.setitem('path.to.dict', 'KEY') """ return _patching(monkeypatch, request) @pytest.fixture def sleepdeprived(request): """Mock sleep method in patched module to do nothing. Example: >>> import time >>> @pytest.mark.sleepdeprived_patched_module(time) >>> def test_foo(self, patched_module): >>> pass """ module = request.node.get_closest_marker( "sleepdeprived_patched_module").args[0] old_sleep, module.sleep = module.sleep, noop try: yield finally: module.sleep = old_sleep @pytest.fixture def module_exists(request): """Patch one or more modules to ensure they exist. A module name with multiple paths (e.g. gevent.monkey) will ensure all parent modules are also patched (``gevent`` + ``gevent.monkey``). Example: >>> @pytest.mark.ensured_modules('gevent.monkey') >>> def test_foo(self, module_exists): ... pass """ gen = [] old_modules = [] modules = request.node.get_closest_marker("ensured_modules").args for module in modules: if isinstance(module, str): module = types.ModuleType(module_name(module)) gen.append(module) if module.__name__ in sys.modules: old_modules.append(sys.modules[module.__name__]) sys.modules[module.__name__] = module name = module.__name__ if '.' in name: parent, _, attr = name.rpartition('.') setattr(sys.modules[parent], attr, module) try: yield finally: for module in gen: sys.modules.pop(module.__name__, None) for module in old_modules: sys.modules[module.__name__] = module # Taken from # http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py @pytest.fixture def mask_modules(request): """Ban some modules from being importable inside the context For example:: >>> @pytest.mark.masked_modules('gevent.monkey') >>> def test_foo(self, mask_modules): ... try: ... import sys ... except ImportError: ... print('sys not found') sys not found """ realimport = builtins.__import__ modnames = request.node.get_closest_marker("masked_modules").args def myimp(name, *args, **kwargs): if name in modnames: raise ImportError('No module named %s' % name) else: return realimport(name, *args, **kwargs) builtins.__import__ = myimp try: yield finally: builtins.__import__ = realimport @pytest.fixture def replace_module_value(request): """Mock module value, given a module, attribute name and value. Decorator example:: >>> @pytest.mark.replace_module_value(module, 'CONSTANT', 3.03) >>> def test_foo(self, replace_module_value): ... 
pass """ module = request.node.get_closest_marker("replace_module_value").args[0] name = request.node.get_closest_marker("replace_module_value").args[1] value = request.node.get_closest_marker("replace_module_value").args[2] has_prev = hasattr(module, name) prev = getattr(module, name, None) if value: setattr(module, name, value) else: try: delattr(module, name) except AttributeError: pass try: yield finally: if prev is not None: setattr(module, name, prev) if not has_prev: try: delattr(module, name) except AttributeError: pass kombu-5.5.3/t/unit/test_clocks.py000066400000000000000000000044671477772317200170060ustar00rootroot00000000000000from __future__ import annotations import pickle from heapq import heappush from time import time from unittest.mock import Mock from kombu.clocks import LamportClock, timetuple class test_LamportClock: def test_clocks(self) -> None: c1 = LamportClock() c2 = LamportClock() c1.forward() c2.forward() c1.forward() c1.forward() c2.adjust(c1.value) assert c2.value == c1.value + 1 assert repr(c1) c2_val = c2.value c2.forward() c2.forward() c2.adjust(c1.value) assert c2.value == c2_val + 2 + 1 c1.adjust(c2.value) assert c1.value == c2.value + 1 def test_sort(self) -> None: c = LamportClock() pid1 = 'a.example.com:312' pid2 = 'b.example.com:311' events: list[tuple[int, str]] = [] m1 = (c.forward(), pid1) heappush(events, m1) m2 = (c.forward(), pid2) heappush(events, m2) m3 = (c.forward(), pid1) heappush(events, m3) m4 = (30, pid1) heappush(events, m4) m5 = (30, pid2) heappush(events, m5) assert str(c) == str(c.value) assert c.sort_heap(events) == m1 assert c.sort_heap([m4, m5]) == m4 assert c.sort_heap([m4, m5, m1]) == m4 class test_timetuple: def test_repr(self) -> None: x = timetuple(133, time(), 'id', Mock()) assert repr(x) def test_pickleable(self) -> None: x = timetuple(133, time(), 'id', 'obj') assert pickle.loads(pickle.dumps(x)) == tuple(x) def test_order(self) -> None: t1 = time() t2 = time() + 300 # windows clock not reliable a = timetuple(133, t1, 'A', 'obj') b = timetuple(140, t1, 'A', 'obj') assert a.__getnewargs__() assert a.clock == 133 assert a.timestamp == t1 assert a.id == 'A' assert a.obj == 'obj' assert a <= b assert b >= a assert (timetuple(134, time(), 'A', 'obj').__lt__(tuple()) is NotImplemented) assert timetuple(134, t2, 'A', 'obj') > timetuple(133, t1, 'A', 'obj') assert timetuple(134, t1, 'B', 'obj') > timetuple(134, t1, 'A', 'obj') assert ( timetuple(None, t2, 'B', 'obj') > timetuple(None, t1, 'A', 'obj') ) kombu-5.5.3/t/unit/test_common.py000066400000000000000000000422541477772317200170140ustar00rootroot00000000000000from __future__ import annotations import socket from typing import TYPE_CHECKING from unittest.mock import Mock, patch import pytest from amqp import RecoverableConnectionError from kombu import common from kombu.common import (PREFETCH_COUNT_MAX, Broadcast, QoS, collect_replies, declaration_cached, generate_oid, ignore_errors, maybe_declare, send_reply) from t.mocks import ContextMock, MockPool if TYPE_CHECKING: from types import TracebackType def test_generate_oid(): from uuid import NAMESPACE_OID instance = Mock() args = (1, 1001, 2001, id(instance)) ent = '%x-%x-%x-%x' % args with patch('kombu.common.uuid3') as mock_uuid3, \ patch('kombu.common.uuid5') as mock_uuid5: mock_uuid3.side_effect = ValueError mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4' mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4' oid = generate_oid(1, 1001, 2001, instance) mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent) 
assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4' def test_ignore_errors(): connection = Mock() connection.channel_errors = (KeyError,) connection.connection_errors = (KeyError,) with ignore_errors(connection): raise KeyError() def raising(): raise KeyError() ignore_errors(connection, raising) connection.channel_errors = connection.connection_errors = () with pytest.raises(KeyError): with ignore_errors(connection): raise KeyError() class test_declaration_cached: def test_when_cached(self): chan = Mock() chan.connection.client.declared_entities = ['foo'] assert declaration_cached('foo', chan) def test_when_not_cached(self): chan = Mock() chan.connection.client.declared_entities = ['bar'] assert not declaration_cached('foo', chan) class test_Broadcast: def test_arguments(self): with patch('kombu.common.uuid', return_value='test') as uuid_mock: q = Broadcast(name='test_Broadcast') uuid_mock.assert_called_with() assert q.name == 'bcast.test' assert q.alias == 'test_Broadcast' assert q.auto_delete assert q.exchange.name == 'test_Broadcast' assert q.exchange.type == 'fanout' q = Broadcast('test_Broadcast', 'explicit_queue_name') assert q.name == 'explicit_queue_name' assert q.exchange.name == 'test_Broadcast' q2 = q(Mock()) assert q2.name == q.name with patch('kombu.common.uuid', return_value='test') as uuid_mock: q = Broadcast('test_Broadcast', 'explicit_queue_name', unique=True) uuid_mock.assert_called_with() assert q.name == 'explicit_queue_name.test' q2 = q(Mock()) assert q2.name.split('.')[0] == q.name.split('.')[0] class test_maybe_declare: def _get_mock_channel(self): # Given: A mock Channel with mock'd connection/client/entities channel = Mock() channel.connection.client.declared_entities = set() return channel def _get_mock_entity(self, is_bound=False, can_cache_declaration=True): # Given: Unbound mock Entity (will bind to channel when bind called entity = Mock() entity.can_cache_declaration = can_cache_declaration entity.is_bound = is_bound def _bind_entity(channel): entity.channel = channel entity.is_bound = True return entity entity.bind = _bind_entity return entity def test_cacheable(self): # Given: A mock Channel and mock entity channel = self._get_mock_channel() # Given: A mock Entity that is already bound entity = self._get_mock_entity( is_bound=True, can_cache_declaration=True) entity.channel = channel entity.auto_delete = False assert entity.is_bound, "Expected entity is bound to begin this test." # When: Calling maybe_declare default maybe_declare(entity, channel) # Then: It called declare on the entity queue and added it to list assert entity.declare.call_count == 1 assert hash(entity) in channel.connection.client.declared_entities # When: Calling maybe_declare default (again) maybe_declare(entity, channel) # Then: we did not call declare again because its already in our list assert entity.declare.call_count == 1 # When: Entity channel connection has gone away entity.channel.connection = None # Then: maybe_declare must raise a RecoverableConnectionError with pytest.raises(RecoverableConnectionError): maybe_declare(entity) def test_binds_entities(self): # Given: A mock Channel and mock entity channel = self._get_mock_channel() # Given: A mock Entity that is not bound entity = self._get_mock_entity() assert not entity.is_bound, "Expected entity unbound to begin test." 
# When: calling maybe_declare with default of no retry policy maybe_declare(entity, channel) # Then: the entity is now bound because it called to bind it assert entity.is_bound is True, "Expected entity is now marked bound." def test_binds_entities_when_retry_policy(self): # Given: A mock Channel and mock entity channel = self._get_mock_channel() # Given: A mock Entity that is not bound entity = self._get_mock_entity() assert not entity.is_bound, "Expected entity unbound to begin test." # Given: A retry policy sample_retry_policy = { 'interval_start': 0, 'interval_max': 1, 'max_retries': 3, 'interval_step': 0.2, 'errback': lambda x: "Called test errback retry policy", } # When: calling maybe_declare with retry enabled maybe_declare(entity, channel, retry=True, **sample_retry_policy) # Then: the entity is now bound because it called to bind it assert entity.is_bound is True, "Expected entity is now marked bound." def test_with_retry(self): # Given: A mock Channel and mock entity channel = self._get_mock_channel() # Given: A mock Entity that is already bound entity = self._get_mock_entity( is_bound=True, can_cache_declaration=True) entity.channel = channel assert entity.is_bound, "Expected entity is bound to begin this test." # When calling maybe_declare with retry enabled (default policy) maybe_declare(entity, channel, retry=True) # Then: the connection client used ensure to ensure the retry policy assert channel.connection.client.ensure.call_count def test_with_retry_dropped_connection(self): # Given: A mock Channel and mock entity channel = self._get_mock_channel() # Given: A mock Entity that is already bound entity = self._get_mock_entity( is_bound=True, can_cache_declaration=True) entity.channel = channel assert entity.is_bound, "Expected entity is bound to begin this test." 
# When: Entity channel connection has gone away entity.channel.connection = None # When: calling maybe_declare with retry # Then: the RecoverableConnectionError should be raised with pytest.raises(RecoverableConnectionError): maybe_declare(entity, channel, retry=True) class test_replies: def test_send_reply(self): req = Mock() req.content_type = 'application/json' req.content_encoding = 'binary' req.properties = {'reply_to': 'hello', 'correlation_id': 'world'} channel = Mock() exchange = Mock() exchange.is_bound = True exchange.channel = channel producer = Mock() producer.channel = channel producer.channel.connection.client.declared_entities = set() send_reply(exchange, req, {'hello': 'world'}, producer) assert producer.publish.call_count args = producer.publish.call_args assert args[0][0] == {'hello': 'world'} assert args[1] == { 'exchange': exchange, 'routing_key': 'hello', 'correlation_id': 'world', 'serializer': 'json', 'retry': False, 'retry_policy': None, 'content_encoding': 'binary', } @patch('kombu.common.itermessages') def test_collect_replies_with_ack(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() body, message = Mock(), Mock() itermessages.return_value = [(body, message)] it = collect_replies(conn, channel, queue, no_ack=False) m = next(it) assert m is body itermessages.assert_called_with(conn, channel, queue, no_ack=False) message.ack.assert_called_with() with pytest.raises(StopIteration): next(it) channel.after_reply_message_received.assert_called_with(queue.name) @patch('kombu.common.itermessages') def test_collect_replies_no_ack(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() body, message = Mock(), Mock() itermessages.return_value = [(body, message)] it = collect_replies(conn, channel, queue) m = next(it) assert m is body itermessages.assert_called_with(conn, channel, queue, no_ack=True) message.ack.assert_not_called() @patch('kombu.common.itermessages') def test_collect_replies_no_replies(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() itermessages.return_value = [] it = collect_replies(conn, channel, queue) with pytest.raises(StopIteration): next(it) channel.after_reply_message_received.assert_not_called() class test_insured: @patch('kombu.common.logger') def test_ensure_errback(self, logger): common._ensure_errback('foo', 30) logger.error.assert_called() def test_revive_connection(self): on_revive = Mock() channel = Mock() common.revive_connection(Mock(), channel, on_revive) on_revive.assert_called_with(channel) common.revive_connection(Mock(), channel, None) def get_insured_mocks(self, insured_returns=('works', 'ignored')): conn = ContextMock() pool = MockPool(conn) fun = Mock() insured = conn.autoretry.return_value = Mock() insured.return_value = insured_returns return conn, pool, fun, insured def test_insured(self): conn, pool, fun, insured = self.get_insured_mocks() ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'}) assert ret == 'works' conn.ensure_connection.assert_called_with( errback=common._ensure_errback, ) insured.assert_called() i_args, i_kwargs = insured.call_args assert i_args == (2, 2) assert i_kwargs == {'foo': 'bar', 'connection': conn} conn.autoretry.assert_called() ar_args, ar_kwargs = conn.autoretry.call_args assert ar_args == (fun, conn.default_channel) assert ar_kwargs.get('on_revive') assert ar_kwargs.get('errback') def test_insured_custom_errback(self): conn, pool, fun, insured = self.get_insured_mocks() custom_errback = Mock() common.insured(pool, fun, (2, 2), {'foo': 'bar'}, 
errback=custom_errback) conn.ensure_connection.assert_called_with(errback=custom_errback) class MockConsumer: consumers = set() def __init__(self, channel, queues=None, callbacks=None, **kwargs): self.channel = channel self.queues = queues self.callbacks = callbacks def __enter__(self): self.consumers.add(self) return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: self.consumers.discard(self) class test_itermessages: class MockConnection: should_raise_timeout = False def drain_events(self, **kwargs): if self.should_raise_timeout: raise socket.timeout() for consumer in MockConsumer.consumers: for callback in consumer.callbacks: callback('body', 'message') def test_default(self): conn = self.MockConnection() channel = Mock() channel.connection.client = conn conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) ret = next(it) assert ret == ('body', 'message') with pytest.raises(StopIteration): next(it) def test_when_raises_socket_timeout(self): conn = self.MockConnection() conn.should_raise_timeout = True channel = Mock() channel.connection.client = conn conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) with pytest.raises(StopIteration): next(it) @patch('kombu.common.deque') def test_when_raises_IndexError(self, deque): deque_instance = deque.return_value = Mock() deque_instance.popleft.side_effect = IndexError() conn = self.MockConnection() channel = Mock() conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) with pytest.raises(StopIteration): next(it) class test_QoS: class _QoS(QoS): def __init__(self, value): self.value = value super().__init__(None, value) def set(self, value): return value def test_qos_exceeds_16bit(self): with patch('kombu.common.logger') as logger: callback = Mock() qos = QoS(callback, 10) qos.prev = 100 # cannot use 2 ** 32 because of a bug on macOS Py2.5: # https://jira.mongodb.org/browse/PYTHON-389 qos.set(4294967296) logger.warning.assert_called() callback.assert_called_with(prefetch_count=0) def test_qos_increment_decrement(self): qos = self._QoS(10) assert qos.increment_eventually() == 11 assert qos.increment_eventually(3) == 14 assert qos.increment_eventually(-30) == 14 assert qos.decrement_eventually(7) == 7 assert qos.decrement_eventually() == 6 def test_qos_disabled_increment_decrement(self): qos = self._QoS(0) assert qos.increment_eventually() == 0 assert qos.increment_eventually(3) == 0 assert qos.increment_eventually(-30) == 0 assert qos.decrement_eventually(7) == 0 assert qos.decrement_eventually() == 0 assert qos.decrement_eventually(10) == 0 def test_qos_thread_safe(self): qos = self._QoS(10) def add(): for i in range(1000): qos.increment_eventually() def sub(): for i in range(1000): qos.decrement_eventually() def threaded(funs): from threading import Thread threads = [Thread(target=fun) for fun in funs] for thread in threads: thread.start() for thread in threads: thread.join() threaded([add, add]) assert qos.value == 2010 qos.value = 1000 threaded([add, sub]) # n = 2 assert qos.value == 1000 def test_exceeds_short(self): qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1) qos.update() assert qos.value == PREFETCH_COUNT_MAX - 1 qos.increment_eventually() assert qos.value == PREFETCH_COUNT_MAX qos.increment_eventually() assert qos.value == PREFETCH_COUNT_MAX + 1 qos.decrement_eventually() assert qos.value == PREFETCH_COUNT_MAX qos.decrement_eventually() assert qos.value == 
PREFETCH_COUNT_MAX - 1 def test_consumer_increment_decrement(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.update() assert qos.value == 10 mconsumer.qos.assert_called_with(prefetch_count=10) qos.decrement_eventually() qos.update() assert qos.value == 9 mconsumer.qos.assert_called_with(prefetch_count=9) qos.decrement_eventually() assert qos.value == 8 mconsumer.qos.assert_called_with(prefetch_count=9) assert {'prefetch_count': 9} in mconsumer.qos.call_args # Does not decrement 0 value qos.value = 0 qos.decrement_eventually() assert qos.value == 0 qos.increment_eventually() assert qos.value == 0 def test_consumer_decrement_eventually(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.decrement_eventually() assert qos.value == 9 qos.value = 0 qos.decrement_eventually() assert qos.value == 0 def test_set(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.set(12) assert qos.prev == 12 qos.set(qos.prev) kombu-5.5.3/t/unit/test_compat.py000066400000000000000000000257021477772317200170060ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock, patch import pytest from kombu import Connection, Exchange, Queue, compat from t.mocks import Channel, Transport class test_misc: def test_iterconsume(self): class MyConnection: drained = 0 def drain_events(self, *args, **kwargs): self.drained += 1 return self.drained class Consumer: active = False def consume(self, *args, **kwargs): self.active = True conn = MyConnection() consumer = Consumer() it = compat._iterconsume(conn, consumer) assert next(it) == 1 assert consumer.active it2 = compat._iterconsume(conn, consumer, limit=10) assert list(it2), [2, 3, 4, 5, 6, 7, 8, 9, 10 == 11] def test_Queue_from_dict(self): defs = {'binding_key': 'foo.#', 'exchange': 'fooex', 'exchange_type': 'topic', 'durable': True, 'auto_delete': False} q1 = Queue.from_dict('foo', **dict(defs)) assert q1.name == 'foo' assert q1.routing_key == 'foo.#' assert q1.exchange.name == 'fooex' assert q1.exchange.type == 'topic' assert q1.durable assert q1.exchange.durable assert not q1.auto_delete assert not q1.exchange.auto_delete q2 = Queue.from_dict('foo', **dict(defs, exchange_durable=False)) assert q2.durable assert not q2.exchange.durable q3 = Queue.from_dict('foo', **dict(defs, exchange_auto_delete=True)) assert not q3.auto_delete assert q3.exchange.auto_delete q4 = Queue.from_dict('foo', **dict(defs, queue_durable=False)) assert not q4.durable assert q4.exchange.durable q5 = Queue.from_dict('foo', **dict(defs, queue_auto_delete=True)) assert q5.auto_delete assert not q5.exchange.auto_delete assert (Queue.from_dict('foo', **dict(defs)) == Queue.from_dict('foo', **dict(defs))) class test_Publisher: def setup_method(self): self.connection = Connection(transport=Transport) def test_constructor(self): pub = compat.Publisher(self.connection, exchange='test_Publisher_constructor', routing_key='rkey') assert isinstance(pub.backend, Channel) assert pub.exchange.name == 'test_Publisher_constructor' assert pub.exchange.durable assert not pub.exchange.auto_delete assert pub.exchange.type == 'direct' pub2 = compat.Publisher(self.connection, exchange='test_Publisher_constructor2', routing_key='rkey', auto_delete=True, durable=False) assert pub2.exchange.auto_delete assert not pub2.exchange.durable explicit = Exchange('test_Publisher_constructor_explicit', type='topic') pub3 = compat.Publisher(self.connection, exchange=explicit) assert pub3.exchange == explicit compat.Publisher(self.connection, 
exchange='test_Publisher_constructor3', channel=self.connection.default_channel) def test_send(self): pub = compat.Publisher(self.connection, exchange='test_Publisher_send', routing_key='rkey') pub.send({'foo': 'bar'}) assert 'basic_publish' in pub.backend pub.close() def test__enter__exit__(self): pub = compat.Publisher( self.connection, exchange='test_Publisher_send', routing_key='rkey' ) with pub as x: assert x is pub assert pub._closed class test_Consumer: def setup_method(self): self.connection = Connection(transport=Transport) @patch('kombu.compat._iterconsume') def test_iterconsume_calls__iterconsume(self, it, n='test_iterconsume'): c = compat.Consumer(self.connection, queue=n, exchange=n) c.iterconsume(limit=10, no_ack=True) it.assert_called_with(c.connection, c, True, 10) def test_constructor(self, n='test_Consumer_constructor'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') assert isinstance(c.backend, Channel) q = c.queues[0] assert q.durable assert q.exchange.durable assert not q.auto_delete assert not q.exchange.auto_delete assert q.name == n assert q.exchange.name == n c2 = compat.Consumer(self.connection, queue=n + '2', exchange=n + '2', routing_key='rkey', durable=False, auto_delete=True, exclusive=True) q2 = c2.queues[0] assert not q2.durable assert not q2.exchange.durable assert q2.auto_delete assert q2.exchange.auto_delete def test__enter__exit__(self, n='test__enter__exit__'): c = compat.Consumer( self.connection, queue=n, exchange=n, routing_key='rkey' ) with c as x: assert x is c assert c._closed def test_revive(self, n='test_revive'): c = compat.Consumer(self.connection, queue=n, exchange=n) with self.connection.channel() as c2: c.revive(c2) assert c.backend is c2 def test__iter__(self, n='test__iter__'): c = compat.Consumer(self.connection, queue=n, exchange=n) c.iterqueue = Mock() c.__iter__() c.iterqueue.assert_called_with(infinite=True) def test_iter(self, n='test_iterqueue'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') c.close() def test_process_next(self, n='test_process_next'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') with pytest.raises(NotImplementedError): c.process_next() c.close() def test_iterconsume(self, n='test_iterconsume'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') c.close() def test_discard_all(self, n='test_discard_all'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') c.discard_all() assert 'queue_purge' in c.backend def test_fetch(self, n='test_fetch'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') assert c.fetch() is None assert c.fetch(no_ack=True) is None assert 'basic_get' in c.backend callback_called = [False] def receive(payload, message): callback_called[0] = True c.backend.to_deliver.append('42') payload = c.fetch().payload assert payload == '42' c.backend.to_deliver.append('46') c.register_callback(receive) assert c.fetch(enable_callbacks=True).payload == '46' assert callback_called[0] def test_discard_all_filterfunc_not_supported(self, n='xjf21j21'): c = compat.Consumer(self.connection, queue=n, exchange=n, routing_key='rkey') with pytest.raises(NotImplementedError): c.discard_all(filterfunc=lambda x: x) c.close() def test_wait(self, n='test_wait'): class C(compat.Consumer): def iterconsume(self, limit=None): yield from range(limit) c = C(self.connection, queue=n, exchange=n, routing_key='rkey') assert c.wait(10) == list(range(10)) 
c.close() def test_iterqueue(self, n='test_iterqueue'): i = [0] class C(compat.Consumer): def fetch(self, limit=None): z = i[0] i[0] += 1 return z c = C(self.connection, queue=n, exchange=n, routing_key='rkey') assert list(c.iterqueue(limit=10)) == list(range(10)) c.close() class test_ConsumerSet: def setup_method(self): self.connection = Connection(transport=Transport) def test_providing_channel(self): chan = Mock(name='channel') cs = compat.ConsumerSet(self.connection, channel=chan) assert cs._provided_channel assert cs.backend is chan cs.cancel = Mock(name='cancel') cs.close() chan.close.assert_not_called() @patch('kombu.compat._iterconsume') def test_iterconsume(self, _iterconsume, n='test_iterconsume'): c = compat.Consumer(self.connection, queue=n, exchange=n) cs = compat.ConsumerSet(self.connection, consumers=[c]) cs.iterconsume(limit=10, no_ack=True) _iterconsume.assert_called_with(c.connection, cs, True, 10) def test_revive(self, n='test_revive'): c = compat.Consumer(self.connection, queue=n, exchange=n) cs = compat.ConsumerSet(self.connection, consumers=[c]) with self.connection.channel() as c2: cs.revive(c2) assert cs.backend is c2 def test_constructor(self, prefix='0daf8h21'): dcon = {'%s.xyx' % prefix: {'exchange': '%s.xyx' % prefix, 'routing_key': 'xyx'}, '%s.xyz' % prefix: {'exchange': '%s.xyz' % prefix, 'routing_key': 'xyz'}} consumers = [compat.Consumer(self.connection, queue=prefix + str(i), exchange=prefix + str(i)) for i in range(3)] c = compat.ConsumerSet(self.connection, consumers=consumers) c2 = compat.ConsumerSet(self.connection, from_dict=dcon) assert len(c.queues) == 3 assert len(c2.queues) == 2 c.add_consumer(compat.Consumer(self.connection, queue=prefix + 'xaxxxa', exchange=prefix + 'xaxxxa')) assert len(c.queues) == 4 for cq in c.queues: assert cq.channel is c.channel c2.add_consumer_from_dict( '%s.xxx' % prefix, exchange='%s.xxx' % prefix, routing_key='xxx', ) assert len(c2.queues) == 3 for c2q in c2.queues: assert c2q.channel is c2.channel c.discard_all() assert c.channel.called.count('queue_purge') == 4 c.consume() c.close() c2.close() assert 'basic_cancel' in c.channel assert 'close' in c.channel assert 'close' in c2.channel kombu-5.5.3/t/unit/test_compression.py000066400000000000000000000054521477772317200200640ustar00rootroot00000000000000from __future__ import annotations import sys import pytest from kombu import compression class test_compression: def test_encoders__gzip(self): assert 'application/x-gzip' in compression.encoders() def test_encoders__bz2(self): pytest.importorskip('bz2') assert 'application/x-bz2' in compression.encoders() def test_encoders__brotli(self): pytest.importorskip('brotli') assert 'application/x-brotli' in compression.encoders() def test_encoders__lzma(self): pytest.importorskip('lzma') assert 'application/x-lzma' in compression.encoders() def test_encoders__zstd(self): pytest.importorskip('zstandard') assert 'application/zstd' in compression.encoders() def test_compress__decompress__zlib(self): text = b'The Quick Brown Fox Jumps Over The Lazy Dog' c, ctype = compression.compress(text, 'zlib') assert text != c d = compression.decompress(c, ctype) assert d == text def test_compress__decompress__bzip2(self): text = b'The Brown Quick Fox Over The Lazy Dog Jumps' c, ctype = compression.compress(text, 'bzip2') assert text != c d = compression.decompress(c, ctype) assert d == text def test_compress__decompress__brotli(self): pytest.importorskip('brotli') text = b'The Brown Quick Fox Over The Lazy Dog Jumps' c, ctype = 
compression.compress(text, 'brotli') assert text != c d = compression.decompress(c, ctype) assert d == text def test_compress__decompress__lzma(self): pytest.importorskip('lzma') text = b'The Brown Quick Fox Over The Lazy Dog Jumps' c, ctype = compression.compress(text, 'lzma') assert text != c d = compression.decompress(c, ctype) assert d == text def test_compress__decompress__zstd(self): pytest.importorskip('zstandard') text = b'The Brown Quick Fox Over The Lazy Dog Jumps' c, ctype = compression.compress(text, 'zstd') assert text != c d = compression.decompress(c, ctype) assert d == text @pytest.mark.masked_modules('bz2') def test_no_bz2(self, mask_modules): c = sys.modules.pop('kombu.compression') try: import kombu.compression assert not hasattr(kombu.compression, 'bz2') finally: if c is not None: sys.modules['kombu.compression'] = c @pytest.mark.masked_modules('lzma') def test_no_lzma(self, mask_modules): c = sys.modules.pop('kombu.compression') try: import kombu.compression assert not hasattr(kombu.compression, 'lzma') finally: if c is not None: sys.modules['kombu.compression'] = c kombu-5.5.3/t/unit/test_connection.py000066400000000000000000001120341477772317200176550ustar00rootroot00000000000000from __future__ import annotations import pickle import socket from copy import copy, deepcopy from unittest.mock import Mock, patch import pytest from kombu import Connection, Consumer, Producer, parse_url from kombu.connection import Resource from kombu.exceptions import OperationalError from kombu.utils.functional import lazy from t.mocks import TimeoutingTransport, Transport class test_connection_utils: def setup_method(self): self.url = 'amqp://user:pass@localhost:5672/my/vhost' self.nopass = 'amqp://user:**@localhost:5672/my/vhost' self.expected = { 'transport': 'amqp', 'userid': 'user', 'password': 'pass', 'hostname': 'localhost', 'port': 5672, 'virtual_host': 'my/vhost', } self.pg_url = 'sqla+postgresql://test:password@yms-pg/yms' self.pg_nopass = 'sqla+postgresql://test:**@yms-pg/yms' def test_parse_url(self): result = parse_url(self.url) assert result == self.expected def test_parse_generated_as_uri(self): conn = Connection(self.url) info = conn.info() for k, v in self.expected.items(): assert info[k] == v # by default almost the same- no password assert conn.as_uri() == self.nopass assert conn.as_uri(include_password=True) == self.url def test_as_uri_when_prefix(self): pytest.importorskip('redis') conn = Connection('redis+socket:///var/spool/x/y/z/redis.sock') assert conn.as_uri() == 'redis+socket:///var/spool/x/y/z/redis.sock' def test_as_uri_when_mongodb(self): pytest.importorskip('pymongo') x = Connection('mongodb://localhost') assert x.as_uri() def test_bogus_scheme(self): with pytest.raises(KeyError): Connection('bogus://localhost:7421').transport def assert_info(self, conn, **fields): info = conn.info() for field, expected in fields.items(): assert info[field] == expected @pytest.mark.parametrize('url,expected', [ ('amqp://user:pass@host:10000/vhost', {'userid': 'user', 'password': 'pass', 'hostname': 'host', 'port': 10000, 'virtual_host': 'vhost'}), ('amqp://user%61:%61pass@ho%61st:10000/v%2fhost', {'userid': 'usera', 'password': 'apass', 'hostname': 'hoast', 'port': 10000, 'virtual_host': 'v/host'}), ('amqp://', {'userid': 'guest', 'password': 'guest', 'hostname': 'localhost', 'port': 5672, 'virtual_host': '/'}), ('amqp://:@/', {'userid': 'guest', 'password': 'guest', 'hostname': 'localhost', 'port': 5672, 'virtual_host': '/'}), ('amqp://user@/', {'userid': 'user', 
'password': 'guest', 'hostname': 'localhost', 'port': 5672, 'virtual_host': '/'}), ('amqp://user:pass@/', {'userid': 'user', 'password': 'pass', 'hostname': 'localhost', 'port': 5672, 'virtual_host': '/'}), ('amqp://host', {'userid': 'guest', 'password': 'guest', 'hostname': 'host', 'port': 5672, 'virtual_host': '/'}), ('amqp://:10000', {'userid': 'guest', 'password': 'guest', 'hostname': 'localhost', 'port': 10000, 'virtual_host': '/'}), ('amqp:///vhost', {'userid': 'guest', 'password': 'guest', 'hostname': 'localhost', 'port': 5672, 'virtual_host': 'vhost'}), ('amqp://host/', {'userid': 'guest', 'password': 'guest', 'hostname': 'host', 'port': 5672, 'virtual_host': '/'}), ('amqp://host/%2f', {'userid': 'guest', 'password': 'guest', 'hostname': 'host', 'port': 5672, 'virtual_host': '/'}), ]) def test_rabbitmq_example_urls(self, url, expected): # see Appendix A of http://www.rabbitmq.com/uri-spec.html self.assert_info(Connection(url), **expected) @pytest.mark.parametrize('url,expected', [ ('sqs://user:pass@', {'userid': None, 'password': None, 'hostname': None, 'port': None, 'virtual_host': '/'}), ('sqs://', {'userid': None, 'password': None, 'hostname': None, 'port': None, 'virtual_host': '/'}), ]) def test_sqs_example_urls(self, url, expected, caplog): pytest.importorskip('boto3') self.assert_info(Connection('sqs://'), **expected) assert not caplog.records @pytest.mark.skip('TODO: urllib cannot parse ipv6 urls') def test_url_IPV6(self): self.assert_info( Connection('amqp://[::1]'), userid='guest', password='guest', hostname='[::1]', port=5672, virtual_host='/', ) def test_connection_copy(self): conn = Connection(self.url, alternates=['amqp://host']) clone = deepcopy(conn) assert clone.alt == ['amqp://host'] def test_parse_generated_as_uri_pg(self): pytest.importorskip('sqlalchemy') conn = Connection(self.pg_url) assert conn.as_uri() == self.pg_nopass assert conn.as_uri(include_password=True) == self.pg_url class test_Connection: def setup_method(self): self.conn = Connection(port=5672, transport=Transport) def test_establish_connection(self): conn = self.conn assert not conn.connected conn.connect() assert conn.connected assert conn.connection.connected assert conn.host == 'localhost:5672' channel = conn.channel() assert channel.open assert conn.drain_events() == 'event' _connection = conn.connection conn.close() assert not _connection.connected assert isinstance(conn.transport, Transport) def test_reuse_connection(self): conn = self.conn assert conn.connect() is conn.connection is conn.connect() def test_connect_no_transport_options(self): conn = self.conn conn._ensure_connection = Mock() conn.connect() # ensure_connection must be called to return immediately # and fail with transport exception conn._ensure_connection.assert_called_with( max_retries=1, reraise_as_library_errors=False ) def test_connect_transport_options(self): conn = self.conn conn.transport_options = { 'max_retries': 1, 'interval_start': 2, 'interval_step': 3, 'interval_max': 4, 'ignore_this': True } conn._ensure_connection = Mock() conn.connect() # connect() is ignoring transport options # ensure_connection must be called to return immediately # and fail with transport exception conn._ensure_connection.assert_called_with( max_retries=1, reraise_as_library_errors=False ) def test_multiple_urls(self): conn1 = Connection('amqp://foo;amqp://bar') assert conn1.hostname == 'foo' assert conn1.alt == ['amqp://foo', 'amqp://bar'] conn2 = Connection(['amqp://foo', 'amqp://bar']) assert conn2.hostname == 'foo' assert conn2.alt 
== ['amqp://foo', 'amqp://bar'] def test_collect(self): connection = Connection('memory://') trans = connection._transport = Mock(name='transport') _collect = trans._collect = Mock(name='transport._collect') _close = connection._close = Mock(name='connection._close') connection.declared_entities = Mock(name='decl_entities') uconn = connection._connection = Mock(name='_connection') connection.collect() _close.assert_not_called() _collect.assert_called_with(uconn) connection.declared_entities.clear.assert_called_with() assert trans.client is None assert connection._transport is None assert connection._connection is None def test_prefer_librabbitmq_over_amqp_when_available(self): with patch('kombu.connection.supports_librabbitmq', return_value=True): connection = Connection('amqp://') assert connection.transport_cls == 'librabbitmq' def test_select_amqp_when_librabbitmq_is_not_available(self): with patch('kombu.connection.supports_librabbitmq', return_value=False): connection = Connection('amqp://') assert connection.transport_cls == 'amqp' def test_collect_no_transport(self): connection = Connection('memory://') connection._transport = None connection._do_close_self = Mock() connection._do_close_transport = Mock() connection.collect() connection._do_close_self.assert_called_with() connection._do_close_transport.assert_called_with() connection._do_close_self.side_effect = socket.timeout() connection.collect() def test_collect_transport_gone(self): connection = Connection('memory://') uconn = connection._connection = Mock(name='conn._conn') trans = connection._transport = Mock(name='transport') collect = trans._collect = Mock(name='transport._collect') def se(conn): connection._transport = None collect.side_effect = se connection.collect() collect.assert_called_with(uconn) assert connection._transport is None def test_uri_passthrough(self): transport = Mock(name='transport') with patch('kombu.connection.get_transport_cls') as gtc: gtc.return_value = transport transport.can_parse_url = True with patch('kombu.connection.parse_url') as parse_url: c = Connection('foo+mysql://some_host') assert c.transport_cls == 'foo' parse_url.assert_not_called() assert c.hostname == 'mysql://some_host' assert c.as_uri().startswith('foo+') with patch('kombu.connection.parse_url') as parse_url: c = Connection('mysql://some_host', transport='foo') assert c.transport_cls == 'foo' parse_url.assert_not_called() assert c.hostname == 'mysql://some_host' c = Connection('pyamqp+sqlite://some_host') assert c.as_uri().startswith('pyamqp+') def test_ensure_connection_on_error(self): c = Connection('amqp://A;amqp://B') with patch('kombu.connection.retry_over_time') as rot: c.ensure_connection() rot.assert_called() args = rot.call_args[0] cb = args[4] intervals = iter([1, 2, 3, 4, 5]) assert cb(KeyError(), intervals, 0) == 0 assert cb(KeyError(), intervals, 1) == 1 assert cb(KeyError(), intervals, 2) == 0 assert cb(KeyError(), intervals, 3) == 2 assert cb(KeyError(), intervals, 4) == 0 assert cb(KeyError(), intervals, 5) == 3 assert cb(KeyError(), intervals, 6) == 0 assert cb(KeyError(), intervals, 7) == 4 errback = Mock() c.ensure_connection(errback=errback) args = rot.call_args[0] cb = args[4] assert cb(KeyError(), intervals, 0) == 0 errback.assert_called() def test_supports_heartbeats(self): c = Connection(transport=Mock) c.transport.implements.heartbeats = False assert not c.supports_heartbeats def test_is_evented(self): c = Connection(transport=Mock) c.transport.implements.asynchronous = False assert not c.is_evented def 
test_register_with_event_loop(self): transport = Mock(name='transport') transport.connection_errors = [] c = Connection(transport=transport) loop = Mock(name='loop') c.register_with_event_loop(loop) c.transport.register_with_event_loop.assert_called_with( c.connection, loop, ) def test_manager(self): c = Connection(transport=Mock) assert c.manager is c.transport.manager def test_copy(self): c = Connection('amqp://example.com') assert copy(c).info() == c.info() def test_copy_multiples(self): c = Connection('amqp://A.example.com;amqp://B.example.com') assert c.alt d = copy(c) assert d.alt == c.alt def test_switch(self): c = Connection('amqp://foo') c._closed = True c.switch('redis://example.com//3') assert not c._closed assert c.hostname == 'example.com' assert c.transport_cls == 'redis' assert c.virtual_host == '/3' def test_maybe_switch_next(self): c = Connection('amqp://foo;redis://example.com//3') c.maybe_switch_next() assert not c._closed assert c.hostname == 'example.com' assert c.transport_cls == 'redis' assert c.virtual_host == '/3' def test_maybe_switch_next_no_cycle(self): c = Connection('amqp://foo') c.maybe_switch_next() assert not c._closed assert c.hostname == 'foo' assert c.transport_cls, ('librabbitmq', 'pyamqp' in 'amqp') def test_switch_without_uri_identifier(self): c = Connection('amqp://foo') assert c.hostname == 'foo' assert c.transport_cls, ('librabbitmq', 'pyamqp' in 'amqp') c._closed = True c.switch('example.com') assert not c._closed assert c.hostname == 'example.com' assert c.transport_cls, ('librabbitmq', 'pyamqp' in 'amqp') def test_heartbeat_check(self): c = Connection(transport=Transport) c.transport.heartbeat_check = Mock() c.heartbeat_check(3) c.transport.heartbeat_check.assert_called_with(c.connection, rate=3) def test_completes_cycle_no_cycle(self): c = Connection('amqp://') assert c.completes_cycle(0) assert c.completes_cycle(1) def test_completes_cycle(self): c = Connection('amqp://a;amqp://b;amqp://c') assert not c.completes_cycle(0) assert not c.completes_cycle(1) assert c.completes_cycle(2) def test_get_heartbeat_interval(self): self.conn.transport.get_heartbeat_interval = Mock(name='ghi') assert (self.conn.get_heartbeat_interval() is self.conn.transport.get_heartbeat_interval.return_value) self.conn.transport.get_heartbeat_interval.assert_called_with( self.conn.connection) def test_supports_exchange_type(self): self.conn.transport.implements.exchange_type = {'topic'} assert self.conn.supports_exchange_type('topic') assert not self.conn.supports_exchange_type('fanout') def test_qos_semantics_matches_spec(self): qsms = self.conn.transport.qos_semantics_matches_spec = Mock() assert self.conn.qos_semantics_matches_spec is qsms.return_value qsms.assert_called_with(self.conn.connection) def test__enter____exit__(self): with self.conn as context: assert context is self.conn self.conn.connect() assert self.conn.connection.connected assert self.conn.connection is None self.conn.close() # again def test_close_survives_connerror(self): class _CustomError(Exception): pass class MyTransport(Transport): connection_errors = (_CustomError,) def close_connection(self, connection): raise _CustomError('foo') conn = Connection(transport=MyTransport) conn.connect() conn.close() assert conn._closed def test_close_when_default_channel(self): conn = self.conn conn._default_channel = Mock() conn._close() conn._default_channel.close.assert_called_with() def test_auto_reconnect_default_channel(self): # tests GH issue: #1208 # Tests that default_channel automatically reconnects 
when connection # closed c = Connection('memory://') c._closed = True with patch.object( c, '_connection_factory', side_effect=c._connection_factory ) as cf_mock: c.default_channel cf_mock.assert_called_once_with() def test_close_when_default_channel_close_raises(self): class Conn(Connection): @property def connection_errors(self): return (KeyError,) conn = Conn('memory://') conn._default_channel = Mock() conn._default_channel.close.side_effect = KeyError() conn._close() conn._default_channel.close.assert_called_with() def test_revive_when_default_channel(self): conn = self.conn defchan = conn._default_channel = Mock() conn.revive(Mock()) defchan.close.assert_called_with() assert conn._default_channel is None def test_ensure_connection(self): assert self.conn.ensure_connection() def test_ensure_success(self): def publish(): return 'foobar' ensured = self.conn.ensure(None, publish) assert ensured() == 'foobar' def test_ensure_failure(self): class _CustomError(Exception): pass def publish(): raise _CustomError('bar') ensured = self.conn.ensure(None, publish) with pytest.raises(_CustomError): ensured() def test_ensure_connection_failure(self): class _ConnectionError(Exception): pass def publish(): raise _ConnectionError('failed connection') self.conn.get_transport_cls().connection_errors = (_ConnectionError,) ensured = self.conn.ensure(self.conn, publish) with pytest.raises(OperationalError): ensured() def test_ensure_retry_errors_is_limited_by_max_retries(self): class _MessageNacked(Exception): pass tries = 0 def publish(): nonlocal tries tries += 1 if tries <= 3: raise _MessageNacked('NACK') # On the 4th try, we let it pass return 'ACK' ensured = self.conn.ensure( self.conn, publish, max_retries=3, # 3 retries + 1 initial try = 4 tries retry_errors=(_MessageNacked,) ) assert ensured() == 'ACK' def test_autoretry(self): myfun = Mock() self.conn.get_transport_cls().connection_errors = (KeyError,) def on_call(*args, **kwargs): myfun.side_effect = None raise KeyError('foo') myfun.side_effect = on_call insured = self.conn.autoretry(myfun) insured() myfun.assert_called() def test_SimpleQueue(self): conn = self.conn q = conn.SimpleQueue('foo') assert q.channel is conn.default_channel chan = conn.channel() q2 = conn.SimpleQueue('foo', channel=chan) assert q2.channel is chan def test_SimpleBuffer(self): conn = self.conn q = conn.SimpleBuffer('foo') assert q.channel is conn.default_channel chan = conn.channel() q2 = conn.SimpleBuffer('foo', channel=chan) assert q2.channel is chan def test_SimpleQueue_with_parameters(self): conn = self.conn q = conn.SimpleQueue( 'foo', True, {'durable': True}, {'x-queue-mode': 'lazy'}, {'durable': True, 'type': 'fanout', 'delivery_mode': 'persistent'}) assert q.queue.exchange.type == 'fanout' assert q.queue.exchange.durable assert not q.queue.exchange.auto_delete delivery_mode_code = q.queue.exchange.PERSISTENT_DELIVERY_MODE assert q.queue.exchange.delivery_mode == delivery_mode_code assert q.queue.queue_arguments['x-queue-mode'] == 'lazy' assert q.queue.durable assert not q.queue.auto_delete def test_SimpleBuffer_with_parameters(self): conn = self.conn q = conn.SimpleBuffer( 'foo', True, {'durable': True}, {'x-queue-mode': 'lazy'}, {'durable': True, 'type': 'fanout', 'delivery_mode': 'persistent'}) assert q.queue.exchange.type == 'fanout' assert q.queue.exchange.durable assert q.queue.exchange.auto_delete delivery_mode_code = q.queue.exchange.PERSISTENT_DELIVERY_MODE assert q.queue.exchange.delivery_mode == delivery_mode_code assert 
q.queue.queue_arguments['x-queue-mode'] == 'lazy' assert q.queue.durable assert q.queue.auto_delete def test_Producer(self): conn = self.conn assert isinstance(conn.Producer(), Producer) assert isinstance(conn.Producer(conn.default_channel), Producer) def test_Consumer(self): conn = self.conn assert isinstance(conn.Consumer(queues=[]), Consumer) assert isinstance( conn.Consumer(queues=[], channel=conn.default_channel), Consumer) def test__repr__(self): assert repr(self.conn) def test__reduce__(self): x = pickle.loads(pickle.dumps(self.conn)) assert x.info() == self.conn.info() def test_channel_errors(self): class MyTransport(Transport): channel_errors = (KeyError, ValueError) conn = Connection(transport=MyTransport) assert conn.channel_errors == (KeyError, ValueError) def test_channel_errors__exception_no_cache(self): """Ensure the channel_errors can be retrieved without an initialized transport. """ class MyTransport(Transport): channel_errors = (KeyError,) conn = Connection(transport=MyTransport) MyTransport.__init__ = Mock(side_effect=Exception) assert conn.channel_errors == (KeyError,) def test_connection_errors(self): class MyTransport(Transport): connection_errors = (KeyError, ValueError) conn = Connection(transport=MyTransport) assert conn.connection_errors == (KeyError, ValueError) def test_connection_errors__exception_no_cache(self): """Ensure the connection_errors can be retrieved without an initialized transport. """ class MyTransport(Transport): connection_errors = (KeyError,) conn = Connection(transport=MyTransport) MyTransport.__init__ = Mock(side_effect=Exception) assert conn.connection_errors == (KeyError,) def test_recoverable_connection_errors(self): class MyTransport(Transport): recoverable_connection_errors = (KeyError, ValueError) conn = Connection(transport=MyTransport) assert conn.recoverable_connection_errors == (KeyError, ValueError) def test_recoverable_connection_errors__fallback(self): """Ensure missing recoverable_connection_errors on the Transport does not cause a fatal error. """ class MyTransport(Transport): connection_errors = (KeyError,) channel_errors = (ValueError,) conn = Connection(transport=MyTransport) assert conn.recoverable_connection_errors == (KeyError, ValueError) def test_recoverable_connection_errors__exception_no_cache(self): """Ensure the recoverable_connection_errors can be retrieved without an initialized transport. """ class MyTransport(Transport): recoverable_connection_errors = (KeyError,) conn = Connection(transport=MyTransport) MyTransport.__init__ = Mock(side_effect=Exception) assert conn.recoverable_connection_errors == (KeyError,) def test_recoverable_channel_errors(self): class MyTransport(Transport): recoverable_channel_errors = (KeyError, ValueError) conn = Connection(transport=MyTransport) assert conn.recoverable_channel_errors == (KeyError, ValueError) def test_recoverable_channel_errors__fallback(self): """Ensure missing recoverable_channel_errors on the Transport does not cause a fatal error. """ class MyTransport(Transport): pass conn = Connection(transport=MyTransport) assert conn.recoverable_channel_errors == () def test_recoverable_channel_errors__exception_no_cache(self): """Ensure the recoverable_channel_errors can be retrieved without an initialized transport. 
""" class MyTransport(Transport): recoverable_channel_errors = (KeyError,) conn = Connection(transport=MyTransport) MyTransport.__init__ = Mock(side_effect=Exception) assert conn.recoverable_channel_errors == (KeyError,) def test_multiple_urls_hostname(self): conn = Connection(['example.com;amqp://example.com']) assert conn.as_uri() == 'amqp://guest:**@example.com:5672//' conn = Connection(['example.com', 'amqp://example.com']) assert conn.as_uri() == 'amqp://guest:**@example.com:5672//' conn = Connection('example.com;example.com;') assert conn.as_uri() == 'amqp://guest:**@example.com:5672//' def test_connection_respect_its_timeout(self): invalid_port = 1222 with Connection( f'amqp://guest:guest@localhost:{invalid_port}//', transport_options={'max_retries': 2}, connect_timeout=1 ) as conn: with pytest.raises(OperationalError): conn.default_channel def test_connection_failover_without_total_timeout(self): with Connection( ['server1', 'server2'], transport=TimeoutingTransport, connect_timeout=1, transport_options={'interval_start': 0, 'interval_step': 0}, ) as conn: conn._establish_connection = Mock( side_effect=conn._establish_connection ) with pytest.raises(OperationalError): conn.default_channel # Never retried, because `retry_over_time` `timeout` is equal # to `connect_timeout` conn._establish_connection.assert_called_once() def test_connection_failover_with_total_timeout(self): with Connection( ['server1', 'server2'], transport=TimeoutingTransport, connect_timeout=1, transport_options={'connect_retries_timeout': 2, 'interval_start': 0, 'interval_step': 0}, ) as conn: conn._establish_connection = Mock( side_effect=conn._establish_connection ) with pytest.raises(OperationalError): conn.default_channel assert conn._establish_connection.call_count == 2 def test_connection_timeout_with_errback(self): errback = Mock() with Connection( ['server1', 'server2'], transport=TimeoutingTransport, connect_timeout=1, transport_options={ 'connect_retries_timeout': 2, 'interval_start': 0, 'interval_step': 0, 'errback': errback }, ) as conn: with pytest.raises(OperationalError): conn.default_channel errback.assert_called() def test_connection_timeout_with_callback(self): callback = Mock() with Connection( ['server1', 'server2'], transport=TimeoutingTransport, connect_timeout=1, transport_options={ 'connect_retries_timeout': 2, 'interval_start': 0, 'interval_step': 0, 'callback': callback }, ) as conn: with pytest.raises(OperationalError): conn.default_channel callback.assert_called() class test_Connection_with_transport_options: transport_options = {'pool_recycler': 3600, 'echo': True} def setup_method(self): self.conn = Connection(port=5672, transport=Transport, transport_options=self.transport_options) def test_establish_connection(self): conn = self.conn assert conn.transport_options == self.transport_options class xResource(Resource): def setup(self): pass class ResourceCase: def create_resource(self, limit): raise NotImplementedError('subclass responsibility') def assert_state(self, P, avail, dirty): assert P._resource.qsize() == avail assert len(P._dirty) == dirty def test_setup(self): with pytest.raises(NotImplementedError): Resource() def test_acquire__release(self): P = self.create_resource(10) self.assert_state(P, 10, 0) chans = [P.acquire() for _ in range(10)] self.assert_state(P, 0, 10) with pytest.raises(P.LimitExceeded): P.acquire() chans.pop().release() self.assert_state(P, 1, 9) [chan.release() for chan in chans] self.assert_state(P, 10, 0) def test_acquire_prepare_raises(self): P = 
self.create_resource(10) assert len(P._resource.queue) == 10 P.prepare = Mock() P.prepare.side_effect = IOError() with pytest.raises(IOError): P.acquire(block=True) assert len(P._resource.queue) == 10 def test_acquire_no_limit(self): P = self.create_resource(None) P.acquire().release() def test_acquire_resize_in_use(self): P = self.create_resource(5) self.assert_state(P, 5, 0) chans = [P.acquire() for _ in range(5)] self.assert_state(P, 0, 5) with pytest.raises(RuntimeError): P.resize(4) [chan.release() for chan in chans] self.assert_state(P, 5, 0) def test_acquire_resize_ignore_err_no_shrink(self): P = self.create_resource(5) self.assert_state(P, 5, 0) chans = [P.acquire() for _ in range(5)] self.assert_state(P, 0, 5) P.resize(4, ignore_errors=True) self.assert_state(P, 0, 5) [chan.release() for chan in chans] self.assert_state(P, 5, 0) def test_acquire_resize_ignore_err_shrink(self): P = self.create_resource(5) self.assert_state(P, 5, 0) chans = [P.acquire() for _ in range(4)] self.assert_state(P, 1, 4) P.resize(4, ignore_errors=True) self.assert_state(P, 0, 4) [chan.release() for chan in chans] self.assert_state(P, 4, 0) def test_acquire_resize_larger(self): P = self.create_resource(1) self.assert_state(P, 1, 0) c1 = P.acquire() self.assert_state(P, 0, 1) with pytest.raises(P.LimitExceeded): P.acquire() P.resize(2) self.assert_state(P, 1, 1) c2 = P.acquire() self.assert_state(P, 0, 2) c1.release() c2.release() self.assert_state(P, 2, 0) def test_acquire_resize_force_smaller(self): P = self.create_resource(2) self.assert_state(P, 2, 0) c1 = P.acquire() c2 = P.acquire() self.assert_state(P, 0, 2) with pytest.raises(P.LimitExceeded): P.acquire() P.resize(1, force=True) # acts like reset del c1 del c2 self.assert_state(P, 1, 0) c1 = P.acquire() self.assert_state(P, 0, 1) with pytest.raises(P.LimitExceeded): P.acquire() c1.release() self.assert_state(P, 1, 0) def test_acquire_resize_reset(self): P = self.create_resource(2) self.assert_state(P, 2, 0) c1 = P.acquire() c2 = P.acquire() self.assert_state(P, 0, 2) with pytest.raises(P.LimitExceeded): P.acquire() P.resize(3, reset=True) del c1 del c2 self.assert_state(P, 3, 0) c1 = P.acquire() c2 = P.acquire() c3 = P.acquire() self.assert_state(P, 0, 3) with pytest.raises(P.LimitExceeded): P.acquire() c1.release() c2.release() c3.release() self.assert_state(P, 3, 0) def test_replace_when_limit(self): P = self.create_resource(10) r = P.acquire() P._dirty = Mock() P.close_resource = Mock() P.replace(r) P._dirty.discard.assert_called_with(r) P.close_resource.assert_called_with(r) def test_replace_no_limit(self): P = self.create_resource(None) r = P.acquire() P._dirty = Mock() P.close_resource = Mock() P.replace(r) P._dirty.discard.assert_not_called() P.close_resource.assert_called_with(r) def test_interface_prepare(self): x = xResource() assert x.prepare(10) == 10 def test_force_close_all_handles_AttributeError(self): P = self.create_resource(10) cr = P.collect_resource = Mock() cr.side_effect = AttributeError('x') P.acquire() assert P._dirty P.force_close_all() def test_force_close_all_no_mutex(self): P = self.create_resource(10) P.close_resource = Mock() m = P._resource = Mock() m.mutex = None m.queue.pop.side_effect = IndexError P.force_close_all() def test_add_when_empty(self): P = self.create_resource(None) P._resource.queue.clear() assert not P._resource.queue P._add_when_empty() assert P._resource.queue class test_ConnectionPool(ResourceCase): def create_resource(self, limit): return Connection(port=5672, transport=Transport).Pool(limit) def 
test_collect_resource__does_not_collect_lazy_resource(self):
        P = self.create_resource(10)
        res = lazy(object())
        res.collect = Mock(name='collect')
        P.collect_resource(res)
        res.collect.assert_not_called()

    def test_collect_resource(self):
        res = Mock(name='res')
        P = self.create_resource(10)
        P.collect_resource(res, socket_timeout=10.3)
        res.collect.assert_called_with(10.3)

    def test_setup(self):
        P = self.create_resource(10)
        q = P._resource.queue
        assert q[0]()._connection is None
        assert q[1]()._connection is None
        assert q[2]()._connection is None

    def test_acquire_raises_evaluated(self):
        P = self.create_resource(1)
        # evaluate the connection first
        r = P.acquire()
        r.release()
        P.prepare = Mock()
        P.prepare.side_effect = MemoryError()
        P.release = Mock()
        with pytest.raises(MemoryError):
            with P.acquire():
                assert False
        P.release.assert_called_with(r)

    def test_release_no__debug(self):
        P = self.create_resource(10)
        R = Mock()
        R._debug.side_effect = AttributeError()
        P.release_resource(R)

    def test_setup_no_limit(self):
        P = self.create_resource(None)
        assert not P._resource.queue
        assert P.limit is None

    def test_prepare_not_callable(self):
        P = self.create_resource(None)
        conn = Connection('memory://')
        assert P.prepare(conn) is conn

    def test_acquire_channel(self):
        P = self.create_resource(10)
        with P.acquire_channel() as (conn, channel):
            assert channel is conn.default_channel

    def test_exception_during_connection_use(self):
        """Tests that connections retrieved from a pool are replaced.

        In case of an exception during usage of a connection, it is required
        that the connection is 'replaced' (effectively closing the connection)
        before releasing it back into the pool. This ensures that reconnecting
        to the broker is required before the next usage.
        """
        P = self.create_resource(1)

        # Raising an exception during a network call should cause the
        # connection to be replaced.
        with pytest.raises(IOError):
            with P.acquire() as connection:
                connection.connect()
                connection.heartbeat_check = Mock()
                connection.heartbeat_check.side_effect = IOError()
                _ = connection.heartbeat_check()

        # Acquiring the same connection from the pool yields a disconnected Connection
        # object.
with P.acquire() as connection: assert not connection.connected # acquire_channel automatically reconnects with pytest.raises(IOError): with P.acquire_channel() as (connection, _): # The Connection object should still be connected assert connection.connected connection.heartbeat_check = Mock() connection.heartbeat_check.side_effect = IOError() _ = connection.heartbeat_check() with P.acquire() as connection: # The connection should be closed assert not connection.connected class test_ChannelPool(ResourceCase): def create_resource(self, limit): return Connection(port=5672, transport=Transport).ChannelPool(limit) def test_setup(self): P = self.create_resource(10) q = P._resource.queue with pytest.raises(AttributeError): q[0].basic_consume def test_setup_no_limit(self): P = self.create_resource(None) assert not P._resource.queue assert P.limit is None def test_prepare_not_callable(self): P = self.create_resource(10) conn = Connection('memory://') chan = conn.default_channel assert P.prepare(chan) is chan kombu-5.5.3/t/unit/test_entity.py000066400000000000000000000336101477772317200170340ustar00rootroot00000000000000from __future__ import annotations import pickle from unittest.mock import Mock, call import pytest from kombu import Connection, Exchange, Producer, Queue, binding from kombu.abstract import MaybeChannelBound from kombu.exceptions import NotBoundError from kombu.serialization import registry from t.mocks import Transport def get_conn() -> Connection: return Connection(transport=Transport) class test_binding: def test_constructor(self) -> None: x = binding( Exchange('foo'), 'rkey', arguments={'barg': 'bval'}, unbind_arguments={'uarg': 'uval'}, ) assert x.exchange == Exchange('foo') assert x.routing_key == 'rkey' assert x.arguments == {'barg': 'bval'} assert x.unbind_arguments == {'uarg': 'uval'} def test_declare(self) -> None: chan = get_conn().channel() x = binding(Exchange('foo'), 'rkey') x.declare(chan) assert 'exchange_declare' in chan def test_declare_no_exchange(self) -> None: chan = get_conn().channel() x = binding() x.declare(chan) assert 'exchange_declare' not in chan def test_bind(self) -> None: chan = get_conn().channel() x = binding(Exchange('foo')) x.bind(Exchange('bar')(chan)) assert 'exchange_bind' in chan def test_unbind(self) -> None: chan = get_conn().channel() x = binding(Exchange('foo')) x.unbind(Exchange('bar')(chan)) assert 'exchange_unbind' in chan def test_repr(self) -> None: b = binding(Exchange('foo'), 'rkey') assert 'foo' in repr(b) assert 'rkey' in repr(b) class test_Exchange: def test_bound(self) -> None: exchange = Exchange('foo', 'direct') assert not exchange.is_bound assert ' None: assert hash(Exchange('a')) == hash(Exchange('a')) assert hash(Exchange('a')) != hash(Exchange('b')) def test_can_cache_declaration(self) -> None: assert Exchange('a', durable=True).can_cache_declaration assert Exchange('a', durable=False).can_cache_declaration assert not Exchange('a', auto_delete=True).can_cache_declaration assert not Exchange( 'a', durable=True, auto_delete=True, ).can_cache_declaration def test_pickle(self) -> None: e1 = Exchange('foo', 'direct') e2 = pickle.loads(pickle.dumps(e1)) assert e1 == e2 def test_eq(self) -> None: e1 = Exchange('foo', 'direct') e2 = Exchange('foo', 'direct') assert e1 == e2 e3 = Exchange('foo', 'topic') assert e1 != e3 assert e1.__eq__(True) == NotImplemented def test_revive(self) -> None: exchange = Exchange('foo', 'direct') conn = get_conn() chan = conn.channel() # reviving unbound channel is a noop. 
exchange.revive(chan) assert not exchange.is_bound assert exchange._channel is None bound = exchange.bind(chan) assert bound.is_bound assert bound.channel is chan chan2 = conn.channel() bound.revive(chan2) assert bound.is_bound assert bound._channel is chan2 def test_assert_is_bound(self) -> None: exchange = Exchange('foo', 'direct') with pytest.raises(NotBoundError): exchange.declare() conn = get_conn() chan = conn.channel() exchange.bind(chan).declare() assert 'exchange_declare' in chan def test_set_transient_delivery_mode(self) -> None: exc = Exchange('foo', 'direct', delivery_mode='transient') assert exc.delivery_mode == Exchange.TRANSIENT_DELIVERY_MODE def test_set_passive_mode(self) -> None: exc = Exchange('foo', 'direct', passive=True) assert exc.passive def test_set_persistent_delivery_mode(self) -> None: exc = Exchange('foo', 'direct', delivery_mode='persistent') assert exc.delivery_mode == Exchange.PERSISTENT_DELIVERY_MODE def test_bind_at_instantiation(self) -> None: assert Exchange('foo', channel=get_conn().channel()).is_bound def test_create_message(self) -> None: chan = get_conn().channel() Exchange('foo', channel=chan).Message({'foo': 'bar'}) assert 'prepare_message' in chan def test_publish(self) -> None: chan = get_conn().channel() Exchange('foo', channel=chan).publish('the quick brown fox') assert 'basic_publish' in chan def test_delete(self) -> None: chan = get_conn().channel() Exchange('foo', channel=chan).delete() assert 'exchange_delete' in chan def test__repr__(self) -> None: b = Exchange('foo', 'topic') assert 'foo(topic)' in repr(b) assert 'Exchange' in repr(b) def test_bind_to(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic') bar = Exchange('bar', 'topic') foo(chan).bind_to(bar) assert 'exchange_bind' in chan def test_bind_to_by_name(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic') foo(chan).bind_to('bar') assert 'exchange_bind' in chan def test_unbind_from(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic') bar = Exchange('bar', 'topic') foo(chan).unbind_from(bar) assert 'exchange_unbind' in chan def test_unbind_from_by_name(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic') foo(chan).unbind_from('bar') assert 'exchange_unbind' in chan def test_declare__no_declare(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic', no_declare=True) foo(chan).declare() assert 'exchange_declare' not in chan def test_declare__internal_exchange(self) -> None: chan = get_conn().channel() foo = Exchange('amq.rabbitmq.trace', 'topic') foo(chan).declare() assert 'exchange_declare' not in chan def test_declare(self) -> None: chan = get_conn().channel() foo = Exchange('foo', 'topic', no_declare=False) foo(chan).declare() assert 'exchange_declare' in chan class test_Queue: def setup_method(self) -> None: self.exchange = Exchange('foo', 'direct') def test_constructor_with_actual_exchange(self) -> None: exchange = Exchange('exchange_name', 'direct') queue = Queue(name='queue_name', exchange=exchange) assert queue.exchange == exchange def test_constructor_with_string_exchange(self) -> None: exchange_name = 'exchange_name' queue = Queue(name='queue_name', exchange=exchange_name) assert queue.exchange == Exchange(exchange_name) def test_constructor_with_default_exchange(self) -> None: queue = Queue(name='queue_name') assert queue.exchange == Exchange('') def test_hash(self) -> None: assert hash(Queue('a')) == hash(Queue('a')) assert hash(Queue('a')) != hash(Queue('b')) def 
test_repr_with_bindings(self) -> None: ex = Exchange('foo') x = Queue('foo', bindings=[ex.binding('A'), ex.binding('B')]) assert repr(x) def test_anonymous(self) -> None: chan = Mock() x = Queue(bindings=[binding(Exchange('foo'), 'rkey')]) chan.queue_declare.return_value = 'generated', 0, 0 xx = x(chan) xx.declare() assert xx.name == 'generated' def test_basic_get__accept_disallowed(self) -> None: conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = Producer(conn) p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) message = q(conn).get(no_ack=True) assert message is not None with pytest.raises(q.ContentDisallowed): message.decode() def test_basic_get__accept_allowed(self) -> None: conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = Producer(conn) p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) message = q(conn).get(accept=['pickle'], no_ack=True) assert message is not None payload = message.decode() assert payload['complex'] def test_when_bound_but_no_exchange(self) -> None: q = Queue('a') q.exchange = None assert q.when_bound() is None def test_declare_but_no_exchange(self) -> None: q = Queue('a') q.queue_declare = Mock() q.queue_bind = Mock() q.exchange = None q.declare() q.queue_declare.assert_called_with( channel=None, nowait=False, passive=False) def test_declare__no_declare(self) -> None: q = Queue('a', no_declare=True) q.queue_declare = Mock() q.queue_bind = Mock() q.exchange = None q.declare() q.queue_declare.assert_not_called() q.queue_bind.assert_not_called() def test_bind_to_when_name(self) -> None: chan = Mock() q = Queue('a') q(chan).bind_to('ex') chan.queue_bind.assert_called() def test_get_when_no_m2p(self) -> None: chan = Mock() q = Queue('a')(chan) chan.message_to_python = None assert q.get() def test_multiple_bindings(self) -> None: chan = Mock() q = Queue('mul', [ binding(Exchange('mul1'), 'rkey1'), binding(Exchange('mul2'), 'rkey2'), binding(Exchange('mul3'), 'rkey3'), ]) q(chan).declare() assert call( nowait=False, exchange='mul1', auto_delete=False, passive=False, arguments=None, type='direct', durable=True, ) in chan.exchange_declare.call_args_list def test_can_cache_declaration(self) -> None: assert Queue('a', durable=True).can_cache_declaration assert Queue('a', durable=False).can_cache_declaration assert not Queue( 'a', queue_arguments={'x-expires': 100} ).can_cache_declaration def test_eq(self) -> None: q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx') q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx') assert q1 == q2 assert q1.__eq__(True) == NotImplemented q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx') assert q1 != q3 def test_exclusive_implies_auto_delete(self) -> None: assert Queue('foo', self.exchange, exclusive=True).auto_delete def test_binds_at_instantiation(self) -> None: assert Queue('foo', self.exchange, channel=get_conn().channel()).is_bound def test_also_binds_exchange(self) -> None: chan = get_conn().channel() b = Queue('foo', self.exchange) assert not b.is_bound assert not b.exchange.is_bound b = b.bind(chan) assert b.is_bound assert b.exchange.is_bound assert b.channel is b.exchange.channel assert b.exchange is not self.exchange def test_declare(self) -> None: chan = get_conn().channel() b = Queue('foo', self.exchange, 'foo', channel=chan) assert b.is_bound b.declare() assert 'exchange_declare' in chan assert 'queue_declare' in chan assert 'queue_bind' in chan def test_get(self) -> None: b = Queue('foo', 
self.exchange, 'foo', channel=get_conn().channel()) b.get() assert 'basic_get' in b.channel def test_purge(self) -> None: b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel()) b.purge() assert 'queue_purge' in b.channel def test_consume(self) -> None: b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel()) b.consume('fifafo', None) assert 'basic_consume' in b.channel def test_consume_with_callbacks(self) -> None: chan = Mock() b = Queue('foo', self.exchange, 'foo', channel=chan) callback = Mock() on_cancel = Mock() b.consume('fifafo', callback=callback, on_cancel=on_cancel) chan.basic_consume.assert_called_with( queue='foo', no_ack=False, consumer_tag='fifafo', callback=callback, nowait=False, arguments=None, on_cancel=on_cancel ) def test_cancel(self) -> None: b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel()) b.cancel('fifafo') assert 'basic_cancel' in b.channel def test_delete(self) -> None: b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel()) b.delete() assert 'queue_delete' in b.channel def test_queue_unbind(self) -> None: b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel()) b.queue_unbind() assert 'queue_unbind' in b.channel def test_as_dict(self) -> None: q = Queue('foo', self.exchange, 'rk') d = q.as_dict(recurse=True) assert d['exchange']['name'] == self.exchange.name def test_queue_dump(self) -> None: b = binding(self.exchange, 'rk') q = Queue('foo', self.exchange, 'rk', bindings=[b]) d = q.as_dict(recurse=True) assert d['bindings'][0]['routing_key'] == 'rk' registry.dumps(d) def test__repr__(self) -> None: b = Queue('foo', self.exchange, 'foo') assert 'foo' in repr(b) assert 'Queue' in repr(b) class test_MaybeChannelBound: def test_repr(self) -> None: assert repr(MaybeChannelBound()) kombu-5.5.3/t/unit/test_exceptions.py000066400000000000000000000003451477772317200177000ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock from kombu.exceptions import HttpError class test_HttpError: def test_str(self) -> None: assert str(HttpError(200, 'msg', Mock(name='response'))) kombu-5.5.3/t/unit/test_log.py000066400000000000000000000113031477772317200162740ustar00rootroot00000000000000from __future__ import annotations import logging import sys from unittest.mock import ANY, Mock, patch from kombu.log import (Log, LogMixin, get_logger, get_loglevel, safeify_format, setup_logging) class test_get_logger: def test_when_string(self): logger = get_logger('foo') assert logger is logging.getLogger('foo') h1 = logger.handlers[0] assert isinstance(h1, logging.NullHandler) def test_when_logger(self): logger = get_logger(logging.getLogger('foo')) h1 = logger.handlers[0] assert isinstance(h1, logging.NullHandler) def test_with_custom_handler(self): logger = logging.getLogger('bar') handler = logging.NullHandler() logger.addHandler(handler) logger = get_logger('bar') assert logger.handlers[0] is handler def test_get_loglevel(self): assert get_loglevel('DEBUG') == logging.DEBUG assert get_loglevel('ERROR') == logging.ERROR assert get_loglevel(logging.INFO) == logging.INFO def test_safe_format(): fmt = 'The %r jumped %x over the %s' args = ['frog', 'foo', 'elephant'] res = list(safeify_format(fmt, args)) assert [x.strip('u') for x in res] == ["'frog'", 'foo', 'elephant'] class test_LogMixin: def setup_method(self): self.log = Log('Log', Mock()) self.logger = self.log.logger def test_debug(self): self.log.debug('debug') self.logger.log.assert_called_with(logging.DEBUG, 'Log - debug') def 
test_info(self): self.log.info('info') self.logger.log.assert_called_with(logging.INFO, 'Log - info') def test_warning(self): self.log.warn('warning') self.logger.log.assert_called_with(logging.WARN, 'Log - warning') def test_error(self): self.log.error('error', exc_info='exc') self.logger.log.assert_called_with( logging.ERROR, 'Log - error', exc_info='exc', ) def test_critical(self): self.log.critical('crit', exc_info='exc') self.logger.log.assert_called_with( logging.CRITICAL, 'Log - crit', exc_info='exc', ) def test_error_when_DISABLE_TRACEBACKS(self): from kombu import log log.DISABLE_TRACEBACKS = True try: self.log.error('error') self.logger.log.assert_called_with(logging.ERROR, 'Log - error') finally: log.DISABLE_TRACEBACKS = False def test_get_loglevel(self): assert self.log.get_loglevel('DEBUG') == logging.DEBUG assert self.log.get_loglevel('ERROR') == logging.ERROR assert self.log.get_loglevel(logging.INFO) == logging.INFO def test_is_enabled_for(self): self.logger.isEnabledFor.return_value = True assert self.log.is_enabled_for('DEBUG') self.logger.isEnabledFor.assert_called_with(logging.DEBUG) def test_LogMixin_get_logger(self): assert LogMixin().get_logger() is logging.getLogger('LogMixin') def test_Log_get_logger(self): assert Log('test_Log').get_logger() is logging.getLogger('test_Log') def test_log_when_not_enabled(self): self.logger.isEnabledFor.return_value = False self.log.debug('debug') self.logger.log.assert_not_called() def test_log_with_format(self): self.log.debug('Host %r removed', 'example.com') self.logger.log.assert_called_with( logging.DEBUG, 'Log - Host %s removed', ANY, ) assert self.logger.log.call_args[0][2].strip('u') == "'example.com'" class test_setup_logging: @patch('logging.getLogger') def test_set_up_default_values(self, getLogger): logger = logging.getLogger.return_value = Mock() logger.handlers = [] setup_logging() logger.setLevel.assert_called_with(logging.ERROR) logger.addHandler.assert_called() ah_args, _ = logger.addHandler.call_args handler = ah_args[0] assert isinstance(handler, logging.StreamHandler) assert handler.stream is sys.__stderr__ @patch('logging.getLogger') @patch('kombu.log.WatchedFileHandler') def test_setup_custom_values(self, getLogger, WatchedFileHandler): logger = logging.getLogger.return_value = Mock() logger.handlers = [] setup_logging(loglevel=logging.DEBUG, logfile='/var/logfile') logger.setLevel.assert_called_with(logging.DEBUG) logger.addHandler.assert_called() WatchedFileHandler.assert_called() @patch('logging.getLogger') def test_logger_already_setup(self, getLogger): logger = logging.getLogger.return_value = Mock() logger.handlers = [Mock()] setup_logging() logger.setLevel.assert_not_called() kombu-5.5.3/t/unit/test_matcher.py000066400000000000000000000021551477772317200171430ustar00rootroot00000000000000from __future__ import annotations import pytest from kombu.matcher import (MatcherNotInstalled, fnmatch, match, register, registry, rematch, unregister) class test_Matcher: def test_register_match_unregister_matcher(self): register("test_matcher", rematch) registry.matcher_pattern_first.append("test_matcher") assert registry._matchers["test_matcher"] == rematch assert match("data", r"d.*", "test_matcher") is not None assert registry._default_matcher == fnmatch registry._set_default_matcher("test_matcher") assert registry._default_matcher == rematch unregister("test_matcher") assert "test_matcher" not in registry._matchers registry._set_default_matcher("glob") assert registry._default_matcher == fnmatch def 
test_unregister_matcher_not_registered(self): with pytest.raises(MatcherNotInstalled): unregister('notinstalled') def test_match_using_unregistered_matcher(self): with pytest.raises(MatcherNotInstalled): match("data", r"d.*", "notinstalled") kombu-5.5.3/t/unit/test_message.py000066400000000000000000000024051477772317200171420ustar00rootroot00000000000000from __future__ import annotations import sys from unittest.mock import Mock, patch import pytest from kombu.message import Message class test_Message: def test_repr(self): assert repr(Message('b', channel=Mock())) def test_decode(self): m = Message('body', channel=Mock()) decode = m._decode = Mock() assert m._decoded_cache is None assert m.decode() is m._decode.return_value assert m._decoded_cache is m._decode.return_value m._decode.assert_called_with() m._decode = Mock() assert m.decode() is decode.return_value def test_reraise_error(self): m = Message('body', channel=Mock()) callback = Mock(name='callback') try: raise KeyError('foo') except KeyError: m.errors.append(sys.exc_info()) m._reraise_error(callback) callback.assert_called() with pytest.raises(KeyError): m._reraise_error(None) @patch('kombu.message.decompress') def test_decompression_stores_error(self, decompress): decompress.side_effect = RuntimeError() m = Message('body', channel=Mock(), headers={'compression': 'zlib'}) with pytest.raises(RuntimeError): m._reraise_error(None) kombu-5.5.3/t/unit/test_messaging.py000066400000000000000000000647751477772317200175150ustar00rootroot00000000000000from __future__ import annotations import pickle import sys from collections import defaultdict from unittest.mock import ANY, Mock, patch import pytest from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.exceptions import MessageStateError, OperationalError from kombu.utils import json from kombu.utils.functional import ChannelPromise from t.mocks import Transport class test_Producer: def setup_method(self): self.exchange = Exchange('foo', 'direct') self.connection = Connection(transport=Transport) self.connection.connect() assert self.connection.connection.connected assert not self.exchange.is_bound def test_repr(self): p = Producer(self.connection) assert repr(p) def test_pickle(self): chan = Mock() producer = Producer(chan, serializer='pickle') p2 = pickle.loads(pickle.dumps(producer)) assert p2.serializer == producer.serializer def test_no_channel(self): p = Producer(None) assert not p._channel @patch('kombu.messaging.maybe_declare') def test_maybe_declare(self, maybe_declare): p = self.connection.Producer() q = Queue('foo') p.maybe_declare(q) maybe_declare.assert_called_with(q, p.channel, False) @patch('kombu.common.maybe_declare') def test_maybe_declare_when_entity_false(self, maybe_declare): p = self.connection.Producer() p.maybe_declare(None) maybe_declare.assert_not_called() def test_auto_declare(self): channel = self.connection.channel() p = Producer(channel, self.exchange, auto_declare=True) # creates Exchange clone at bind assert p.exchange is not self.exchange assert p.exchange.is_bound # auto_declare declares exchange' assert 'exchange_declare' not in channel p.publish('foo') assert 'exchange_declare' in channel def test_manual_declare(self): channel = self.connection.channel() p = Producer(channel, self.exchange, auto_declare=False) assert p.exchange.is_bound # auto_declare=False does not declare exchange assert 'exchange_declare' not in channel # p.declare() declares exchange') p.declare() assert 'exchange_declare' in channel def test_prepare(self): 
message = {'the quick brown fox': 'jumps over the lazy dog'} channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, headers={}) assert json.loads(m) == message assert ctype == 'application/json' assert cencoding == 'utf-8' def test_prepare_compression(self): message = {'the quick brown fox': 'jumps over the lazy dog'} channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') headers = {} m, ctype, cencoding = p._prepare(message, compression='zlib', headers=headers) assert ctype == 'application/json' assert cencoding == 'utf-8' assert headers['compression'] == 'application/x-gzip' import zlib assert json.loads(zlib.decompress(m).decode('utf-8')) == message def test_prepare_custom_content_type(self): message = b'the quick brown fox' channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, content_type='custom') assert m == message assert ctype == 'custom' assert cencoding == 'binary' m, ctype, cencoding = p._prepare(message, content_type='custom', content_encoding='alien') assert m == message assert ctype == 'custom' assert cencoding == 'alien' def test_prepare_is_already_unicode(self): message = 'the quick brown fox' channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, content_type='text/plain') assert m == message.encode('utf-8') assert ctype == 'text/plain' assert cencoding == 'utf-8' m, ctype, cencoding = p._prepare(message, content_type='text/plain', content_encoding='utf-8') assert m == message.encode('utf-8') assert ctype == 'text/plain' assert cencoding == 'utf-8' def test_publish_retry_policy(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() expected_retry_policy = { 'max_retries': 20 } p.publish('hello', retry=True, retry_policy=expected_retry_policy) assert self.connection.transport_options == expected_retry_policy def test_publish_with_Exchange_instance(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient') assert p._channel.basic_publish.call_args[1]['exchange'] == 'foo' def test_publish_with_expiration(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('hello', exchange=Exchange('foo'), expiration=10) properties = p._channel.prepare_message.call_args[0][5] assert properties['expiration'] == '10000' def test_publish_with_timeout(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('test_timeout', exchange=Exchange('foo'), timeout=1) timeout = p._channel.basic_publish.call_args[1]['timeout'] assert timeout == 1 def test_publish_with_timeout_and_retry_policy(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('test_timeout', exchange=Exchange('foo'), timeout=1, retry_policy={ "max_retries": 20, "interval_start": 1, "interval_step": 2, "interval_max": 30, "retry_errors": (OperationalError,) }) timeout = p._channel.basic_publish.call_args[1]['timeout'] assert timeout == 1 def test_publish_with_confirm_timeout(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() 
p.publish('test_timeout', exchange=Exchange('foo'), confirm_timeout=1) confirm_timeout = p._channel.basic_publish.call_args[1]['confirm_timeout'] assert confirm_timeout == 1 @patch('kombu.messaging.maybe_declare') def test_publish_maybe_declare_with_retry_policy(self, maybe_declare): p = self.connection.Producer(exchange=Exchange('foo')) p.channel = Mock() expected_retry_policy = { "max_retries": 20, "interval_start": 1, "interval_step": 2, "interval_max": 30, "retry_errors": (OperationalError,) } p.publish('test_maybe_declare', exchange=Exchange('foo'), retry=True, retry_policy=expected_retry_policy) maybe_declare.assert_called_once_with(ANY, ANY, True, **expected_retry_policy) @patch('kombu.common._imaybe_declare') def test_publish_maybe_declare_with_retry_policy_ensure_connection(self, _imaybe_declare): p = self.connection.Producer(exchange=Exchange('foo')) p.channel = Mock() expected_retry_policy = { "max_retries": 20, "interval_start": 1, "interval_step": 2, "interval_max": 30, "retry_errors": (OperationalError,) } p.publish('test_maybe_declare', exchange=Exchange('foo'), retry=True, retry_policy=expected_retry_policy) _imaybe_declare.assert_called_once_with(ANY, ANY, **expected_retry_policy) def test_publish_with_reply_to(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() assert not p.exchange.name p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo')) properties = p._channel.prepare_message.call_args[0][5] assert properties['reply_to'] == 'foo' def test_set_on_return(self): chan = Mock() chan.events = defaultdict(Mock) p = Producer(ChannelPromise(lambda: chan), on_return='on_return') p.channel chan.events['basic_return'].add.assert_called_with('on_return') def test_publish_retry_calls_ensure(self): p = Producer(Mock()) p._connection = Mock() p._connection.declared_entities = set() ensure = p.connection.ensure = Mock() p.publish('foo', exchange='foo', retry=True) ensure.assert_called() def test_publish_retry_with_declare(self): p = self.connection.Producer() p.maybe_declare = Mock() p.connection.ensure = Mock() ex = Exchange('foo') p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex]) p.maybe_declare.assert_called_with(ex, retry=False) def test_revive_when_channel_is_connection(self): p = self.connection.Producer() p.exchange = Mock() new_conn = Connection('memory://') defchan = new_conn.default_channel p.revive(new_conn) assert p.channel is defchan p.exchange.revive.assert_called_with(defchan) def test_enter_exit(self): p = self.connection.Producer() p.release = Mock() with p as x: assert x is p p.release.assert_called_with() def test_connection_property_handles_AttributeError(self): p = self.connection.Producer() p.channel = object() p.__connection__ = None assert p.connection is None def test_publish(self): channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') message = {'the quick brown fox': 'jumps over the lazy dog'} ret = p.publish(message, routing_key='process') assert 'prepare_message' in channel assert 'basic_publish' in channel m, exc, rkey = ret assert json.loads(m['body']) == message assert m['content_type'] == 'application/json' assert m['content_encoding'] == 'utf-8' assert m['priority'] == 0 assert m['properties']['delivery_mode'] == 2 assert exc == p.exchange.name assert rkey == 'process' def test_no_exchange(self): chan = self.connection.channel() p = Producer(chan) assert not p.exchange.name def test_revive(self): chan = 
self.connection.channel() p = Producer(chan) chan2 = self.connection.channel() p.revive(chan2) assert p.channel is chan2 assert p.exchange.channel is chan2 def test_on_return(self): chan = self.connection.channel() def on_return(exception, exchange, routing_key, message): pass p = Producer(chan, on_return=on_return) assert on_return in chan.events['basic_return'] assert p.on_return class test_Consumer: def setup_method(self): self.connection = Connection(transport=Transport) self.connection.connect() assert self.connection.connection.connected self.exchange = Exchange('foo', 'direct') def test_accept(self): a = Consumer(self.connection) assert a.accept is None b = Consumer(self.connection, accept=['json', 'pickle']) assert b.accept == { 'application/json', 'application/x-python-serialize', } c = Consumer(self.connection, accept=b.accept) assert b.accept == c.accept def test_enter_exit_cancel_raises(self): c = Consumer(self.connection) c.cancel = Mock(name='Consumer.cancel') c.cancel.side_effect = KeyError('foo') with c: pass c.cancel.assert_called_with() def test_enter_exit_cancel_not_called_on_connection_error(self): c = Consumer(self.connection) c.cancel = Mock(name='Consumer.cancel') assert self.connection.connection_errors with pytest.raises(self.connection.connection_errors[0]): with c: raise self.connection.connection_errors[0]() c.cancel.assert_not_called() def test_receive_callback_accept(self): message = Mock(name='Message') message.errors = [] callback = Mock(name='on_message') c = Consumer(self.connection, accept=['json'], on_message=callback) c.on_decode_error = None c.channel = Mock(name='channel') c.channel.message_to_python = None c._receive_callback(message) callback.assert_called_with(message) assert message.accept == c.accept def test_accept__content_disallowed(self): conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = conn.Producer() p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) callback = Mock(name='callback') with conn.Consumer(queues=[q], callbacks=[callback]) as consumer: with pytest.raises(consumer.ContentDisallowed): conn.drain_events(timeout=1) callback.assert_not_called() def test_accept__content_allowed(self): conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = conn.Producer() p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) callback = Mock(name='callback') with conn.Consumer(queues=[q], accept=['pickle'], callbacks=[callback]): conn.drain_events(timeout=1) callback.assert_called() body, message = callback.call_args[0] assert body['complex'] def test_set_no_channel(self): c = Consumer(None) assert c.channel is None c.revive(Mock()) assert c.channel def test_set_no_ack(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True, no_ack=True) assert consumer.no_ack def test_add_queue_when_auto_declare(self): consumer = self.connection.Consumer(auto_declare=True) q = Mock() q.return_value = q consumer.add_queue(q) assert q in consumer.queues q.declare.assert_called_with() def test_add_queue_when_not_auto_declare(self): consumer = self.connection.Consumer(auto_declare=False) q = Mock() q.return_value = q consumer.add_queue(q) assert q in consumer.queues assert not q.declare.call_count def test_consume_without_queues_returns(self): consumer = self.connection.Consumer() consumer.queues[:] = [] assert consumer.consume() is None def 
test_consuming_from(self): consumer = self.connection.Consumer() consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')] consumer._active_tags = {'a': 1, 'b': 2} assert not consumer.consuming_from(Queue('c')) assert not consumer.consuming_from('c') assert not consumer.consuming_from(Queue('d')) assert not consumer.consuming_from('d') assert consumer.consuming_from(Queue('a')) assert consumer.consuming_from(Queue('b')) assert consumer.consuming_from('b') def test_receive_callback_without_m2p(self): channel = self.connection.channel() c = channel.Consumer() m2p = getattr(channel, 'message_to_python') channel.message_to_python = None try: message = Mock() message.errors = [] message.decode.return_value = 'Hello' recv = c.receive = Mock() c._receive_callback(message) recv.assert_called_with('Hello', message) finally: channel.message_to_python = m2p def test_receive_callback__message_errors(self): channel = self.connection.channel() channel.message_to_python = None c = channel.Consumer() message = Mock() try: raise KeyError('foo') except KeyError: message.errors = [sys.exc_info()] message._reraise_error.side_effect = KeyError() with pytest.raises(KeyError): c._receive_callback(message) def test_set_callbacks(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') callbacks = [lambda x, y: x, lambda x, y: x] consumer = Consumer(channel, queue, auto_declare=True, callbacks=callbacks) assert consumer.callbacks == callbacks def test_auto_declare(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.consume() consumer.consume() # twice is a noop assert consumer.queues[0] is not queue assert consumer.queues[0].is_bound assert consumer.queues[0].exchange.is_bound assert consumer.queues[0].exchange is not self.exchange for meth in ('exchange_declare', 'queue_declare', 'queue_bind', 'basic_consume'): assert meth in channel assert channel.called.count('basic_consume') == 1 assert consumer._active_tags consumer.cancel_by_queue(queue.name) consumer.cancel_by_queue(queue.name) assert not consumer._active_tags def test_consumer_tag_prefix(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, tag_prefix='consumer_') consumer.consume() assert consumer._active_tags[queue.name].startswith('consumer_') def test_manual_declare(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=False) assert consumer.queues[0] is not queue assert consumer.queues[0].is_bound assert consumer.queues[0].exchange.is_bound assert consumer.queues[0].exchange is not self.exchange for meth in ('exchange_declare', 'queue_declare', 'basic_consume'): assert meth not in channel consumer.declare() for meth in ('exchange_declare', 'queue_declare', 'queue_bind'): assert meth in channel assert 'basic_consume' not in channel consumer.consume() assert 'basic_consume' in channel def test_consume__cancel(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.consume() consumer.cancel() assert 'basic_cancel' in channel assert not consumer._active_tags def test___enter____exit__(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) context = consumer.__enter__() assert context is 
consumer assert consumer._active_tags res = consumer.__exit__(None, None, None) assert not res assert 'basic_cancel' in channel assert not consumer._active_tags def test_flow(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.flow(False) assert 'flow' in channel def test_qos(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.qos(30, 10, False) assert 'basic_qos' in channel def test_purge(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') b2 = Queue('qname2', self.exchange, 'rkey') b3 = Queue('qname3', self.exchange, 'rkey') b4 = Queue('qname4', self.exchange, 'rkey') consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True) consumer.purge() assert channel.called.count('queue_purge') == 4 def test_multiple_queues(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') b2 = Queue('qname2', self.exchange, 'rkey') b3 = Queue('qname3', self.exchange, 'rkey') b4 = Queue('qname4', self.exchange, 'rkey') consumer = Consumer(channel, [b1, b2, b3, b4]) consumer.consume() assert channel.called.count('exchange_declare') == 4 assert channel.called.count('queue_declare') == 4 assert channel.called.count('queue_bind') == 4 assert channel.called.count('basic_consume') == 4 assert len(consumer._active_tags) == 4 consumer.cancel() assert channel.called.count('basic_cancel') == 4 assert not len(consumer._active_tags) def test_receive_callback(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) received = [] def callback(message_data, message): received.append(message_data) message.ack() message.payload # trigger cache consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_ack' in channel assert 'message_to_python' in channel assert received[0] == {'foo': 'bar'} def test_basic_ack_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.ack() message.ack() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) def test_basic_reject(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.reject() consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject' in channel def test_basic_reject_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.reject() message.reject() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject' in channel def test_basic_reject__requeue(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.requeue() consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject:requeue' in channel def test_basic_reject__requeue_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = 
Consumer(channel, [b1]) def callback(message_data, message): message.requeue() message.requeue() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject:requeue' in channel def test_receive_without_callbacks_raises(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) with pytest.raises(NotImplementedError): consumer.receive(1, 2) def test_decode_error(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) consumer.channel.throw_decode_error = True with pytest.raises(ValueError): consumer._receive_callback({'foo': 'bar'}) def test_on_decode_error_callback(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') thrown = [] def on_decode_error(msg, exc): thrown.append((msg.body, exc)) consumer = Consumer(channel, [b1], on_decode_error=on_decode_error) consumer.channel.throw_decode_error = True consumer._receive_callback({'foo': 'bar'}) assert thrown m, exc = thrown[0] assert json.loads(m) == {'foo': 'bar'} assert isinstance(exc, ValueError) def test_recover(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) consumer.recover() assert 'basic_recover' in channel def test_revive(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) channel2 = self.connection.channel() consumer.revive(channel2) assert consumer.channel is channel2 assert consumer.queues[0].channel is channel2 assert consumer.queues[0].exchange.channel is channel2 def test_revive__with_prefetch_count(self): channel = Mock(name='channel') b1 = Queue('qname1', self.exchange, 'rkey') Consumer(channel, [b1], prefetch_count=14) channel.basic_qos.assert_called_with(0, 14, False) def test__repr__(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') assert repr(Consumer(channel, [b1])) def test_connection_property_handles_AttributeError(self): p = self.connection.Consumer() p.channel = object() assert p.connection is None kombu-5.5.3/t/unit/test_mixins.py000066400000000000000000000206351477772317200170320ustar00rootroot00000000000000from __future__ import annotations import socket from unittest.mock import Mock, patch import pytest from kombu.mixins import ConsumerMixin from t.mocks import ContextMock def Message(body, content_type='text/plain', content_encoding='utf-8'): m = Mock(name='Message') m.body = body m.content_type = content_type m.content_encoding = content_encoding return m class Cons(ConsumerMixin): def __init__(self, consumers): self.calls = Mock(name='ConsumerMixin') self.calls.get_consumers.return_value = consumers self.get_consumers = self.calls.get_consumers self.on_connection_revived = self.calls.on_connection_revived self.on_consume_ready = self.calls.on_consume_ready self.on_consume_end = self.calls.on_consume_end self.on_iteration = self.calls.on_iteration self.on_decode_error = self.calls.on_decode_error self.on_connection_error = self.calls.on_connection_error self.extra_context = ContextMock(name='extra_context') self.extra_context.return_value = self.extra_context class test_ConsumerMixin: def _context(self): Acons = ContextMock(name='consumerA') Bcons = ContextMock(name='consumerB') c = Cons([Acons, Bcons]) _conn = c.connection = ContextMock(name='connection') est = c.establish_connection = 
Mock(name='est_connection') est.return_value = _conn return c, Acons, Bcons def test_consume(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True) next(it) Acons.__enter__.assert_called_with() Bcons.__enter__.assert_called_with() c.extra_context.__enter__.assert_called_with() c.on_consume_ready.assert_called() c.on_iteration.assert_called_with() c.connection.drain_events.assert_called_with(timeout=1) next(it) next(it) next(it) c.should_stop = True with pytest.raises(StopIteration): next(it) def test_consume_drain_raises_socket_error(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True) c.connection.drain_events.side_effect = socket.error with pytest.raises(socket.error): next(it) def se2(*args, **kwargs): c.should_stop = True raise OSError() c.connection.drain_events.side_effect = se2 it = c.consume(no_ack=True) with pytest.raises(StopIteration): next(it) def test_consume_drain_raises_socket_timeout(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True, timeout=1) def se(*args, **kwargs): c.should_stop = True raise socket.timeout() c.connection.drain_events.side_effect = se with pytest.raises(socket.error): next(it) c.connection.heartbeat_check.assert_called() def test_consume_drain_heartbeat_check_no_timeout(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True, timeout=None) def se(*args, **kwargs): c.should_stop = True raise socket.timeout() c.connection.drain_events.side_effect = se with pytest.raises(StopIteration): next(it) c.connection.heartbeat_check.assert_called() it = c.consume(no_ack=True, timeout=0) c.connection.drain_events.side_effect = se with pytest.raises(StopIteration): next(it) c.connection.heartbeat_check.assert_called() def test_Consumer_context(self): c, Acons, Bcons = self._context() with c.Consumer() as (conn, channel, consumer): assert conn is c.connection assert channel is conn.default_channel c.on_connection_revived.assert_called_with() c.get_consumers.assert_called() cls = c.get_consumers.call_args[0][0] subcons = cls() assert subcons.on_decode_error is c.on_decode_error assert subcons.channel is conn.default_channel Acons.__enter__.assert_called_with() Bcons.__enter__.assert_called_with() c.on_consume_end.assert_called_with(conn, channel) class test_ConsumerMixin_interface: def setup_method(self): self.c = ConsumerMixin() def test_get_consumers(self): with pytest.raises(NotImplementedError): self.c.get_consumers(Mock(), Mock()) def test_on_connection_revived(self): assert self.c.on_connection_revived() is None def test_on_consume_ready(self): assert self.c.on_consume_ready(Mock(), Mock(), []) is None def test_on_consume_end(self): assert self.c.on_consume_end(Mock(), Mock()) is None def test_on_iteration(self): assert self.c.on_iteration() is None def test_on_decode_error(self): message = Message('foo') with patch('kombu.mixins.error') as error: self.c.on_decode_error(message, KeyError('foo')) error.assert_called() message.ack.assert_called_with() def test_on_connection_error(self): with patch('kombu.mixins.warn') as warn: self.c.on_connection_error(KeyError('foo'), 3) warn.assert_called() def test_extra_context(self): with self.c.extra_context(Mock(), Mock()): pass def test_restart_limit(self): assert self.c.restart_limit def test_connection_errors(self): conn = Mock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) assert self.c.connection_errors == conn.connection_errors 
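# --- Editorial aside: an illustrative (hedged) ConsumerMixin subclass showing
# the contract these tests poke at: get_consumers() must return Consumer
# instances and run() drains events until should_stop is set. The queue name
# and transport URL are assumptions.
from kombu import Connection, Queue
from kombu.mixins import ConsumerMixin


class Worker(ConsumerMixin):
    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=[Queue('demo')],
                         callbacks=[self.on_message], accept=['json'])]

    def on_message(self, body, message):
        print('received:', body)
        message.ack()


# Worker(Connection('memory://')).run()  # blocks until should_stop is True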
conn.channel_errors = (ValueError,) assert self.c.channel_errors == conn.channel_errors def test__consume_from(self): a = ContextMock(name='A') b = ContextMock(name='B') a.__enter__ = Mock(name='A.__enter__') b.__enter__ = Mock(name='B.__enter__') with self.c._consume_from(a, b): pass a.__enter__.assert_called_with() b.__enter__.assert_called_with() def test_establish_connection(self): conn = ContextMock(name='connection') conn.clone.return_value = conn self.c.connection = conn self.c.connect_max_retries = 3 with self.c.establish_connection() as conn: assert conn conn.ensure_connection.assert_called_with( self.c.on_connection_error, 3, ) def test_maybe_conn_error(self): conn = ContextMock(name='connection') conn.connection_errors = (KeyError,) conn.channel_errors = () self.c.connection = conn def raises(): raise KeyError('foo') self.c.maybe_conn_error(raises) def test_run(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') def se(*args, **kwargs): self.c.should_stop = True return [1] self.c.should_stop = False consume.side_effect = se self.c.run() def test_run_restart_rate_limited(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') with patch('kombu.mixins.sleep') as sleep: counter = [0] def se(*args, **kwargs): if counter[0] >= 1: self.c.should_stop = True counter[0] += 1 return counter self.c.should_stop = False consume.side_effect = se self.c.run() sleep.assert_called() def test_run_raises(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') with patch('kombu.mixins.warn') as warn: def se_raises(*args, **kwargs): self.c.should_stop = True raise KeyError('foo') self.c.should_stop = False consume.side_effect = se_raises self.c.run() warn.assert_called() kombu-5.5.3/t/unit/test_pidbox.py000066400000000000000000000304061477772317200170050ustar00rootroot00000000000000from __future__ import annotations import socket import warnings from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from unittest.mock import Mock, patch import pytest from kombu import Connection, pidbox from kombu.exceptions import ContentDisallowed, InconsistencyError from kombu.utils.uuid import uuid def is_cast(message): return message['method'] def is_call(message): return message['method'] and message['reply_to'] class test_Mailbox: class Mailbox(pidbox.Mailbox): def _collect(self, *args, **kwargs): return 'COLLECTED' def setup_method(self): self.mailbox = self.Mailbox('test_pidbox') self.connection = Connection(transport='memory') self.state = {'var': 1} self.handlers = {'mymethod': self._handler} self.bound = self.mailbox(self.connection) self.default_chan = self.connection.channel() self.node = self.bound.Node( 'test_pidbox', state=self.state, handlers=self.handlers, channel=self.default_chan, ) def _handler(self, state): return self.stats['var'] def test_broadcast_matcher_pattern_string_type(self): mailbox = pidbox.Mailbox("test_matcher_str")(self.connection) with pytest.raises(ValueError): mailbox._broadcast("ping", pattern=1, matcher=2) def test_publish_reply_ignores_InconsistencyError(self): mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) with patch('kombu.pidbox.Producer') as Producer: producer = 
Producer.return_value = Mock(name='producer') producer.publish.side_effect = InconsistencyError() mailbox._publish_reply( {'foo': 'bar'}, mailbox.reply_exchange, mailbox.oid, 'foo', ) producer.publish.assert_called() def test_reply__collect(self): mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) exchange = mailbox.reply_exchange.name channel = self.connection.channel() mailbox.reply_queue(channel).declare() ticket = uuid() mailbox._publish_reply({'foo': 'bar'}, exchange, mailbox.oid, ticket) _callback_called = [False] def callback(body): _callback_called[0] = True reply = mailbox._collect(ticket, limit=1, callback=callback, channel=channel) assert reply == [{'foo': 'bar'}] assert _callback_called[0] ticket = uuid() mailbox._publish_reply({'biz': 'boz'}, exchange, mailbox.oid, ticket) reply = mailbox._collect(ticket, limit=1, channel=channel) assert reply == [{'biz': 'boz'}] mailbox._publish_reply({'foo': 'BAM'}, exchange, mailbox.oid, 'doom', serializer='pickle') with pytest.raises(ContentDisallowed): reply = mailbox._collect('doom', limit=1, channel=channel) mailbox._publish_reply( {'foo': 'BAMBAM'}, exchange, mailbox.oid, 'doom', serializer='pickle', ) reply = mailbox._collect('doom', limit=1, channel=channel, accept=['pickle']) assert reply[0]['foo'] == 'BAMBAM' de = mailbox.connection.drain_events = Mock() de.side_effect = socket.timeout mailbox._collect(ticket, limit=1, channel=channel) def test_reply__collect_uses_default_channel(self): class ConsumerCalled(Exception): pass def fake_Consumer(channel, *args, **kwargs): raise ConsumerCalled(channel) ticket = uuid() with patch('kombu.pidbox.Consumer') as Consumer: mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) assert mailbox.connection.default_channel is not None Consumer.side_effect = fake_Consumer try: mailbox._collect(ticket, limit=1) except ConsumerCalled as c: assert c.args[0] is not None except Exception: raise else: assert False, "Consumer not called" def test__publish_uses_default_channel(self): class QueueCalled(Exception): pass def queue__call__side(channel, *args, **kwargs): raise QueueCalled(channel) ticket = uuid() with patch.object(pidbox.Queue, '__call__') as queue__call__: mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) queue__call__.side_effect = queue__call__side try: mailbox._publish(ticket, {}, reply_ticket=ticket) except QueueCalled as c: assert c.args[0] is not None except Exception: raise else: assert False, "Queue not called" def test_constructor(self): assert self.mailbox.connection is None assert self.mailbox.exchange.name assert self.mailbox.reply_exchange.name def test_bound(self): bound = self.mailbox(self.connection) assert bound.connection is self.connection def test_Node(self): assert self.node.hostname assert self.node.state assert self.node.mailbox is self.bound assert self.handlers # No initial handlers node2 = self.bound.Node('test_pidbox2', state=self.state) assert node2.handlers == {} def test_Node_consumer(self): consumer1 = self.node.Consumer() assert consumer1.channel is self.default_chan assert consumer1.no_ack chan2 = self.connection.channel() consumer2 = self.node.Consumer(channel=chan2, no_ack=False) assert consumer2.channel is chan2 assert not consumer2.no_ack def test_Node_consumer_multiple_listeners(self): warnings.resetwarnings() consumer = self.node.Consumer() q = consumer.queues[0] with warnings.catch_warnings(record=True) as log: q.on_declared('foo', 1, 1) assert log assert 'already using this' in log[0].message.args[0] with 
warnings.catch_warnings(record=True) as log: q.on_declared('foo', 1, 0) assert not log def test_handler(self): node = self.bound.Node('test_handler', state=self.state) @node.handler def my_handler_name(state): return 42 assert 'my_handler_name' in node.handlers def test_dispatch(self): node = self.bound.Node('test_dispatch', state=self.state) @node.handler def my_handler_name(state, x=None, y=None): return x + y assert node.dispatch('my_handler_name', arguments={'x': 10, 'y': 10}) == 20 def test_dispatch_raising_SystemExit(self): node = self.bound.Node('test_dispatch_raising_SystemExit', state=self.state) @node.handler def my_handler_name(state): raise SystemExit with pytest.raises(SystemExit): node.dispatch('my_handler_name') def test_dispatch_raising(self): node = self.bound.Node('test_dispatch_raising', state=self.state) @node.handler def my_handler_name(state): raise KeyError('foo') res = node.dispatch('my_handler_name') assert 'error' in res assert 'KeyError' in res['error'] def test_dispatch_replies(self): _replied = [False] def reply(data, **options): _replied[0] = True node = self.bound.Node('test_dispatch', state=self.state) node.reply = reply @node.handler def my_handler_name(state, x=None, y=None): return x + y node.dispatch('my_handler_name', arguments={'x': 10, 'y': 10}, reply_to={'exchange': 'foo', 'routing_key': 'bar'}) assert _replied[0] def test_reply(self): _replied = [(None, None, None)] def publish_reply(data, exchange, routing_key, ticket, **kwargs): _replied[0] = (data, exchange, routing_key, ticket) mailbox = self.mailbox(self.connection) mailbox._publish_reply = publish_reply node = mailbox.Node('test_reply') @node.handler def my_handler_name(state): return 42 node.dispatch('my_handler_name', reply_to={'exchange': 'exchange', 'routing_key': 'rkey'}, ticket='TICKET') data, exchange, routing_key, ticket = _replied[0] assert data == {'test_reply': 42} assert exchange == 'exchange' assert routing_key == 'rkey' assert ticket == 'TICKET' def test_handle_message(self): node = self.bound.Node('test_dispatch_from_message') @node.handler def my_handler_name(state, x=None, y=None): return x * y body = {'method': 'my_handler_name', 'arguments': {'x': 64, 'y': 64}} assert node.handle_message(body, None) == 64 * 64 # message not for me should not be processed. body['destination'] = ['some_other_node'] assert node.handle_message(body, None) is None # message for me should be processed. body['destination'] = ['test_dispatch_from_message'] assert node.handle_message(body, None) is not None # message not for me should not be processed. 
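# --- Editorial aside: a hedged sketch of the pidbox pattern these tests drive
# through dispatch()/handle_message(). The mailbox, node and handler names are
# illustrative only; a fanout broadcast reaches every listening node.
from kombu import Connection, pidbox

connection = Connection('memory://')
mailbox = pidbox.Mailbox('example_mailbox')(connection)
node = mailbox.Node('worker1')


@node.handler
def ping(state):
    return 'pong'


mailbox.cast(['worker1'], 'ping')  # fire-and-forget cast to the named node
# mailbox.call(['worker1'], 'ping')  # would additionally collect replies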
body.pop("destination") body['matcher'] = 'glob' body["pattern"] = "something*" assert node.handle_message(body, None) is None body["pattern"] = "test*" assert node.handle_message(body, None) is not None def test_handle_message_adjusts_clock(self): node = self.bound.Node('test_adjusts_clock') @node.handler def my_handler_name(state): return 10 body = {'method': 'my_handler_name', 'arguments': {}} message = Mock(name='message') message.headers = {'clock': 313} node.adjust_clock = Mock(name='adjust_clock') res = node.handle_message(body, message) node.adjust_clock.assert_called_with(313) assert res == 10 def test_listen(self): consumer = self.node.listen() assert consumer.callbacks[0] == self.node.handle_message assert consumer.channel == self.default_chan def test_cast(self): self.bound.cast(['somenode'], 'mymethod') consumer = self.node.Consumer() assert is_cast(self.get_next(consumer)) def test_abcast(self): self.bound.abcast('mymethod') consumer = self.node.Consumer() assert is_cast(self.get_next(consumer)) def test_call_destination_must_be_sequence(self): with pytest.raises(ValueError): self.bound.call('some_node', 'mymethod') def test_call(self): assert self.bound.call(['some_node'], 'mymethod') == 'COLLECTED' consumer = self.node.Consumer() assert is_call(self.get_next(consumer)) def test_multi_call(self): assert self.bound.multi_call('mymethod') == 'COLLECTED' consumer = self.node.Consumer() assert is_call(self.get_next(consumer)) def get_next(self, consumer): m = consumer.queues[0].get() if m: return m.payload GLOBAL_PIDBOX = pidbox.Mailbox('global_unittest_mailbox') def getoid(): return GLOBAL_PIDBOX.oid class test_PidboxOid: """Unittests checking oid consistency of Pidbox""" def test_oid_consistency(self): """Tests that oid is consistent in single process""" m1 = pidbox.Mailbox('mailbox1') m2 = pidbox.Mailbox('mailbox2') assert m1.oid == m1.oid assert m2.oid == m2.oid assert m1.oid != m2.oid def test_subprocess_oid(self): """Tests that subprocess will not share oid with parent process.""" oid = GLOBAL_PIDBOX.oid with ProcessPoolExecutor() as e: res = e.submit(getoid) subprocess_oid = res.result() assert subprocess_oid != oid def test_thread_oid(self): """Tests that threads will not share oid.""" oid = GLOBAL_PIDBOX.oid with ThreadPoolExecutor() as e: res = e.submit(getoid) subprocess_oid = res.result() assert subprocess_oid != oid kombu-5.5.3/t/unit/test_pools.py000066400000000000000000000172051477772317200166560ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu import Connection, Producer, pools from kombu.connection import ConnectionPool, PooledConnection from kombu.utils.collections import eqhash class test_ProducerPool: Pool = pools.ProducerPool class MyPool(pools.ProducerPool): def __init__(self, *args, **kwargs): self.instance = Mock() super().__init__(*args, **kwargs) def Producer(self, connection): return self.instance def setup_method(self): self.connections = Mock() self.pool = self.Pool(self.connections, limit=10) def test_close_resource(self): self.pool.close_resource(Mock(name='resource')) def test_releases_connection_when_Producer_raises(self): self.pool.Producer = Mock() self.pool.Producer.side_effect = IOError() acq = self.pool._acquire_connection = Mock() conn = acq.return_value = Mock() with pytest.raises(IOError): self.pool.create_producer() conn.release.assert_called_with() def test_exception_during_connection_use(self): """Tests that the connection is closed in case of an exception.""" with 
pytest.raises(IOError): with self.pool.acquire() as producer: producer.__connection__ = Mock(spec=PooledConnection) producer.__connection__._pool = self.connections producer.publish = Mock() producer.publish.side_effect = IOError() producer.publish("test data") self.connections.replace.assert_called_once() def test_prepare_release_connection_on_error(self): pp = Mock() p = pp.return_value = Mock() p.revive.side_effect = IOError() acq = self.pool._acquire_connection = Mock() conn = acq.return_value = Mock() p._channel = None with pytest.raises(IOError): self.pool.prepare(pp) conn.release.assert_called_with() def test_release_releases_connection(self): p = Mock() p.__connection__ = Mock() self.pool.release(p) p.__connection__.release.assert_called_with() p.__connection__ = None self.pool.release(p) def test_init(self): assert self.pool.connections is self.connections def test_Producer(self): assert isinstance(self.pool.Producer(Mock()), Producer) def test_acquire_connection(self): self.pool._acquire_connection() self.connections.acquire.assert_called_with(block=True) def test_new(self): promise = self.pool.new() producer = promise() assert isinstance(producer, Producer) self.connections.acquire.assert_called_with(block=True) def test_setup_unlimited(self): pool = self.Pool(self.connections, limit=None) pool.setup() assert not pool._resource.queue def test_setup(self): assert len(self.pool._resource.queue) == self.pool.limit first = self.pool._resource.get_nowait() producer = first() assert isinstance(producer, Producer) def test_prepare(self): connection = self.connections.acquire.return_value = Mock() pool = self.MyPool(self.connections, limit=10) pool.instance._channel = None first = pool._resource.get_nowait() producer = pool.prepare(first) self.connections.acquire.assert_called() producer.revive.assert_called_with(connection) def test_prepare_channel_already_created(self): self.connections.acquire.return_value = Mock() pool = self.MyPool(self.connections, limit=10) pool.instance._channel = Mock() first = pool._resource.get_nowait() self.connections.acquire.reset() producer = pool.prepare(first) producer.revive.assert_not_called() def test_prepare_not_callable(self): x = Producer(Mock) self.pool.prepare(x) def test_release(self): p = Mock() p.channel = Mock() p.__connection__ = Mock() self.pool.release(p) p.__connection__.release.assert_called_with() assert p.channel is None class test_PoolGroup: Group = pools.PoolGroup class MyGroup(pools.PoolGroup): def create(self, resource, limit): return resource, limit def test_interface_create(self): g = self.Group() with pytest.raises(NotImplementedError): g.create(Mock(), 10) def test_getitem_using_global_limit(self): g = self.MyGroup(limit=pools.use_global_limit) res = g['foo'] assert res == ('foo', pools.get_limit()) def test_getitem_using_custom_limit(self): g = self.MyGroup(limit=102456) res = g['foo'] assert res == ('foo', 102456) def test_delitem(self): g = self.MyGroup() g['foo'] del g['foo'] assert 'foo' not in g def test_Connections(self): conn = Connection('memory://') p = pools.connections[conn] assert p assert isinstance(p, ConnectionPool) assert p.connection is conn assert p.limit == pools.get_limit() def test_Producers(self): conn = Connection('memory://') p = pools.producers[conn] assert p assert isinstance(p, pools.ProducerPool) assert p.connections is pools.connections[conn] assert p.limit == p.connections.limit assert p.limit == pools.get_limit() def test_all_groups(self): conn = Connection('memory://') pools.connections[conn] 
assert list(pools._all_pools()) def test_reset(self): pools.reset() class MyGroup(dict): clear_called = False def clear(self): self.clear_called = True p1 = pools.connections['foo'] = Mock() g1 = MyGroup() pools._groups.append(g1) pools.reset() p1.force_close_all.assert_called_with() assert g1.clear_called p1 = pools.connections['foo'] = Mock() p1.force_close_all.side_effect = KeyError() pools.reset() def test_set_limit(self): pools.reset() pools.set_limit(34576) limit = pools.get_limit() assert limit == 34576 conn = Connection('memory://') pool = pools.connections[conn] with pool.acquire(): pools.set_limit(limit + 1) assert pools.get_limit() == limit + 1 limit = pools.get_limit() with pytest.raises(RuntimeError): pools.set_limit(limit - 1) pools.set_limit(limit - 1, force=True) assert pools.get_limit() == limit - 1 pools.set_limit(pools.get_limit()) def test_remove_limit(self): conn = Connection('memory://') pool = pools.connections[conn] pool.limit = 10 with pool.acquire(): pool.limit = 0 class test_fun_PoolGroup: def test_connections_behavior(self): c1u = 'memory://localhost:123' c2u = 'memory://localhost:124' c1 = Connection(c1u) c2 = Connection(c2u) c3 = Connection(c1u) assert eqhash(c1) != eqhash(c2) assert eqhash(c1) == eqhash(c3) c4 = Connection(c1u, transport_options={'confirm_publish': True}) assert eqhash(c3) != eqhash(c4) p1 = pools.connections[c1] p2 = pools.connections[c2] p3 = pools.connections[c3] assert p1 is not p2 assert p1 is p3 r1 = p1.acquire() assert p1._dirty assert p3._dirty assert not p2._dirty r1.release() assert not p1._dirty assert not p3._dirty kombu-5.5.3/t/unit/test_serialization.py000066400000000000000000000256321477772317200204020ustar00rootroot00000000000000#!/usr/bin/python from __future__ import annotations from base64 import b64decode from unittest.mock import call, patch import pytest import t.skip from kombu.exceptions import ContentDisallowed, DecodeError, EncodeError from kombu.serialization import (SerializerNotInstalled, disable_insecure_serializers, dumps, enable_insecure_serializers, loads, pickle, pickle_protocol, prepare_accept_content, raw_encode, register, register_msgpack, register_pickle, register_yaml, registry, unregister) from kombu.utils.encoding import str_to_bytes # For content_encoding tests unicode_string = 'abcdé\u8463' unicode_string_as_utf8 = unicode_string.encode('utf-8') latin_string = 'abcdé' latin_string_as_latin1 = latin_string.encode('latin-1') latin_string_as_utf8 = latin_string.encode('utf-8') # For serialization tests py_data = { 'string': 'The quick brown fox jumps over the lazy dog', 'int': 10, 'float': 3.14159265, 'unicode': 'Thé quick brown fox jumps over thé lazy dog', 'list': ['george', 'jerry', 'elaine', 'cosmo'], } # JSON serialization tests json_data = """\ {"int": 10, "float": 3.1415926500000002, \ "list": ["george", "jerry", "elaine", "cosmo"], \ "string": "The quick brown fox jumps over the lazy \ dog", "unicode": "Th\\u00e9 quick brown fox jumps over \ th\\u00e9 lazy dog"}\ """ # Pickle serialization tests pickle_data = pickle.dumps(py_data, protocol=pickle_protocol) # YAML serialization tests yaml_data = """\ float: 3.1415926500000002 int: 10 list: [george, jerry, elaine, cosmo] string: The quick brown fox jumps over the lazy dog unicode: "Th\\xE9 quick brown fox jumps over th\\xE9 lazy dog" """ msgpack_py_data = dict(py_data) msgpack_py_data['unicode'] = 'Th quick brown fox jumps over th lazy dog' # Unicode chars are lost in transmit :( msgpack_data = b64decode(str_to_bytes("""\ 
haNpbnQKpWZsb2F0y0AJIftTyNTxpGxpc3SUpmdlb3JnZaVqZXJyeaZlbGFpbmWlY29zbW+mc3Rya\ W5n2gArVGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZ6d1bmljb2Rl2g\ ApVGggcXVpY2sgYnJvd24gZm94IGp1bXBzIG92ZXIgdGggbGF6eSBkb2c=\ """)) registry.register('testS', lambda s: s, lambda s: 'decoded', 'application/testS', 'utf-8') class test_Serialization: def test_disable(self): disabled = registry._disabled_content_types try: registry.disable('testS') assert 'application/testS' in disabled disabled.clear() registry.disable('application/testS') assert 'application/testS' in disabled finally: disabled.clear() def test_enable(self): registry._disabled_content_types.add('application/json') registry.enable('json') assert 'application/json' not in registry._disabled_content_types registry._disabled_content_types.add('application/json') registry.enable('application/json') assert 'application/json' not in registry._disabled_content_types def test_loads_when_disabled(self): disabled = registry._disabled_content_types try: registry.disable('testS') with pytest.raises(SerializerNotInstalled): loads('xxd', 'application/testS', 'utf-8', force=False) ret = loads('xxd', 'application/testS', 'utf-8', force=True) assert ret == 'decoded' finally: disabled.clear() def test_loads_when_data_is_None(self): loads(None, 'application/testS', 'utf-8') def test_content_type_decoding(self): assert loads( unicode_string_as_utf8, content_type='plain/text', content_encoding='utf-8') == unicode_string assert loads( latin_string_as_latin1, content_type='application/data', content_encoding='latin-1') == latin_string def test_content_type_binary(self): assert isinstance( loads(unicode_string_as_utf8, content_type='application/data', content_encoding='binary'), bytes) assert loads( unicode_string_as_utf8, content_type='application/data', content_encoding='binary') == unicode_string_as_utf8 def test_content_type_encoding(self): # Using the 'raw' serializer assert (dumps(unicode_string, serializer='raw')[-1] == unicode_string_as_utf8) assert (dumps(latin_string, serializer='raw')[-1] == latin_string_as_utf8) # And again w/o a specific serializer to check the # code where we force unicode objects into a string. 
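# --- Editorial aside: a hedged round-trip sketch of the dumps()/loads() API
# this class asserts on. dumps() returns a (content_type, content_encoding,
# payload) triple; loads() reverses it when the content type is in `accept`.
from kombu.serialization import dumps, loads

content_type, content_encoding, payload = dumps({'hello': 'world'},
                                                serializer='json')
assert content_type == 'application/json'
assert loads(payload, content_type, content_encoding,
             accept=['application/json']) == {'hello': 'world'}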
assert dumps(unicode_string)[-1] == unicode_string_as_utf8 assert dumps(latin_string)[-1] == latin_string_as_utf8 def test_enable_insecure_serializers(self): with patch('kombu.serialization.registry') as registry: enable_insecure_serializers() registry.assert_has_calls([ call.enable('pickle'), call.enable('yaml'), call.enable('msgpack'), ]) registry.enable.side_effect = KeyError() enable_insecure_serializers() with patch('kombu.serialization.registry') as registry: enable_insecure_serializers(['msgpack']) registry.assert_has_calls([call.enable('msgpack')]) def test_disable_insecure_serializers(self): with patch('kombu.serialization.registry') as registry: registry._decoders = ['pickle', 'yaml', 'doomsday'] disable_insecure_serializers(allowed=['doomsday']) registry.disable.assert_has_calls([call('pickle'), call('yaml')]) registry.enable.assert_has_calls([call('doomsday')]) disable_insecure_serializers(allowed=None) registry.disable.assert_has_calls([ call('pickle'), call('yaml'), call('doomsday') ]) def test_reraises_EncodeError(self): with pytest.raises(EncodeError): dumps([object()], serializer='json') def test_reraises_DecodeError(self): with pytest.raises(DecodeError): loads(object(), content_type='application/json', content_encoding='utf-8') def test_json_loads(self): assert loads(json_data, content_type='application/json', content_encoding='utf-8') == py_data def test_json_dumps(self): a = loads( dumps(py_data, serializer='json')[-1], content_type='application/json', content_encoding='utf-8', ) b = loads( json_data, content_type='application/json', content_encoding='utf-8', ) assert a == b @t.skip.if_pypy def test_msgpack_loads(self): register_msgpack() pytest.importorskip('msgpack') res = loads(msgpack_data, content_type='application/x-msgpack', content_encoding='binary') assert res == msgpack_py_data @t.skip.if_pypy def test_msgpack_dumps(self): pytest.importorskip('msgpack') register_msgpack() a = loads( dumps(msgpack_py_data, serializer='msgpack')[-1], content_type='application/x-msgpack', content_encoding='binary', ) b = loads( msgpack_data, content_type='application/x-msgpack', content_encoding='binary', ) assert a == b def test_yaml_loads(self): pytest.importorskip('yaml') register_yaml() assert loads( yaml_data, content_type='application/x-yaml', content_encoding='utf-8') == py_data def test_yaml_dumps(self): pytest.importorskip('yaml') register_yaml() a = loads( dumps(py_data, serializer='yaml')[-1], content_type='application/x-yaml', content_encoding='utf-8', ) b = loads( yaml_data, content_type='application/x-yaml', content_encoding='utf-8', ) assert a == b def test_pickle_loads(self): assert loads( pickle_data, content_type='application/x-python-serialize', content_encoding='binary') == py_data def test_pickle_dumps(self): a = pickle.loads(pickle_data), b = pickle.loads(dumps(py_data, serializer='pickle')[-1]), assert a == b def test_register(self): register(None, None, None, None) def test_unregister(self): with pytest.raises(SerializerNotInstalled): unregister('nonexisting') dumps('foo', serializer='pickle') unregister('pickle') with pytest.raises(SerializerNotInstalled): dumps('foo', serializer='pickle') register_pickle() def test_set_default_serializer_missing(self): with pytest.raises(SerializerNotInstalled): registry._set_default_serializer('nonexisting') def test_dumps_missing(self): with pytest.raises(SerializerNotInstalled): dumps('foo', serializer='nonexisting') def test_dumps__no_serializer(self): ctyp, cenc, data = dumps(str_to_bytes('foo')) assert ctyp == 
'application/data' assert cenc == 'binary' def test_loads__trusted_content(self): loads('tainted', 'application/data', 'binary', accept=[]) loads('tainted', 'application/text', 'utf-8', accept=[]) def test_loads__not_accepted(self): with pytest.raises(ContentDisallowed): loads('tainted', 'application/x-evil', 'binary', accept=[]) with pytest.raises(ContentDisallowed): loads('tainted', 'application/x-evil', 'binary', accept=['application/x-json']) assert loads('tainted', 'application/x-doomsday', 'binary', accept=['application/x-doomsday']) def test_raw_encode(self): assert raw_encode(b'foo') == ( 'application/data', 'binary', b'foo', ) @pytest.mark.masked_modules('yaml') def test_register_yaml__no_yaml(self, mask_modules): register_yaml() with pytest.raises(SerializerNotInstalled): loads('foo', 'application/x-yaml', 'utf-8') @pytest.mark.masked_modules('msgpack') def test_register_msgpack__no_msgpack(self, mask_modules): register_msgpack() with pytest.raises(SerializerNotInstalled): loads('foo', 'application/x-msgpack', 'utf-8') def test_prepare_accept_content(self): assert {'application/json'} == prepare_accept_content(['json']) assert {'application/json'} == prepare_accept_content( ['application/json']) def test_prepare_accept_content_bad_serializer(self): with pytest.raises(SerializerNotInstalled): prepare_accept_content(['bad_serializer']) kombu-5.5.3/t/unit/test_simple.py000066400000000000000000000150761477772317200170170ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu import Connection, Exchange, Queue from kombu.exceptions import ContentDisallowed class SimpleBase: def Queue(self, name, *args, **kwargs): q = name if not isinstance(q, Queue): q = self.__class__.__name__ if name: q = f'{q}.{name}' return self._Queue(q, *args, **kwargs) def _Queue(self, *args, **kwargs): raise NotImplementedError() def setup_method(self): self.connection = Connection(transport='memory') self.connection.default_channel.exchange_declare('amq.direct') def teardown_method(self): self.connection.close() self.connection = None def test_produce__consume(self): q = self.Queue('test_produce__consume', no_ack=True) q.put({'hello': 'Simple'}) assert q.get(timeout=1).payload == {'hello': 'Simple'} with pytest.raises(q.Empty): q.get(timeout=0.1) def test_produce__basic_get(self): q = self.Queue('test_produce__basic_get', no_ack=True) q.put({'hello': 'SimpleSync'}) assert q.get_nowait().payload == {'hello': 'SimpleSync'} with pytest.raises(q.Empty): q.get_nowait() q.put({'hello': 'SimpleSync'}) assert q.get(block=False).payload == {'hello': 'SimpleSync'} with pytest.raises(q.Empty): q.get(block=False) def test_get_nowait_accept(self): q = self.Queue('test_accept', serializer='pickle', accept=['json']) q.put({'hello': 'SimpleSync'}) with pytest.raises(ContentDisallowed): q.get_nowait().payload q = self.Queue('test_accept1', serializer='json', accept=[]) q.put({'hello': 'SimpleSync'}) with pytest.raises(ContentDisallowed): q.get_nowait().payload q = self.Queue( 'test_accept2', serializer='pickle', accept=['json', 'pickle']) q.put({'hello': 'SimpleSync'}) assert q.get_nowait().payload == {'hello': 'SimpleSync'} def test_get_accept(self): q = self.Queue('test_accept', serializer='pickle', accept=['json']) q.put({'hello': 'SimpleSync'}) with pytest.raises(ContentDisallowed): q.get().payload q = self.Queue('test_accept1', serializer='pickle', accept=[]) q.put({'hello': 'SimpleSync'}) with pytest.raises(ContentDisallowed): q.get().payload q = 
self.Queue( 'test_accept2', serializer='pickle', accept=['json', 'pickle']) q.put({'hello': 'SimpleSync'}) assert q.get().payload == {'hello': 'SimpleSync'} def test_clear(self): q = self.Queue('test_clear', no_ack=True) for i in range(10): q.put({'hello': 'SimplePurge%d' % (i,)}) assert q.clear() == 10 def test_enter_exit(self): q = self.Queue('test_enter_exit') q.close = Mock() with q as x: assert x is q q.close.assert_called_with() def test_qsize(self): q = self.Queue('test_clear', no_ack=True) for i in range(10): q.put({'hello': 'SimplePurge%d' % (i,)}) assert q.qsize() == 10 assert len(q) == 10 def test_autoclose(self): channel = self.connection.channel() q = self.Queue('test_autoclose', no_ack=True, channel=channel) q.close() def test_custom_Queue(self): n = self.__class__.__name__ exchange = Exchange(f'{n}-test.custom.Queue') queue = Queue(f'{n}-test.custom.Queue', exchange, 'my.routing.key') q = self.Queue(queue) assert q.consumer.queues[0] == queue q.close() def test_bool(self): q = self.Queue('test_nonzero') assert q class test_SimpleQueue(SimpleBase): def _Queue(self, *args, **kwargs): return self.connection.SimpleQueue(*args, **kwargs) def test_is_ack(self): q = self.Queue('test_is_no_ack') assert not q.no_ack def test_queue_args(self): q = self.Queue('test_queue_args', queue_args={'x-queue-mode': 'lazy'}) assert len(q.queue.queue_arguments) == 1 assert q.queue.queue_arguments['x-queue-mode'] == 'lazy' q = self.Queue('test_queue_args') assert q.queue.queue_arguments == {} def test_exchange_opts(self): q = self.Queue('test_exchange_opts_a', exchange_opts={'durable': True, 'type': 'fanout', 'delivery_mode': 'persistent'}) assert q.queue.exchange.type == 'fanout' assert q.queue.exchange.durable assert not q.queue.exchange.auto_delete delivery_mode_code = q.queue.exchange.PERSISTENT_DELIVERY_MODE assert q.queue.exchange.delivery_mode == delivery_mode_code q = self.Queue('test_exchange_opts_b') assert q.queue.exchange.type == 'direct' assert q.queue.exchange.durable assert not q.queue.exchange.auto_delete def test_queue_opts(self): q = self.Queue('test_queue_opts', queue_opts={'auto_delete': False}) assert not q.queue.auto_delete class test_SimpleBuffer(SimpleBase): def Queue(self, *args, **kwargs): return self.connection.SimpleBuffer(*args, **kwargs) def test_is_no_ack(self): q = self.Queue('test_is_no_ack') assert q.no_ack def test_queue_args(self): q = self.Queue('test_queue_args', queue_args={'x-queue-mode': 'lazy'}) assert len(q.queue.queue_arguments) == 1 assert q.queue.queue_arguments['x-queue-mode'] == 'lazy' def test_exchange_opts(self): q = self.Queue('test_exchange_opts_a', exchange_opts={'durable': True, 'auto_delete': True, 'delivery_mode': 'persistent'}) assert q.queue.exchange.type == 'direct' assert q.queue.exchange.durable assert q.queue.exchange.auto_delete delivery_mode_code = q.queue.exchange.PERSISTENT_DELIVERY_MODE assert q.queue.exchange.delivery_mode == delivery_mode_code q = self.Queue('test_exchange_opts_b') assert q.queue.exchange.type == 'direct' assert not q.queue.exchange.durable assert q.queue.exchange.auto_delete def test_queue_opts(self): q = self.Queue('test_queue_opts', queue_opts={'auto_delete': False}) assert not q.queue.durable assert not q.queue.auto_delete q = self.Queue('test_queue_opts') assert not q.queue.durable assert q.queue.auto_delete 
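# --- Editorial aside: a hedged usage sketch of the SimpleQueue API tested
# above (the transport URL and queue name are illustrative): put() publishes,
# get() returns a message that should be ack()ed unless no_ack is enabled.
from kombu import Connection

with Connection('memory://') as conn:
    queue = conn.SimpleQueue('simple_demo')
    queue.put({'hello': 'world'})
    message = queue.get(timeout=1)
    assert message.payload == {'hello': 'world'}
    message.ack()
    queue.close()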
kombu-5.5.3/t/unit/transport/000077500000000000000000000000001477772317200161405ustar00rootroot00000000000000kombu-5.5.3/t/unit/transport/__init__.py000066400000000000000000000000001477772317200202370ustar00rootroot00000000000000kombu-5.5.3/t/unit/transport/test_SQS.py000066400000000000000000001144421477772317200202250ustar00rootroot00000000000000"""Testing module for the kombu.transport.SQS package. NOTE: The SQSQueueMock and SQSConnectionMock classes originally come from http://github.com/pcsforeducation/sqs-mock-python. They have been patched slightly. """ from __future__ import annotations import base64 import os import random import string from datetime import datetime, timedelta from queue import Empty from unittest.mock import Mock, patch import pytest from kombu import Connection, Exchange, Queue, messaging boto3 = pytest.importorskip('boto3') from botocore.exceptions import ClientError # noqa from kombu.transport import SQS # noqa SQS_Channel_sqs = SQS.Channel.sqs example_predefined_queues = { 'queue-1': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue-1', 'access_key_id': 'a', 'secret_access_key': 'b', 'backoff_tasks': ['svc.tasks.tasks.task1'], 'backoff_policy': {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640} }, 'queue-2': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue-2', 'access_key_id': 'c', 'secret_access_key': 'd', }, 'queue-3.fifo': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue-3.fifo', 'access_key_id': 'e', 'secret_access_key': 'f', } } class SQSMessageMock: def __init__(self): """ Imitate the SQS Message from boto3. """ self.body = "" self.receipt_handle = "receipt_handle_xyz" class QueueMock: """ Hold information about a queue. """ def __init__(self, url, creation_attributes=None): self.url = url # arguments of boto3.sqs.create_queue self.creation_attributes = creation_attributes self.attributes = {'ApproximateNumberOfMessages': '0'} self.messages = [] def __repr__(self): return f'QueueMock: {self.url} {len(self.messages)} messages' class SQSClientMock: def __init__(self, QueueName='unittest_queue'): """ Imitate the SQS Client from boto3. """ self._receive_messages_calls = 0 # _queues doesn't exist on the real client, here for testing. self._queues = {} url = self.create_queue(QueueName=QueueName)['QueueUrl'] self.send_message(QueueUrl=url, MessageBody='hello') def _get_q(self, url): """ Helper method to quickly get a queue. 
""" for q in self._queues.values(): if q.url == url: return q raise Exception(f"Queue url {url} not found") def create_queue(self, QueueName=None, Attributes=None): q = self._queues[QueueName] = QueueMock( 'https://sqs.us-east-1.amazonaws.com/xxx/' + QueueName, Attributes, ) return {'QueueUrl': q.url} def list_queues(self, QueueNamePrefix=None): """ Return a list of queue urls """ urls = (val.url for key, val in self._queues.items() if key.startswith(QueueNamePrefix)) return {'QueueUrls': urls} def get_queue_url(self, QueueName=None): return self._queues[QueueName] def send_message(self, QueueUrl=None, MessageBody=None, MessageAttributes=None): for q in self._queues.values(): if q.url == QueueUrl: handle = ''.join(random.choice(string.ascii_lowercase) for x in range(10)) q.messages.append({'Body': MessageBody, 'ReceiptHandle': handle, 'MessageAttributes': MessageAttributes}) break def receive_message(self, QueueUrl=None, MaxNumberOfMessages=1, WaitTimeSeconds=10): self._receive_messages_calls += 1 for q in self._queues.values(): if q.url == QueueUrl: msgs = q.messages[:MaxNumberOfMessages] q.messages = q.messages[MaxNumberOfMessages:] return {'Messages': msgs} if msgs else {} def get_queue_attributes(self, QueueUrl=None, AttributeNames=None): if 'ApproximateNumberOfMessages' in AttributeNames: count = len(self._get_q(QueueUrl).messages) return {'Attributes': {'ApproximateNumberOfMessages': count}} def purge_queue(self, QueueUrl=None): for q in self._queues.values(): if q.url == QueueUrl: q.messages = [] def delete_queue(self, QueueUrl=None): queue_name = None for key, val in self._queues.items(): if val.url == QueueUrl: queue_name = key break if queue_name is None: raise Exception(f"Queue url {QueueUrl} not found") del self._queues[queue_name] class test_Channel: def handleMessageCallback(self, message): self.callback_message = message def setup_method(self): """Mock the back-end SQS classes""" # Sanity check... if SQS is None, then it did not import and we # cannot execute our tests. SQS.Channel._queue_cache.clear() # Common variables used in the unit tests self.queue_name = 'unittest' # Mock the sqs() method that returns an SQSConnection object and # instead return an SQSConnectionMock() object. sqs_conn_mock = SQSClientMock() self.sqs_conn_mock = sqs_conn_mock predefined_queues_sqs_conn_mocks = { 'queue-1': SQSClientMock(QueueName='queue-1'), 'queue-2': SQSClientMock(QueueName='queue-2'), 'queue-3.fifo': SQSClientMock(QueueName='queue-3.fifo') } def mock_sqs(): def sqs(self, queue=None): if queue in predefined_queues_sqs_conn_mocks: return predefined_queues_sqs_conn_mocks[queue] return sqs_conn_mock return sqs SQS.Channel.sqs = mock_sqs() # Set up a task exchange for passing tasks through the queue self.exchange = Exchange('test_SQS', type='direct') self.queue = Queue(self.queue_name, self.exchange, self.queue_name) # Mock up a test SQS Queue with the QueueMock class (and always # make sure its a clean empty queue) self.sqs_queue_mock = QueueMock('sqs://' + self.queue_name) # Now, create our Connection object with the SQS Transport and store # the connection/channel objects as references for use in these tests. self.connection = Connection(transport=SQS.Transport) self.channel = self.connection.channel() self.queue(self.channel).declare() self.producer = messaging.Producer(self.channel, self.exchange, routing_key=self.queue_name) # Lastly, make sure that we're set up to 'consume' this queue. 
self.channel.basic_consume(self.queue_name, no_ack=False, callback=self.handleMessageCallback, consumer_tag='unittest') def teardown_method(self): # Removes QoS reserved messages so we don't restore msgs on shutdown. try: qos = self.channel._qos except AttributeError: pass else: if qos: qos._dirty.clear() qos._delivered.clear() def test_init(self): """kombu.SQS.Channel instantiates correctly with mocked queues""" assert self.queue_name in self.channel._queue_cache def test_region(self): _environ = dict(os.environ) # when the region is unspecified connection = Connection(transport=SQS.Transport) channel = connection.channel() assert channel.transport_options.get('region') is None # the default region is us-east-1 assert channel.region == 'us-east-1' # when boto3 picks a region os.environ['AWS_DEFAULT_REGION'] = 'us-east-2' assert boto3.Session().region_name == 'us-east-2' # the default region should match connection = Connection(transport=SQS.Transport) channel = connection.channel() assert channel.region == 'us-east-2' # when transport_options are provided connection = Connection(transport=SQS.Transport, transport_options={ 'region': 'us-west-2' }) channel = connection.channel() assert channel.transport_options.get('region') == 'us-west-2' # the specified region should be used assert connection.channel().region == 'us-west-2' os.environ.clear() os.environ.update(_environ) def test_endpoint_url(self): url = 'sqs://@localhost:5493' self.connection = Connection(hostname=url, transport=SQS.Transport) self.channel = self.connection.channel() self.channel._sqs = None expected_endpoint_url = 'http://localhost:5493' assert self.channel.endpoint_url == expected_endpoint_url boto3_sqs = SQS_Channel_sqs.__get__(self.channel, SQS.Channel) assert boto3_sqs()._endpoint.host == expected_endpoint_url def test_none_hostname_persists(self): conn = Connection(hostname=None, transport=SQS.Transport) assert conn.hostname == conn.clone().hostname def test_entity_name(self): assert self.channel.entity_name('foo') == 'foo' assert self.channel.entity_name('foo.bar-baz*qux_quux') == \ 'foo-bar-baz_qux_quux' assert self.channel.entity_name('abcdef.fifo') == 'abcdef.fifo' def test_resolve_queue_url(self): queue_name = 'unittest_queue' assert self.sqs_conn_mock._queues[queue_name].url == \ self.channel._resolve_queue_url(queue_name) def test_new_queue(self): queue_name = 'new_unittest_queue' self.channel._new_queue(queue_name) assert queue_name in self.sqs_conn_mock._queues.keys() # For cleanup purposes, delete the queue and the queue file self.channel._delete(queue_name) def test_new_queue_custom_creation_attributes(self): self.connection.transport_options['sqs-creation-attributes'] = { 'KmsMasterKeyId': 'alias/aws/sqs', } queue_name = 'new_custom_attribute_queue' self.channel._new_queue(queue_name) assert queue_name in self.sqs_conn_mock._queues.keys() queue = self.sqs_conn_mock._queues[queue_name] assert 'KmsMasterKeyId' in queue.creation_attributes assert queue.creation_attributes['KmsMasterKeyId'] == 'alias/aws/sqs' # For cleanup purposes, delete the queue and the queue file self.channel._delete(queue_name) def test_botocore_config_override(self): expected_connect_timeout = 5 client_config = {'connect_timeout': expected_connect_timeout} self.connection = Connection( transport=SQS.Transport, transport_options={'client-config': client_config}, ) self.channel = self.connection.channel() self.channel._sqs = None boto3_sqs = SQS_Channel_sqs.__get__(self.channel, SQS.Channel) botocore_config = 
boto3_sqs()._client_config assert botocore_config.connect_timeout == expected_connect_timeout def test_dont_create_duplicate_new_queue(self): # All queue names start with "q", except "unittest_queue". # which is definitely out of cache when get_all_queues returns the # first 1000 queues sorted by name. queue_name = 'unittest_queue' # This should not create a new queue. self.channel._new_queue(queue_name) assert queue_name in self.sqs_conn_mock._queues.keys() queue = self.sqs_conn_mock._queues[queue_name] # The queue originally had 1 message in it. assert 1 == len(queue.messages) assert 'hello' == queue.messages[0]['Body'] def test_delete(self): queue_name = 'new_unittest_queue' self.channel._new_queue(queue_name) self.channel._delete(queue_name) assert queue_name not in self.channel._queue_cache assert queue_name not in self.sqs_conn_mock._queues def test_get_from_sqs(self): # Test getting a single message message = 'my test message' self.producer.publish(message) result = self.channel._get(self.queue_name) assert 'body' in result.keys() # Now test getting many messages for i in range(3): message = f'message: {i}' self.producer.publish(message) self.channel._get_bulk(self.queue_name, max_if_unlimited=3) assert len(self.sqs_conn_mock._queues[self.queue_name].messages) == 0 def test_get_with_empty_list(self): with pytest.raises(Empty): self.channel._get(self.queue_name) def test_get_bulk_raises_empty(self): with pytest.raises(Empty): self.channel._get_bulk(self.queue_name) def test_optional_b64_decode(self): raw = b'{"id": "4cc7438e-afd4-4f8f-a2f3-f46567e7ca77","task": "celery.task.PingTask",' \ b'"args": [],"kwargs": {},"retries": 0,"eta": "2009-11-17T12:30:56.527191"}' b64_enc = base64.b64encode(raw) assert self.channel._optional_b64_decode(b64_enc) == raw assert self.channel._optional_b64_decode(raw) == raw assert self.channel._optional_b64_decode(b"test123") == b"test123" def test_messages_to_python(self): from kombu.asynchronous.aws.sqs.message import Message kombu_message_count = 3 json_message_count = 3 # Create several test messages and publish them for i in range(kombu_message_count): message = 'message: %s' % i self.producer.publish(message) # json formatted message NOT created by kombu for i in range(json_message_count): message = {'foo': 'bar'} self.channel._put(self.producer.routing_key, message) q_url = self.channel._new_queue(self.queue_name) # Get the messages now kombu_messages = [] for m in self.sqs_conn_mock.receive_message( QueueUrl=q_url, MaxNumberOfMessages=kombu_message_count)['Messages']: m['Body'] = Message(body=m['Body']).decode() kombu_messages.append(m) json_messages = [] for m in self.sqs_conn_mock.receive_message( QueueUrl=q_url, MaxNumberOfMessages=json_message_count)['Messages']: m['Body'] = Message(body=m['Body']).decode() json_messages.append(m) # Now convert them to payloads kombu_payloads = self.channel._messages_to_python( kombu_messages, self.queue_name, ) json_payloads = self.channel._messages_to_python( json_messages, self.queue_name, ) # We got the same number of payloads back, right? 
assert len(kombu_payloads) == kombu_message_count assert len(json_payloads) == json_message_count # Make sure they're payload-style objects for p in kombu_payloads: assert 'properties' in p for p in json_payloads: assert 'properties' in p def test_put_and_get(self): message = 'my test message' self.producer.publish(message) results = self.queue(self.channel).get().payload assert message == results def test_redelivered(self): self.channel.sqs().change_message_visibility = \ Mock(name='change_message_visibility') message = { 'redelivered': True, 'properties': {'delivery_tag': 'test_message_id'} } self.channel._put(self.producer.routing_key, message) self.sqs_conn_mock.change_message_visibility.assert_called_once() def test_put_and_get_bulk(self): # With QoS.prefetch_count = 0 message = 'my test message' self.producer.publish(message) self.channel.connection._deliver = Mock(name='_deliver') self.channel._get_bulk(self.queue_name) self.channel.connection._deliver.assert_called_once() def test_puts_and_get_bulk(self): # Generate 8 messages message_count = 8 # Set the prefetch_count to 5 self.channel.qos.prefetch_count = 5 # Now, generate all the messages for i in range(message_count): message = 'message: %s' % i self.producer.publish(message) # Count how many messages are retrieved the first time. Should # be 5 (message_count). self.channel.connection._deliver = Mock(name='_deliver') self.channel._get_bulk(self.queue_name) assert self.channel.connection._deliver.call_count == 5 for i in range(5): self.channel.qos.append(Mock(name=f'message{i}'), i) # Now, do the get again, the number of messages returned should be 1. self.channel.connection._deliver.reset_mock() self.channel._get_bulk(self.queue_name) self.channel.connection._deliver.assert_called_once() # hub required for successful instantiation of AsyncSQSConnection @pytest.mark.usefixtures('hub') def test_get_async(self): """Basic coverage of async code typically used via: basic_consume > _loop1 > _schedule_queue > _get_bulk_async""" # Prepare for i in range(3): message = 'message: %s' % i self.producer.publish(message) # SQS.Channel.asynsqs constructs AsyncSQSConnection using self.sqs # which is already a mock thanks to `setup` above, we just need to # mock the async-specific methods (as test_AsyncSQSConnection does) async_sqs_conn = self.channel.asynsqs(self.queue_name) async_sqs_conn.get_list = Mock(name='X.get_list') # Call key method self.channel._get_bulk_async(self.queue_name) assert async_sqs_conn.get_list.call_count == 1 get_list_args = async_sqs_conn.get_list.call_args[0] get_list_kwargs = async_sqs_conn.get_list.call_args[1] assert get_list_args[0] == 'ReceiveMessage' assert get_list_args[1] == { 'MaxNumberOfMessages': SQS.SQS_MAX_MESSAGES, 'WaitTimeSeconds': self.channel.wait_time_seconds, } assert get_list_args[3] == \ self.channel.sqs().get_queue_url(self.queue_name).url assert get_list_kwargs['parent'] == self.queue_name assert get_list_kwargs['protocol_params'] == { 'json': {'AttributeNames': ['ApproximateReceiveCount']}, 'query': {'AttributeName.1': 'ApproximateReceiveCount'}, } def test_fetch_message_attributes(self): self.connection.transport_options['fetch_message_attributes'] = ["Attribute1", "Attribute2"] async_sqs_conn = self.channel.asynsqs(self.queue_name) assert async_sqs_conn.fetch_message_attributes == ['Attribute1', 'Attribute2'] def test_drain_events_with_empty_list(self): def mock_can_consume(): return False self.channel.qos.can_consume = mock_can_consume with pytest.raises(Empty): self.channel.drain_events() 
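    # Note (descriptive comment, summarising the tests that follow): the two
    # drain_events tests below exercise QoS prefetch behaviour — with
    # prefetch_count=5 only five messages are expected to be delivered before
    # draining stops, while prefetch_count=None should deliver every
    # published message.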
def test_drain_events_with_prefetch_5(self): # Generate 20 messages message_count = 20 prefetch_count = 5 current_delivery_tag = [1] # Set the prefetch_count to 5 self.channel.qos.prefetch_count = prefetch_count self.channel.connection._deliver = Mock(name='_deliver') def on_message_delivered(message, queue): current_delivery_tag[0] += 1 self.channel.qos.append(message, current_delivery_tag[0]) self.channel.connection._deliver.side_effect = on_message_delivered # Now, generate all the messages for i in range(message_count): self.producer.publish('message: %s' % i) # Now drain all the events for i in range(1000): try: self.channel.drain_events(timeout=0) except Empty: break else: assert False, 'disabled infinite loop' self.channel.qos._flush() assert len(self.channel.qos._delivered) == prefetch_count assert self.channel.connection._deliver.call_count == prefetch_count def test_drain_events_with_prefetch_none(self): # Generate 20 messages message_count = 20 expected_receive_messages_count = 3 current_delivery_tag = [1] # Set the prefetch_count to None self.channel.qos.prefetch_count = None self.channel.connection._deliver = Mock(name='_deliver') def on_message_delivered(message, queue): current_delivery_tag[0] += 1 self.channel.qos.append(message, current_delivery_tag[0]) self.channel.connection._deliver.side_effect = on_message_delivered # Now, generate all the messages for i in range(message_count): self.producer.publish('message: %s' % i) # Now drain all the events for i in range(1000): try: self.channel.drain_events(timeout=0) except Empty: break else: assert False, 'disabled infinite loop' assert self.channel.connection._deliver.call_count == message_count # How many times was the SQSConnectionMock receive_message method # called? assert (expected_receive_messages_count == self.sqs_conn_mock._receive_messages_calls) def test_basic_ack(self, ): """Test that basic_ack calls the delete_message properly""" message = { 'sqs_message': { 'ReceiptHandle': '1' }, 'sqs_queue': 'testing_queue' } mock_messages = Mock() mock_messages.delivery_info = message self.channel.qos.append(mock_messages, 1) self.channel.sqs().delete_message = Mock() self.channel.basic_ack(1) self.sqs_conn_mock.delete_message.assert_called_with( QueueUrl=message['sqs_queue'], ReceiptHandle=message['sqs_message']['ReceiptHandle'] ) assert {1} == self.channel.qos._dirty @patch('kombu.transport.virtual.base.Channel.basic_ack') @patch('kombu.transport.virtual.base.Channel.basic_reject') def test_basic_ack_with_mocked_channel_methods(self, basic_reject_mock, basic_ack_mock): """Test that basic_ack calls the delete_message properly""" message = { 'sqs_message': { 'ReceiptHandle': '1' }, 'sqs_queue': 'testing_queue' } mock_messages = Mock() mock_messages.delivery_info = message self.channel.qos.append(mock_messages, 1) self.channel.sqs().delete_message = Mock() self.channel.basic_ack(1) self.sqs_conn_mock.delete_message.assert_called_with( QueueUrl=message['sqs_queue'], ReceiptHandle=message['sqs_message']['ReceiptHandle'] ) basic_ack_mock.assert_called_with(1) assert not basic_reject_mock.called @patch('kombu.transport.virtual.base.Channel.basic_ack') @patch('kombu.transport.virtual.base.Channel.basic_reject') def test_basic_ack_without_sqs_message(self, basic_reject_mock, basic_ack_mock): """Test that basic_ack calls the delete_message properly""" message = { 'sqs_queue': 'testing_queue' } mock_messages = Mock() mock_messages.delivery_info = message self.channel.qos.append(mock_messages, 1) self.channel.sqs().delete_message = 
Mock() self.channel.basic_ack(1) assert not self.sqs_conn_mock.delete_message.called basic_ack_mock.assert_called_with(1) assert not basic_reject_mock.called @patch('kombu.transport.virtual.base.Channel.basic_ack') @patch('kombu.transport.virtual.base.Channel.basic_reject') def test_basic_ack_invalid_receipt_handle(self, basic_reject_mock, basic_ack_mock): """Test that basic_ack calls the delete_message properly""" message = { 'sqs_message': { 'ReceiptHandle': '2' }, 'sqs_queue': 'testing_queue' } error_response = { 'Error': { 'Code': 'InvalidParameterValue', 'Message': 'Value 2 for parameter ReceiptHandle is invalid.' ' Reason: The receipt handle has expired.' } } operation_name = 'DeleteMessage' mock_messages = Mock() mock_messages.delivery_info = message self.channel.qos.append(mock_messages, 2) self.channel.sqs().delete_message = Mock() self.channel.sqs().delete_message.side_effect = ClientError( error_response=error_response, operation_name=operation_name ) self.channel.basic_ack(2) self.sqs_conn_mock.delete_message.assert_called_with( QueueUrl=message['sqs_queue'], ReceiptHandle=message['sqs_message']['ReceiptHandle'] ) basic_reject_mock.assert_called_with(2) assert not basic_ack_mock.called @patch('kombu.transport.virtual.base.Channel.basic_ack') @patch('kombu.transport.virtual.base.Channel.basic_reject') def test_basic_ack_access_denied(self, basic_reject_mock, basic_ack_mock): """Test that basic_ack raises AccessDeniedQueueException when access is denied""" message = { 'sqs_message': { 'ReceiptHandle': '2' }, 'sqs_queue': 'testing_queue' } error_response = { 'Error': { 'Code': 'AccessDenied', 'Message': """An error occurred (AccessDenied) when calling the DeleteMessage operation.""" } } operation_name = 'DeleteMessage' mock_messages = Mock() mock_messages.delivery_info = message self.channel.qos.append(mock_messages, 2) self.channel.sqs().delete_message = Mock() self.channel.sqs().delete_message.side_effect = ClientError( error_response=error_response, operation_name=operation_name ) # Expecting the custom AccessDeniedQueueException to be raised with pytest.raises(SQS.AccessDeniedQueueException): self.channel.basic_ack(2) self.sqs_conn_mock.delete_message.assert_called_with( QueueUrl=message['sqs_queue'], ReceiptHandle=message['sqs_message']['ReceiptHandle'] ) assert not basic_reject_mock.called assert not basic_ack_mock.called def test_reject_when_no_predefined_queues(self): connection = Connection(transport=SQS.Transport, transport_options={}) channel = connection.channel() mock_apply_backoff_policy = Mock() channel.qos.apply_backoff_policy = mock_apply_backoff_policy queue_name = "queue-1" exchange = Exchange('test_SQS', type='direct') queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() message_mock = Mock() message_mock.delivery_info = {'routing_key': queue_name} channel.qos._delivered['test_message_id'] = message_mock channel.qos.reject('test_message_id') mock_apply_backoff_policy.assert_not_called() def test_predefined_queues_primes_queue_cache(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() assert 'queue-1' in channel._queue_cache assert 'queue-2' in channel._queue_cache def test_predefined_queues_new_queue_raises_if_queue_not_exists(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() with pytest.raises(SQS.UndefinedQueueException): 
channel._new_queue('queue-99') def test_predefined_queues_get_from_sqs(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() def message_to_python(message, queue_name, queue): return message channel._message_to_python = Mock(side_effect=message_to_python) queue_name = "queue-1" exchange = Exchange('test_SQS', type='direct') p = messaging.Producer(channel, exchange, routing_key=queue_name) queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() # Getting a single message p.publish('message') result = channel._get(queue_name) assert 'Body' in result.keys() # Getting many messages for i in range(3): p.publish(f'message: {i}') channel.connection._deliver = Mock(name='_deliver') channel._get_bulk(queue_name, max_if_unlimited=3) channel.connection._deliver.assert_called() assert len(channel.sqs(queue_name)._queues[queue_name].messages) == 0 def test_predefined_queues_backoff_policy(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() def apply_backoff_policy( queue_name, delivery_tag, retry_policy, backoff_tasks): return None mock_apply_policy = Mock(side_effect=apply_backoff_policy) channel.qos.apply_backoff_policy = mock_apply_policy queue_name = "queue-1" exchange = Exchange('test_SQS', type='direct') queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() message_mock = Mock() message_mock.delivery_info = {'routing_key': queue_name} channel.qos._delivered['test_message_id'] = message_mock channel.qos.reject('test_message_id') mock_apply_policy.assert_called_once_with( 'queue-1', 'test_message_id', {1: 10, 2: 20, 3: 40, 4: 80, 5: 320, 6: 640}, ['svc.tasks.tasks.task1'] ) def test_predefined_queues_change_visibility_timeout(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() def extract_task_name_and_number_of_retries(delivery_tag): return 'svc.tasks.tasks.task1', 2 mock_extract_task_name_and_number_of_retries = Mock( side_effect=extract_task_name_and_number_of_retries) channel.qos.extract_task_name_and_number_of_retries = \ mock_extract_task_name_and_number_of_retries queue_name = "queue-1" exchange = Exchange('test_SQS', type='direct') queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() message_mock = Mock() message_mock.delivery_info = {'routing_key': queue_name} channel.qos._delivered['test_message_id'] = message_mock channel.sqs = Mock() sqs_queue_mock = Mock() channel.sqs.return_value = sqs_queue_mock channel.qos.reject('test_message_id') sqs_queue_mock.change_message_visibility.assert_called_once_with( QueueUrl='https://sqs.us-east-1.amazonaws.com/xxx/queue-1', ReceiptHandle='test_message_id', VisibilityTimeout=20) def test_predefined_queues_put_to_fifo_queue(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() queue_name = 'queue-3.fifo' exchange = Exchange('test_SQS', type='direct') p = messaging.Producer(channel, exchange, routing_key=queue_name) queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() channel.sqs = Mock() sqs_queue_mock = Mock() channel.sqs.return_value = sqs_queue_mock p.publish('message') sqs_queue_mock.send_message.assert_called_once() assert 'MessageGroupId' in 
sqs_queue_mock.send_message.call_args[1] assert 'MessageDeduplicationId' in \ sqs_queue_mock.send_message.call_args[1] def test_predefined_queues_put_to_queue(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, }) channel = connection.channel() queue_name = 'queue-2' exchange = Exchange('test_SQS', type='direct') p = messaging.Producer(channel, exchange, routing_key=queue_name) queue = Queue(queue_name, exchange, queue_name) queue(channel).declare() channel.sqs = Mock() sqs_queue_mock = Mock() channel.sqs.return_value = sqs_queue_mock p.publish('message', DelaySeconds=10) sqs_queue_mock.send_message.assert_called_once() assert 'DelaySeconds' in sqs_queue_mock.send_message.call_args[1] assert sqs_queue_mock.send_message.call_args[1]['DelaySeconds'] == 10 @pytest.mark.parametrize('predefined_queues', ( { 'invalid-fifo-queue-name': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue.fifo', 'access_key_id': 'a', 'secret_access_key': 'b' } }, { 'standard-queue.fifo': { 'url': 'https://sqs.us-east-1.amazonaws.com/xxx/queue', 'access_key_id': 'a', 'secret_access_key': 'b' } } )) def test_predefined_queues_invalid_configuration(self, predefined_queues): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': predefined_queues, }) with pytest.raises(SQS.InvalidQueueException): connection.channel() def test_sts_new_session(self): # Arrange connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, 'sts_role_arn': 'test::arn' }) channel = connection.channel() sqs = SQS_Channel_sqs.__get__(channel, SQS.Channel) queue_name = 'queue-1' mock_generate_sts_session_token = Mock() mock_new_sqs_client = Mock() channel.new_sqs_client = mock_new_sqs_client mock_generate_sts_session_token.side_effect = [ { 'Expiration': 123, 'SessionToken': 123, 'AccessKeyId': 123, 'SecretAccessKey': 123 } ] channel.generate_sts_session_token = mock_generate_sts_session_token # Act sqs(queue=queue_name) # Assert mock_generate_sts_session_token.assert_called_once() def test_sts_session_expired(self): # Arrange connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, 'sts_role_arn': 'test::arn' }) channel = connection.channel() sqs = SQS_Channel_sqs.__get__(channel, SQS.Channel) channel.sts_expiration = datetime.utcnow() - timedelta(days=1) queue_name = 'queue-1' mock_generate_sts_session_token = Mock() mock_new_sqs_client = Mock() channel.new_sqs_client = mock_new_sqs_client mock_generate_sts_session_token.side_effect = [ { 'Expiration': 123, 'SessionToken': 123, 'AccessKeyId': 123, 'SecretAccessKey': 123 } ] channel.generate_sts_session_token = mock_generate_sts_session_token # Act sqs(queue=queue_name) # Assert mock_generate_sts_session_token.assert_called_once() def test_sts_session_not_expired(self): # Arrange connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, 'sts_role_arn': 'test::arn' }) channel = connection.channel() channel.sts_expiration = datetime.utcnow() + timedelta(days=1) queue_name = 'queue-1' mock_generate_sts_session_token = Mock() mock_new_sqs_client = Mock() channel.new_sqs_client = mock_new_sqs_client channel._predefined_queue_clients = {queue_name: 'mock_client'} mock_generate_sts_session_token.side_effect = [ { 'Expiration': 123, 'SessionToken': 123, 'AccessKeyId': 123, 'SecretAccessKey': 123 } ] 
channel.generate_sts_session_token = mock_generate_sts_session_token # Act channel.sqs(queue=queue_name) # Assert mock_generate_sts_session_token.assert_not_called() def test_sts_session_with_multiple_predefined_queues(self): connection = Connection(transport=SQS.Transport, transport_options={ 'predefined_queues': example_predefined_queues, 'sts_role_arn': 'test::arn' }) channel = connection.channel() sqs = SQS_Channel_sqs.__get__(channel, SQS.Channel) mock_generate_sts_session_token = Mock() mock_new_sqs_client = Mock() channel.new_sqs_client = mock_new_sqs_client mock_generate_sts_session_token.return_value = { 'Expiration': datetime.utcnow() + timedelta(days=1), 'SessionToken': 123, 'AccessKeyId': 123, 'SecretAccessKey': 123 } channel.generate_sts_session_token = mock_generate_sts_session_token # Act sqs(queue='queue-1') sqs(queue='queue-2') # Assert mock_generate_sts_session_token.assert_called() mock_new_sqs_client.assert_called() def test_message_attribute(self): message = 'my test message' self.producer.publish(message, message_attributes={ 'Attribute1': {'DataType': 'String', 'StringValue': 'STRING_VALUE'} } ) output_message = self.queue(self.channel).get() assert message == output_message.payload # It's not propagated to the properties assert 'message_attributes' not in output_message.properties kombu-5.5.3/t/unit/transport/test_azureservicebus.py000066400000000000000000000363641477772317200230060ustar00rootroot00000000000000from __future__ import annotations import base64 import json import random from collections import namedtuple from queue import Empty from unittest.mock import MagicMock, patch import pytest from kombu import Connection, Exchange, Queue, messaging pytest.importorskip('azure.servicebus') import azure.core.exceptions # noqa import azure.servicebus.exceptions # noqa from azure.servicebus import ServiceBusMessage, ServiceBusReceiveMode # noqa try: from azure.identity import (DefaultAzureCredential, ManagedIdentityCredential) except ImportError: DefaultAzureCredential = None ManagedIdentityCredential = None from kombu.transport import azureservicebus # noqa class ASBQueue: def __init__(self, kwargs): self.options = kwargs self.items = [] self.waiting_ack = [] self.send_calls = [] self.recv_calls = [] def get_receiver(self, kwargs): receive_mode = kwargs.get( 'receive_mode', ServiceBusReceiveMode.PEEK_LOCK) class Receiver: def close(self): pass def receive_messages(_self, **kwargs2): max_message_count = kwargs2.get('max_message_count', 1) result = [] if self.items: while self.items or len(result) > max_message_count: item = self.items.pop(0) if receive_mode is ServiceBusReceiveMode.PEEK_LOCK: self.waiting_ack.append(item) result.append(item) self.recv_calls.append({ 'receiver_options': kwargs, 'receive_messages_options': kwargs2, 'messages': result }) return result return Receiver() def get_sender(self): class Sender: def close(self): pass def send_messages(_self, msg): self.send_calls.append(msg) self.items.append(msg) return Sender() class ASBMock: def __init__(self): self.queues = {} def get_queue_receiver(self, queue_name, **kwargs): return self.queues[queue_name].get_receiver(kwargs) def get_queue_sender(self, queue_name, **kwargs): return self.queues[queue_name].get_sender() class ASBMgmtMock: def __init__(self, queues): self.queues = queues def create_queue(self, queue_name, **kwargs): if queue_name in self.queues: raise azure.core.exceptions.ResourceExistsError() self.queues[queue_name] = ASBQueue(kwargs) def delete_queue(self, queue_name): 
self.queues.pop(queue_name, None) def get_queue_runtime_properties(self, queue_name): count = len(self.queues[queue_name].items) mock = MagicMock() mock.total_message_count = count return mock URL_NOCREDS = 'azureservicebus://' URL_CREDS_SAS = 'azureservicebus://policyname:ke/y@hostname' URL_CREDS_SAS_FQ = 'azureservicebus://policyname:ke/y@hostname.servicebus.windows.net' URL_CREDS_DA = 'azureservicebus://DefaultAzureCredential@hostname' URL_CREDS_DA_FQ = 'azureservicebus://DefaultAzureCredential@hostname.servicebus.windows.net' # noqa URL_CREDS_MI = 'azureservicebus://ManagedIdentityCredential@hostname' URL_CREDS_MI_FQ = 'azureservicebus://ManagedIdentityCredential@hostname.servicebus.windows.net' # noqa def test_queue_service_nocredentials(): conn = Connection(URL_NOCREDS, transport=azureservicebus.Transport) with pytest.raises(ValueError) as exc: conn.channel() assert exc == 'Need an URI like azureservicebus://{SAS policy name}:{SAS key}@{ServiceBus Namespace}' # noqa def test_queue_service_sas(): # Test getting queue service without credentials conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) with patch('kombu.transport.azureservicebus.ServiceBusClient') as m: channel = conn.channel() # Check the SAS token "ke/y" has been parsed from the url correctly assert channel._sas_key == 'ke/y' m.from_connection_string.return_value = 'test' # Remove queue service to get from service bus again channel._queue_service = None assert channel.queue_service == 'test' assert m.from_connection_string.call_count == 1 # Ensure that queue_service is cached assert channel.queue_service == 'test' assert m.from_connection_string.call_count == 1 assert channel._namespace == 'hostname.servicebus.windows.net' def test_queue_service_da(): conn = Connection(URL_CREDS_DA, transport=azureservicebus.Transport) channel = conn.channel() # Check the DefaultAzureCredential has been parsed from the url correctly # and the credential is a ManagedIdentityCredential assert isinstance(channel._credential, DefaultAzureCredential) assert channel._namespace == 'hostname.servicebus.windows.net' def test_queue_service_mi(): conn = Connection(URL_CREDS_MI, transport=azureservicebus.Transport) channel = conn.channel() # Check the ManagedIdentityCredential has been parsed from the url # correctly and the credential is a ManagedIdentityCredential assert isinstance(channel._credential, ManagedIdentityCredential) assert channel._namespace == 'hostname.servicebus.windows.net' def test_conninfo(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() assert channel.conninfo is conn def test_transport_type(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() assert not channel.transport_options def test_default_wait_timeout_seconds(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() assert channel.wait_time_seconds == \ azureservicebus.Channel.default_wait_time_seconds def test_custom_wait_timeout_seconds(): conn = Connection( URL_CREDS_SAS, transport=azureservicebus.Transport, transport_options={'wait_time_seconds': 10} ) channel = conn.channel() assert channel.wait_time_seconds == 10 def test_default_peek_lock_seconds(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() assert channel.peek_lock_seconds == \ azureservicebus.Channel.default_peek_lock_seconds def test_custom_peek_lock_seconds(): conn = Connection(URL_CREDS_SAS, 
transport=azureservicebus.Transport, transport_options={'peek_lock_seconds': 65}) channel = conn.channel() assert channel.peek_lock_seconds == 65 def test_invalid_peek_lock_seconds(): # Max is 300 conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport, transport_options={'peek_lock_seconds': 900}) channel = conn.channel() assert channel.peek_lock_seconds == 300 @pytest.fixture def random_queue(): return f'azureservicebus_queue_{random.randint(1000, 9999)}' @pytest.fixture def mock_asb(): return ASBMock() @pytest.fixture def mock_asb_management(mock_asb): return ASBMgmtMock(queues=mock_asb.queues) MockQueue = namedtuple( 'MockQueue', ['queue_name', 'asb', 'asb_mgmt', 'conn', 'channel', 'producer', 'queue'] ) @pytest.fixture(autouse=True) def sbac_class_patch(): with patch('kombu.transport.azureservicebus.ServiceBusAdministrationClient') as sbac: # noqa yield sbac @pytest.fixture(autouse=True) def sbc_class_patch(): with patch('kombu.transport.azureservicebus.ServiceBusClient') as sbc: # noqa yield sbc @pytest.fixture(autouse=True) def mock_clients( sbc_class_patch, sbac_class_patch, mock_asb, mock_asb_management ): sbc_class_patch.from_connection_string.return_value = mock_asb sbac_class_patch.from_connection_string.return_value = mock_asb_management @pytest.fixture def mock_queue(mock_asb, mock_asb_management, random_queue) -> MockQueue: exchange = Exchange('test_servicebus', type='direct') queue = Queue(random_queue, exchange, random_queue) conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() queue(channel).declare() producer = messaging.Producer(channel, exchange, routing_key=random_queue) return MockQueue( random_queue, mock_asb, mock_asb_management, conn, channel, producer, queue ) def test_basic_put_get(mock_queue: MockQueue): text_message = "test message" # This ends up hitting channel._put mock_queue.producer.publish(text_message) assert len(mock_queue.asb.queues[mock_queue.queue_name].items) == 1 azure_msg = mock_queue.asb.queues[mock_queue.queue_name].items[0] assert isinstance(azure_msg, ServiceBusMessage) message = mock_queue.channel._get(mock_queue.queue_name) azure_msg_decoded = json.loads(str(azure_msg)) assert message['body'] == azure_msg_decoded['body'] # Check the message has been annotated with the azure message object # which is used to ack later assert message['properties']['delivery_info']['azure_message'] is azure_msg assert base64.b64decode(message['body']).decode() == text_message # Ack is on by default, check an ack is waiting assert len(mock_queue.asb.queues[mock_queue.queue_name].waiting_ack) == 1 def test_empty_queue_get(mock_queue: MockQueue): with pytest.raises(Empty): mock_queue.channel._get(mock_queue.queue_name) def test_delete_empty_queue(mock_queue: MockQueue): chan = mock_queue.channel queue_name = f'random_queue_{random.randint(1000, 9999)}' chan._new_queue(queue_name) assert queue_name in chan._queue_cache chan._delete(queue_name) assert queue_name not in chan._queue_cache def test_delete_populated_queue(mock_queue: MockQueue): mock_queue.producer.publish('test1234') mock_queue.channel._delete(mock_queue.queue_name) assert mock_queue.queue_name not in mock_queue.channel._queue_cache def test_purge(mock_queue: MockQueue): mock_queue.producer.publish('test1234') mock_queue.producer.publish('test1234') mock_queue.producer.publish('test1234') mock_queue.producer.publish('test1234') size = mock_queue.channel._size(mock_queue.queue_name) assert size == 4 assert 
mock_queue.channel._purge(mock_queue.queue_name) == 4 size = mock_queue.channel._size(mock_queue.queue_name) assert size == 0 assert len(mock_queue.asb.queues[mock_queue.queue_name].waiting_ack) == 0 def test_custom_queue_name_prefix(): conn = Connection( URL_CREDS_SAS, transport=azureservicebus.Transport, transport_options={'queue_name_prefix': 'test-queue'} ) channel = conn.channel() assert channel.queue_name_prefix == 'test-queue' def test_custom_entity_name(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) channel = conn.channel() # dashes allowed and dots replaced by dashes assert channel.entity_name('test-celery') == 'test-celery' assert channel.entity_name('test.celery') == 'test-celery' # all other punctuations replaced by underscores assert channel.entity_name('test_celery') == 'test_celery' assert channel.entity_name('test:celery') == 'test_celery' assert channel.entity_name('test+celery') == 'test_celery' def test_basic_ack_complete_message(mock_queue: MockQueue): mock_queue.producer.publish("test message") message = mock_queue.channel._get(mock_queue.queue_name) mock_queue.channel.qos.get = MagicMock( return_value=mock_queue.channel.Message( message, mock_queue.channel ) ) receiver_mock = MagicMock() receiver_mock.complete_message = MagicMock(return_value=None) queue_object_mock = MagicMock() queue_object_mock.receiver = receiver_mock mock_queue.channel._get_asb_receiver = MagicMock( return_value=queue_object_mock) with patch( 'kombu.transport.virtual.base.Channel.basic_ack' ) as super_basic_ack: mock_queue.channel.basic_ack("test_delivery_tag") assert mock_queue.channel.qos.get.call_count == 1 assert mock_queue.channel._get_asb_receiver.call_count == 1 assert queue_object_mock.receiver.complete_message.call_count == 1 assert super_basic_ack.call_count == 1 def test_basic_ack_when_already_settled(mock_queue: MockQueue): mock_queue.producer.publish("test message") message = mock_queue.channel._get(mock_queue.queue_name) mock_queue.channel.qos.get = MagicMock( return_value=mock_queue.channel.Message( message, mock_queue.channel ) ) receiver_mock = MagicMock() receiver_mock.complete_message = MagicMock( side_effect=azure.servicebus.exceptions.MessageAlreadySettled()) queue_object_mock = MagicMock() queue_object_mock.receiver = receiver_mock mock_queue.channel._get_asb_receiver = MagicMock( return_value=queue_object_mock) with patch( 'kombu.transport.virtual.base.Channel.basic_ack' ) as super_basic_ack: mock_queue.channel.basic_ack("test_delivery_tag") assert mock_queue.channel.qos.get.call_count == 1 assert mock_queue.channel._get_asb_receiver.call_count == 1 assert queue_object_mock.receiver.complete_message.call_count == 1 assert super_basic_ack.call_count == 1 def test_basic_ack_when_qos_raises_keyerror(mock_queue: MockQueue): """Test that basic_ack calls super method when keyerror""" mock_queue.channel.qos.get = MagicMock(side_effect=KeyError()) with patch( 'kombu.transport.virtual.base.Channel.basic_ack' ) as super_basic_ack: mock_queue.channel.basic_ack("invented_delivery_tag") assert super_basic_ack.call_count == 1 assert mock_queue.channel.qos.get.call_count == 1 def test_basic_ack_reject_message_when_raises_exception( mock_queue: MockQueue ): mock_queue.producer.publish("test message") message = mock_queue.channel._get(mock_queue.queue_name) mock_queue.channel.qos.get = MagicMock( return_value=mock_queue.channel.Message( message, mock_queue.channel ) ) receiver_mock = MagicMock() receiver_mock.complete_message = MagicMock(side_effect=Exception()) 
queue_object_mock = MagicMock() queue_object_mock.receiver = receiver_mock mock_queue.channel._get_asb_receiver = MagicMock( return_value=queue_object_mock) with patch( 'kombu.transport.virtual.base.Channel.basic_reject' ) as super_basic_reject: mock_queue.channel.basic_ack("test_delivery_tag") assert mock_queue.channel.qos.get.call_count == 1 assert mock_queue.channel._get_asb_receiver.call_count == 1 assert queue_object_mock.receiver.complete_message.call_count == 1 assert super_basic_reject.call_count == 1 def test_returning_sas(): conn = Connection(URL_CREDS_SAS, transport=azureservicebus.Transport) assert conn.as_uri(True) == URL_CREDS_SAS_FQ def test_returning_da(): conn = Connection(URL_CREDS_DA, transport=azureservicebus.Transport) assert conn.as_uri(True) == URL_CREDS_DA_FQ def test_returning_mi(): conn = Connection(URL_CREDS_MI, transport=azureservicebus.Transport) assert conn.as_uri(True) == URL_CREDS_MI_FQ kombu-5.5.3/t/unit/transport/test_azurestoragequeues.py000066400000000000000000000065761477772317200235320ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import patch import pytest from azure.identity import DefaultAzureCredential, ManagedIdentityCredential from kombu import Connection pytest.importorskip('azure.storage.queue') from kombu.transport import azurestoragequeues # noqa URL_NOCREDS = 'azurestoragequeues://' URL_CREDS = 'azurestoragequeues://sas/key%@https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/' # noqa AZURITE_CREDS = 'azurestoragequeues://Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==@http://localhost:10001/devstoreaccount1' # noqa AZURITE_CREDS_DOCKER_COMPOSE = 'azurestoragequeues://Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==@http://azurite:10001/devstoreaccount1' # noqa DEFAULT_AZURE_URL_CREDS = 'azurestoragequeues://DefaultAzureCredential@https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/' # noqa MANAGED_IDENTITY_URL_CREDS = 'azurestoragequeues://ManagedIdentityCredential@https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/' # noqa def test_queue_service_nocredentials(): conn = Connection(URL_NOCREDS, transport=azurestoragequeues.Transport) with pytest.raises( ValueError, match='Need a URI like azurestoragequeues://{SAS or access key}@{URL}' ): conn.channel() def test_queue_service(): # Test getting queue service without credentials conn = Connection(URL_CREDS, transport=azurestoragequeues.Transport) with patch('kombu.transport.azurestoragequeues.QueueServiceClient'): channel = conn.channel() # Check the SAS token "sas/key%" has been parsed from the url correctly assert channel._credential == 'sas/key%' assert channel._url == 'https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/' # noqa @pytest.mark.parametrize( "creds, hostname", [ (AZURITE_CREDS, 'localhost'), (AZURITE_CREDS_DOCKER_COMPOSE, 'azurite'), ] ) def test_queue_service_works_for_azurite(creds, hostname): conn = Connection(creds, transport=azurestoragequeues.Transport) with patch('kombu.transport.azurestoragequeues.QueueServiceClient'): channel = conn.channel() assert channel._credential == { 'account_name': 'devstoreaccount1', 'account_key': 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' # noqa } assert channel._url == f'http://{hostname}:10001/devstoreaccount1' # noqa def test_queue_service_works_for_default_azure_credentials(): conn = Connection( DEFAULT_AZURE_URL_CREDS, transport=azurestoragequeues.Transport ) with 
patch("kombu.transport.azurestoragequeues.QueueServiceClient"): channel = conn.channel() assert isinstance(channel._credential, DefaultAzureCredential) assert ( channel._url == "https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/" ) def test_queue_service_works_for_managed_identity_credentials(): conn = Connection( MANAGED_IDENTITY_URL_CREDS, transport=azurestoragequeues.Transport ) with patch("kombu.transport.azurestoragequeues.QueueServiceClient"): channel = conn.channel() assert isinstance(channel._credential, ManagedIdentityCredential) assert ( channel._url == "https://STORAGE_ACCOUNT_NAME.queue.core.windows.net/" ) kombu-5.5.3/t/unit/transport/test_base.py000066400000000000000000000127771477772317200205010ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.message import Message from kombu.transport.base import (Management, StdChannel, Transport, to_rabbitmq_queue_arguments) @pytest.mark.parametrize('args,input,expected', [ ({}, {'message_ttl': 20}, {'x-message-ttl': 20000}), ({}, {'message_ttl': None}, {}), ({'foo': 'bar'}, {'expires': 30.3}, {'x-expires': 30300, 'foo': 'bar'}), ({'x-expires': 3}, {'expires': 4}, {'x-expires': 4000}), ({}, {'max_length': 10}, {'x-max-length': 10}), ({}, {'max_length_bytes': 1033}, {'x-max-length-bytes': 1033}), ({}, {'max_priority': 303}, {'x-max-priority': 303}), ]) def test_rabbitmq_queue_arguments(args, input, expected): assert to_rabbitmq_queue_arguments(args, **input) == expected class test_StdChannel: def setup_method(self): self.conn = Connection('memory://') self.channel = self.conn.channel() self.channel.queues.clear() self.conn.connection.state.clear() def test_Consumer(self): q = Queue('foo', Exchange('foo')) cons = self.channel.Consumer(q) assert isinstance(cons, Consumer) assert cons.channel is self.channel def test_Producer(self): prod = self.channel.Producer() assert isinstance(prod, Producer) assert prod.channel is self.channel def test_interface_get_bindings(self): with pytest.raises(NotImplementedError): StdChannel().get_bindings() def test_interface_after_reply_message_received(self): assert StdChannel().after_reply_message_received(Queue('foo')) is None class test_Message: def setup_method(self): self.conn = Connection('memory://') self.channel = self.conn.channel() self.message = Message(channel=self.channel, delivery_tag=313) def test_postencode(self): m = Message('FOO', channel=self.channel, postencode='ccyzz') with pytest.raises(LookupError): m._reraise_error() m.ack() def test_ack_respects_no_ack_consumers(self): self.channel.no_ack_consumers = {'abc'} self.message.delivery_info['consumer_tag'] = 'abc' ack = self.channel.basic_ack = Mock() self.message.ack() assert self.message._state != 'ACK' ack.assert_not_called() def test_ack_missing_consumer_tag(self): self.channel.no_ack_consumers = {'abc'} self.message.delivery_info = {} ack = self.channel.basic_ack = Mock() self.message.ack() ack.assert_called_with(self.message.delivery_tag, multiple=False) def test_ack_not_no_ack(self): self.channel.no_ack_consumers = set() self.message.delivery_info['consumer_tag'] = 'abc' ack = self.channel.basic_ack = Mock() self.message.ack() ack.assert_called_with(self.message.delivery_tag, multiple=False) def test_ack_log_error_when_no_error(self): ack = self.message.ack = Mock() self.message.ack_log_error(Mock(), KeyError) ack.assert_called_with(multiple=False) def test_ack_log_error_when_error(self): ack = 
self.message.ack = Mock() ack.side_effect = KeyError('foo') logger = Mock() self.message.ack_log_error(logger, KeyError) ack.assert_called_with(multiple=False) logger.critical.assert_called() assert "Couldn't ack" in logger.critical.call_args[0][0] def test_reject_log_error_when_no_error(self): reject = self.message.reject = Mock() self.message.reject_log_error(Mock(), KeyError, requeue=True) reject.assert_called_with(requeue=True) def test_reject_log_error_when_error(self): reject = self.message.reject = Mock() reject.side_effect = KeyError('foo') logger = Mock() self.message.reject_log_error(logger, KeyError) reject.assert_called_with(requeue=False) logger.critical.assert_called() assert "Couldn't reject" in logger.critical.call_args[0][0] class test_interface: def test_establish_connection(self): with pytest.raises(NotImplementedError): Transport(None).establish_connection() def test_close_connection(self): with pytest.raises(NotImplementedError): Transport(None).close_connection(None) def test_create_channel(self): with pytest.raises(NotImplementedError): Transport(None).create_channel(None) def test_close_channel(self): with pytest.raises(NotImplementedError): Transport(None).close_channel(None) def test_drain_events(self): with pytest.raises(NotImplementedError): Transport(None).drain_events(None) def test_heartbeat_check(self): Transport(None).heartbeat_check(Mock(name='connection')) def test_driver_version(self): assert Transport(None).driver_version() def test_register_with_event_loop(self): Transport(None).register_with_event_loop( Mock(name='connection'), Mock(name='loop'), ) def test_unregister_from_event_loop(self): Transport(None).unregister_from_event_loop( Mock(name='connection'), Mock(name='loop'), ) def test_manager(self): assert Transport(None).manager class test_Management: def test_get_bindings(self): m = Management(Mock(name='transport')) with pytest.raises(NotImplementedError): m.get_bindings() kombu-5.5.3/t/unit/transport/test_consul.py000066400000000000000000000051711477772317200210600ustar00rootroot00000000000000from __future__ import annotations from array import array from queue import Empty from unittest.mock import Mock import pytest from kombu.transport.consul import Channel, Transport pytest.importorskip('consul') class test_Consul: def setup_method(self): self.connection = Mock() self.connection._used_channel_ids = array('H') self.connection.channel_max = 65535 self.connection.client.transport_options = {} self.connection.client.port = 303 self.consul = self.patching('consul.Consul').return_value self.channel = Channel(connection=self.connection) def test_driver_version(self): assert Transport(self.connection.client).driver_version() def test_failed_get(self): self.channel._acquire_lock = Mock(return_value=False) self.channel.client.kv.get.return_value = (1, None) with pytest.raises(Empty): self.channel._get('empty')() def test_test_purge(self): self.channel._destroy_session = Mock(return_value=True) self.consul.kv.delete = Mock(return_value=True) assert self.channel._purge('foo') def test_variables(self): assert self.channel.session_ttl == 30 assert self.channel.timeout == '10s' def test_lock_key(self): key = self.channel._lock_key('myqueue') assert key == 'kombu/myqueue.lock' def test_key_prefix(self): key = self.channel._key_prefix('myqueue') assert key == 'kombu/myqueue' def test_get_or_create_session(self): queue = 'myqueue' session_id = '123456' self.consul.session.create.return_value = session_id assert self.channel._get_or_create_session(queue) == 
session_id def test_create_delete_queue(self): queue = 'mynewqueue' self.consul.kv.put.return_value = True assert self.channel._new_queue(queue) self.consul.kv.delete.return_value = True self.channel._destroy_session = Mock() self.channel._delete(queue) def test_size(self): self.consul.kv.get.return_value = [(1, {}), (2, {})] assert self.channel._size('q') == 2 def test_get(self): self.channel._obtain_lock = Mock(return_value=True) self.channel._release_lock = Mock(return_value=True) self.consul.kv.get.return_value = [1, [ {'Key': 'myqueue', 'ModifyIndex': 1, 'Value': '1'}, ]] self.consul.kv.delete.return_value = True assert self.channel._get('myqueue') is not None def test_put(self): self.consul.kv.put.return_value = True assert self.channel._put('myqueue', 'mydata') is None kombu-5.5.3/t/unit/transport/test_etcd.py000066400000000000000000000044511477772317200204740ustar00rootroot00000000000000from __future__ import annotations from array import array from queue import Empty from unittest.mock import Mock, patch import pytest from kombu.transport.etcd import Channel, Transport pytest.importorskip('etcd') class test_Etcd: def setup_method(self): self.connection = Mock() self.connection._used_channel_ids = array('H') self.connection.channel_max = 65535 self.connection.client.transport_options = {} self.connection.client.port = 2739 self.client = self.patching('etcd.Client').return_value self.channel = Channel(connection=self.connection) def test_driver_version(self): assert Transport(self.connection.client).driver_version() def test_failed_get(self): self.channel._acquire_lock = Mock(return_value=False) self.channel.client.read.side_effect = IndexError with patch('etcd.Lock'): with pytest.raises(Empty): self.channel._get('empty')() def test_test_purge(self): with patch('etcd.Lock'): self.client.delete = Mock(return_value=True) assert self.channel._purge('foo') def test_key_prefix(self): key = self.channel._key_prefix('myqueue') assert key == 'kombu/myqueue' def test_create_delete_queue(self): queue = 'mynewqueue' with patch('etcd.Lock'): self.client.write.return_value = self.patching('etcd.EtcdResult') assert self.channel._new_queue(queue) self.client.delete.return_value = self.patching('etcd.EtcdResult') self.channel._delete(queue) def test_size(self): with patch('etcd.Lock'): self.client.read.return_value = self.patching( 'etcd.EtcdResult', _children=[{}, {}]) assert self.channel._size('q') == 2 def test_get(self): with patch('etcd.Lock'): self.client.read.return_value = self.patching( 'etcd.EtcdResult', _children=[{'key': 'myqueue', 'modifyIndex': 1, 'value': '1'}]) assert self.channel._get('myqueue') is not None def test_put(self): with patch('etcd.Lock'): self.client.write.return_value = self.patching('etcd.EtcdResult') assert self.channel._put('myqueue', 'mydata') is None kombu-5.5.3/t/unit/transport/test_filesystem.py000066400000000000000000000246721477772317200217500ustar00rootroot00000000000000from __future__ import annotations import tempfile from queue import Empty from unittest.mock import call, patch import pytest import t.skip from kombu import Connection, Consumer, Exchange, Producer, Queue @t.skip.if_win32 class test_FilesystemTransport: def setup_method(self): self.channels = set() try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() except Exception: pytest.skip('filesystem transport: cannot create tempfiles') self.c = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_in, 'data_folder_out': data_folder_out, }) 
self.channels.add(self.c.default_channel) self.p = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_out, 'data_folder_out': data_folder_in, }) self.channels.add(self.p.default_channel) self.e = Exchange('test_transport_filesystem') self.q = Queue('test_transport_filesystem', exchange=self.e, routing_key='test_transport_filesystem') self.q2 = Queue('test_transport_filesystem2', exchange=self.e, routing_key='test_transport_filesystem2') def teardown_method(self): # make sure we don't attempt to restore messages at shutdown. for channel in self.channels: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def _add_channel(self, channel): self.channels.add(channel) return channel def test_produce_consume_noack(self): producer = Producer(self._add_channel(self.p.channel()), self.e) consumer = Consumer(self._add_channel(self.c.channel()), self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume(self): producer_channel = self._add_channel(self.p.channel()) consumer_channel = self._add_channel(self.c.channel()) producer = Producer(producer_channel, self.e) consumer1 = Consumer(consumer_channel, self.q) consumer2 = Consumer(consumer_channel, self.q2) self.q2(consumer_channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_filesystem', compression='zlib') m = self.q(consumer_channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') assert self.q(consumer_channel).get() self.q(consumer_channel).delete() self.q(consumer_channel).declare() assert self.q(consumer_channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') assert self.q2(consumer_channel).get() self.q2(consumer_channel).purge() assert self.q2(consumer_channel).get() is None @t.skip.if_win32 class test_FilesystemFanout: def setup_method(self): try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() control_folder = tempfile.mkdtemp() except Exception: pytest.skip("filesystem transport: cannot create tempfiles") self.consumer_connection = Connection( transport="filesystem", transport_options={ "data_folder_in": data_folder_in, "data_folder_out": data_folder_out, "control_folder": control_folder, }, ) self.consume_channel = self.consumer_connection.channel() self.produce_connection = Connection( transport="filesystem", transport_options={ "data_folder_in": data_folder_out, "data_folder_out": 
data_folder_in, "control_folder": control_folder, }, ) self.producer_channel = self.produce_connection.channel() self.exchange = Exchange("filesystem_exchange_fanout", type="fanout") self.q1 = Queue("queue1", exchange=self.exchange) self.q2 = Queue("queue2", exchange=self.exchange) def teardown_method(self): # make sure we don't attempt to restore messages at shutdown. for channel in [self.producer_channel, self.consumer_connection]: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def test_produce_consume(self): producer = Producer(self.producer_channel, self.exchange) consumer1 = Consumer(self.consume_channel, self.q1) consumer2 = Consumer(self.consume_channel, self.q2) self.q2(self.consume_channel).declare() for i in range(10): producer.publish({"foo": i}) _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: try: self.consume_channel.drain_events() except Empty: break assert len(_received1) + len(_received2) == 20 # queue.delete for i in range(10): producer.publish({"foo": i}) assert self.q1(self.consume_channel).get() self.q1(self.consume_channel).delete() self.q1(self.consume_channel).declare() assert self.q1(self.consume_channel).get() is None # queue.purge assert self.q2(self.consume_channel).get() self.q2(self.consume_channel).purge() assert self.q2(self.consume_channel).get() is None @t.skip.if_win32 class test_FilesystemLock: def setup_method(self): try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() control_folder = tempfile.mkdtemp() except Exception: pytest.skip("filesystem transport: cannot create tempfiles") self.consumer_connection = Connection( transport="filesystem", transport_options={ "data_folder_in": data_folder_in, "data_folder_out": data_folder_out, "control_folder": control_folder, }, ) self.consume_channel = self.consumer_connection.channel() self.produce_connection = Connection( transport="filesystem", transport_options={ "data_folder_in": data_folder_out, "data_folder_out": data_folder_in, "control_folder": control_folder, }, ) self.producer_channel = self.produce_connection.channel() self.exchange = Exchange("filesystem_exchange_lock", type="fanout") self.q = Queue("queue1", exchange=self.exchange) def teardown_method(self): # make sure we don't attempt to restore messages at shutdown. 
for channel in [self.producer_channel, self.consumer_connection]: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def test_lock_during_process(self): pytest.importorskip('fcntl') from fcntl import LOCK_EX, LOCK_SH producer = Producer(self.producer_channel, self.exchange) with patch("kombu.transport.filesystem.lock") as lock_m, patch( "kombu.transport.filesystem.unlock" ) as unlock_m: Consumer(self.consume_channel, self.q) assert unlock_m.call_count == 1 lock_m.assert_called_once_with(unlock_m.call_args[0][0], LOCK_EX) self.q(self.consume_channel).declare() with patch("kombu.transport.filesystem.lock") as lock_m, patch( "kombu.transport.filesystem.unlock" ) as unlock_m: producer.publish({"foo": 1}) assert unlock_m.call_count == 2 assert lock_m.call_count == 2 exchange_file_obj = unlock_m.call_args_list[0][0][0] msg_file_obj = unlock_m.call_args_list[1][0][0] assert lock_m.call_args_list == [call(exchange_file_obj, LOCK_SH), call(msg_file_obj, LOCK_EX)] kombu-5.5.3/t/unit/transport/test_gcpubsub.py000066400000000000000000000720321477772317200213670ustar00rootroot00000000000000from __future__ import annotations from concurrent.futures import Future from datetime import datetime from queue import Empty from unittest.mock import MagicMock, call, patch import pytest from _socket import timeout as socket_timeout from google.api_core.exceptions import (AlreadyExists, DeadlineExceeded, PermissionDenied) from google.pubsub_v1.types.pubsub import Subscription from kombu.transport.gcpubsub import (AtomicCounter, Channel, QueueDescriptor, Transport, UnackedIds) class test_UnackedIds: def setup_method(self): self.unacked_ids = UnackedIds() def test_append(self): self.unacked_ids.append('test_id') assert self.unacked_ids[0] == 'test_id' def test_extend(self): self.unacked_ids.extend(['test_id1', 'test_id2']) assert self.unacked_ids[0] == 'test_id1' assert self.unacked_ids[1] == 'test_id2' def test_pop(self): self.unacked_ids.append('test_id') popped_id = self.unacked_ids.pop() assert popped_id == 'test_id' assert len(self.unacked_ids) == 0 def test_remove(self): self.unacked_ids.append('test_id') self.unacked_ids.remove('test_id') assert len(self.unacked_ids) == 0 def test_len(self): self.unacked_ids.append('test_id') assert len(self.unacked_ids) == 1 def test_getitem(self): self.unacked_ids.append('test_id') assert self.unacked_ids[0] == 'test_id' class test_AtomicCounter: def setup_method(self): self.counter = AtomicCounter() def test_inc(self): assert self.counter.inc() == 1 assert self.counter.inc(5) == 6 def test_dec(self): self.counter.inc(5) assert self.counter.dec() == 4 assert self.counter.dec(2) == 2 def test_get(self): self.counter.inc(7) assert self.counter.get() == 7 @pytest.fixture def channel(): with patch.object(Channel, '__init__', lambda self: None): channel = Channel() channel.connection = MagicMock() channel.queue_name_prefix = "kombu-" channel.project_id = "test_project" channel._queue_cache = {} channel._n_channels = MagicMock() channel._stop_extender = MagicMock() channel.subscriber = MagicMock() channel.publisher = MagicMock() channel.closed = False with patch.object( Channel, 'conninfo', new_callable=MagicMock ), patch.object( Channel, 'transport_options', new_callable=MagicMock ), patch.object( Channel, 'qos', new_callable=MagicMock ): yield channel class test_Channel: @patch('kombu.transport.gcpubsub.ThreadPoolExecutor') @patch('kombu.transport.gcpubsub.threading.Event') 
@patch('kombu.transport.gcpubsub.threading.Thread') @patch( 'kombu.transport.gcpubsub.Channel._get_free_channel_id', return_value=1, ) @patch( 'kombu.transport.gcpubsub.Channel._n_channels.inc', return_value=1, ) def test_channel_init( self, n_channels_in_mock, channel_id_mock, mock_thread, mock_event, mock_executor, ): mock_connection = MagicMock() ch = Channel(mock_connection) ch._n_channels.inc.assert_called_once() mock_thread.assert_called_once_with( target=ch._extend_unacked_deadline, daemon=True, ) mock_thread.return_value.start.assert_called_once() def test_entity_name(self, channel): name = "test_queue" result = channel.entity_name(name) assert result == "kombu-test_queue" @patch('kombu.transport.gcpubsub.uuid3', return_value='uuid') @patch('kombu.transport.gcpubsub.gethostname', return_value='hostname') @patch('kombu.transport.gcpubsub.getpid', return_value=1234) def test_queue_bind_direct( self, mock_pid, mock_hostname, mock_uuid, channel ): exchange = 'direct' routing_key = 'test_routing_key' pattern = 'test_pattern' queue = 'test_queue' subscription_path = 'projects/project-id/subscriptions/test_queue' channel.subscriber.subscription_path = MagicMock( return_value=subscription_path ) channel._create_topic = MagicMock(return_value='topic_path') channel._create_subscription = MagicMock() # Mock the state and exchange type mock_connection = MagicMock(name='mock_connection') channel.connection = mock_connection channel.state.exchanges = {exchange: {'type': 'direct'}} mock_exchange = MagicMock(name='mock_exchange', type='direct') channel.exchange_types = {'direct': mock_exchange} channel._queue_bind(exchange, routing_key, pattern, queue) channel._create_topic.assert_called_once_with( channel.project_id, exchange, channel.expiration_seconds ) channel._create_subscription.assert_called_once_with( topic_path='topic_path', subscription_path=subscription_path, filter_args={'filter': f'attributes.routing_key="{routing_key}"'}, msg_retention=channel.expiration_seconds, ) assert channel.entity_name(queue) in channel._queue_cache @patch('kombu.transport.gcpubsub.uuid3', return_value='uuid') @patch('kombu.transport.gcpubsub.gethostname', return_value='hostname') @patch('kombu.transport.gcpubsub.getpid', return_value=1234) def test_queue_bind_fanout( self, mock_pid, mock_hostname, mock_uuid, channel ): exchange = 'test_exchange' routing_key = 'test_routing_key' pattern = 'test_pattern' queue = 'test_queue' uniq_sub_name = 'test_queue-uuid' subscription_path = ( f'projects/project-id/subscriptions/{uniq_sub_name}' ) channel.subscriber.subscription_path = MagicMock( return_value=subscription_path ) channel._create_topic = MagicMock(return_value='topic_path') channel._create_subscription = MagicMock() # Mock the state and exchange type mock_connection = MagicMock(name='mock_connection') channel.connection = mock_connection channel.state.exchanges = {exchange: {'type': 'fanout'}} mock_exchange = MagicMock(name='mock_exchange', type='fanout') channel.exchange_types = {'fanout': mock_exchange} channel._queue_bind(exchange, routing_key, pattern, queue) channel._create_topic.assert_called_once_with( channel.project_id, exchange, 600 ) channel._create_subscription.assert_called_once_with( topic_path='topic_path', subscription_path=subscription_path, filter_args={}, msg_retention=600, ) assert channel.entity_name(queue) in channel._queue_cache assert subscription_path in channel._tmp_subscriptions assert exchange in channel._fanout_exchanges def test_queue_bind_not_implemented(self, channel): exchange = 
'test_exchange' routing_key = 'test_routing_key' pattern = 'test_pattern' queue = 'test_queue' channel.typeof = MagicMock(return_value=MagicMock(type='unsupported')) with pytest.raises(NotImplementedError): channel._queue_bind(exchange, routing_key, pattern, queue) def test_create_topic(self, channel): channel.project_id = "project_id" topic_id = "topic_id" channel._is_topic_exists = MagicMock(return_value=False) channel.publisher.topic_path = MagicMock(return_value="topic_path") channel.publisher.create_topic = MagicMock() result = channel._create_topic(channel.project_id, topic_id) assert result == "topic_path" channel.publisher.create_topic.assert_called_once() channel._create_topic( channel.project_id, topic_id, message_retention_duration=10 ) assert ( dict( request={ 'name': 'topic_path', 'message_retention_duration': '10s', } ) in channel.publisher.create_topic.call_args ) channel.publisher.create_topic.side_effect = AlreadyExists( "test_error" ) channel._create_topic( channel.project_id, topic_id, message_retention_duration=10 ) def test_is_topic_exists(self, channel): topic_path = "projects/project-id/topics/test_topic" mock_topic = MagicMock() mock_topic.name = topic_path channel.publisher.list_topics.return_value = [mock_topic] result = channel._is_topic_exists(topic_path) assert result is True channel.publisher.list_topics.assert_called_once_with( request={"project": f'projects/{channel.project_id}'} ) def test_is_topic_not_exists(self, channel): topic_path = "projects/project-id/topics/test_topic" channel.publisher.list_topics.return_value = [] result = channel._is_topic_exists(topic_path) assert result is False channel.publisher.list_topics.assert_called_once_with( request={"project": f'projects/{channel.project_id}'} ) def test_create_subscription(self, channel): channel.project_id = "project_id" topic_id = "topic_id" subscription_path = "subscription_path" topic_path = "topic_path" channel.subscriber.subscription_path = MagicMock( return_value=subscription_path ) channel.publisher.topic_path = MagicMock(return_value=topic_path) channel.subscriber.create_subscription = MagicMock() result = channel._create_subscription( project_id=channel.project_id, topic_id=topic_id, subscription_path=subscription_path, topic_path=topic_path, ) assert result == subscription_path channel.subscriber.create_subscription.assert_called_once() def test_create_subscription_protobuf_compat(self): request = { 'name': 'projects/my_project/subscriptions/kombu-1111-2222', 'topic': 'projects/jether-fox/topics/reply.celery.pidbox', 'ack_deadline_seconds': 240, 'expiration_policy': {'ttl': '86400s'}, 'message_retention_duration': '86400s', 'filter': 'attributes.routing_key="1111-2222"', } Subscription(request) def test_delete(self, channel): queue = "test_queue" subscription_path = "projects/project-id/subscriptions/test_queue" qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id=queue, subscription_path=subscription_path, ) channel.subscriber = MagicMock() channel._queue_cache[channel.entity_name(queue)] = qdesc channel._delete(queue) channel.subscriber.delete_subscription.assert_called_once_with( request={"subscription": subscription_path} ) assert queue not in channel._queue_cache def test_put(self, channel): queue = "test_queue" message = { "properties": {"delivery_info": {"routing_key": "test_key"}} } channel.entity_name = MagicMock(return_value=queue) channel._queue_cache[channel.entity_name(queue)] = QueueDescriptor( name=queue, 
topic_path="topic_path", subscription_id=queue, subscription_path="subscription_path", ) channel._get_routing_key = MagicMock(return_value="test_key") channel.publisher.publish = MagicMock() channel._put(queue, message) channel.publisher.publish.assert_called_once() def test_put_fanout(self, channel): exchange = "test_exchange" message = { "properties": {"delivery_info": {"routing_key": "test_key"}} } routing_key = "test_key" channel._lookup = MagicMock() channel.publisher.topic_path = MagicMock(return_value="topic_path") channel.publisher.publish = MagicMock() channel._put_fanout(exchange, message, routing_key) channel._lookup.assert_called_once_with(exchange, routing_key) channel.publisher.topic_path.assert_called_once_with( channel.project_id, exchange ) assert 'topic_path', ( b'{"properties": {"delivery_info": {"routing_key": "test_key"}}}' in channel.publisher.publish.call_args ) def test_get(self, channel): queue = "test_queue" channel.entity_name = MagicMock(return_value=queue) channel._queue_cache[queue] = QueueDescriptor( name=queue, topic_path="topic_path", subscription_id=queue, subscription_path="subscription_path", ) channel.subscriber.pull = MagicMock( return_value=MagicMock( received_messages=[ MagicMock( ack_id="ack_id", message=MagicMock( data=b'{"properties": ' b'{"delivery_info": ' b'{"exchange": "exchange"},"delivery_mode": 1}}' ), ) ] ) ) channel.subscriber.acknowledge = MagicMock() payload = channel._get(queue) assert ( payload["properties"]["delivery_info"]["exchange"] == "exchange" ) channel.subscriber.pull.side_effect = DeadlineExceeded("test_error") with pytest.raises(Empty): channel._get(queue) def test_get_bulk(self, channel): queue = "test_queue" subscription_path = "projects/project-id/subscriptions/test_queue" qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id=queue, subscription_path=subscription_path, ) channel._queue_cache[channel.entity_name(queue)] = qdesc data = b'{"properties": {"delivery_info": {"exchange": "exchange"}}}' received_message = MagicMock( ack_id="ack_id", message=MagicMock(data=data), ) channel.subscriber.pull = MagicMock( return_value=MagicMock(received_messages=[received_message]) ) channel.bulk_max_messages = 10 channel._is_auto_ack = MagicMock(return_value=True) channel._do_ack = MagicMock() channel.qos.can_consume_max_estimate = MagicMock(return_value=None) queue, payloads = channel._get_bulk(queue, timeout=10) assert len(payloads) == 1 assert ( payloads[0]["properties"]["delivery_info"]["exchange"] == "exchange" ) channel._do_ack.assert_called_once_with(["ack_id"], subscription_path) channel.subscriber.pull.side_effect = DeadlineExceeded("test_error") with pytest.raises(Empty): channel._get_bulk(queue, timeout=10) def test_lookup(self, channel): exchange = "test_exchange" routing_key = "test_key" default = None channel.connection = MagicMock() channel.state.exchanges = {exchange: {"type": "direct"}} channel.typeof = MagicMock( return_value=MagicMock(lookup=MagicMock(return_value=["queue1"])) ) channel.get_table = MagicMock(return_value="table") result = channel._lookup(exchange, routing_key, default) channel.typeof.return_value.lookup.assert_called_once_with( "table", exchange, routing_key, default ) assert result == ["queue1"] # Test the case where no queues are bound to the exchange channel.typeof.return_value.lookup.return_value = None channel.queue_bind = MagicMock() result = channel._lookup(exchange, routing_key, default) channel.queue_bind.assert_called_once_with( exchange, 
exchange, routing_key ) assert result == [exchange] @patch('kombu.transport.gcpubsub.monitoring_v3') @patch('kombu.transport.gcpubsub.query.Query') def test_size(self, mock_query, mock_monitor, channel): queue = "test_queue" subscription_id = "test_subscription" qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id=subscription_id, subscription_path="projects/project-id/subscriptions/test_subscription", # E501 ) channel._queue_cache[channel.entity_name(queue)] = qdesc mock_query_result = MagicMock() mock_query_result.select_resources.return_value = [ MagicMock(points=[MagicMock(value=MagicMock(int64_value=5))]) ] mock_query.return_value = mock_query_result size = channel._size(queue) assert size == 5 # Test the case where the queue is not in the cache size = channel._size("non_existent_queue") assert size == 0 # Test the case where the query raises PermissionDenied mock_item = MagicMock() mock_item.points.__getitem__.side_effect = PermissionDenied( 'test_error' ) mock_query_result.select_resources.return_value = [mock_item] size = channel._size(queue) assert size == -1 def test_basic_ack(self, channel): delivery_tag = "test_delivery_tag" ack_id = "test_ack_id" queue = "test_queue" subscription_path = ( "projects/project-id/subscriptions/test_subscription" ) qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id="test_subscription", subscription_path=subscription_path, ) channel._queue_cache[queue] = qdesc delivery_info = { 'gcpubsub_message': { 'queue': queue, 'ack_id': ack_id, 'subscription_path': subscription_path, } } channel.qos.get = MagicMock( return_value=MagicMock(delivery_info=delivery_info) ) channel._do_ack = MagicMock() channel.basic_ack(delivery_tag) channel._do_ack.assert_called_once_with([ack_id], subscription_path) assert ack_id not in qdesc.unacked_ids def test_do_ack(self, channel): ack_ids = ["ack_id1", "ack_id2"] subscription_path = ( "projects/project-id/subscriptions/test_subscription" ) channel.subscriber = MagicMock() channel._do_ack(ack_ids, subscription_path) assert subscription_path, ( ack_ids in channel.subscriber.acknowledge.call_args ) def test_purge(self, channel): queue = "test_queue" subscription_path = f"projects/project-id/subscriptions/{queue}" qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id="test_subscription", subscription_path=subscription_path, ) channel._queue_cache[channel.entity_name(queue)] = qdesc channel.subscriber = MagicMock() with patch.object(channel, '_size', return_value=10), patch( 'kombu.transport.gcpubsub.datetime.datetime' ) as dt_mock: dt_mock.now.return_value = datetime(2021, 1, 1) result = channel._purge(queue) assert result == 10 channel.subscriber.seek.assert_called_once_with( request={ "subscription": subscription_path, "time": datetime(2021, 1, 1), } ) # Test the case where the queue is not in the cache result = channel._purge("non_existent_queue") assert result is None def test_extend_unacked_deadline(self, channel): queue = "test_queue" subscription_path = ( "projects/project-id/subscriptions/test_subscription" ) ack_ids = ["ack_id1", "ack_id2"] qdesc = QueueDescriptor( name=queue, topic_path="projects/project-id/topics/test_topic", subscription_id="test_subscription", subscription_path=subscription_path, ) channel.transport_options = {"ack_deadline_seconds": 240} channel._queue_cache[channel.entity_name(queue)] = qdesc qdesc.unacked_ids.extend(ack_ids) 
channel._stop_extender.wait = MagicMock(side_effect=[False, True]) channel.subscriber.modify_ack_deadline = MagicMock() channel._extend_unacked_deadline() channel.subscriber.modify_ack_deadline.assert_called_once_with( request={ "subscription": subscription_path, "ack_ids": ack_ids, "ack_deadline_seconds": 240, } ) for _ in ack_ids: qdesc.unacked_ids.pop() channel._stop_extender.wait = MagicMock(side_effect=[False, True]) modify_ack_deadline_calls = ( channel.subscriber.modify_ack_deadline.call_count ) channel._extend_unacked_deadline() assert ( channel.subscriber.modify_ack_deadline.call_count == modify_ack_deadline_calls ) def test_after_reply_message_received(self, channel): queue = 'test-queue' subscription_path = f'projects/test-project/subscriptions/{queue}' channel.subscriber.subscription_path.return_value = subscription_path channel.after_reply_message_received(queue) # Check that the subscription path is added to _tmp_subscriptions assert subscription_path in channel._tmp_subscriptions def test_subscriber(self, channel): assert channel.subscriber def test_publisher(self, channel): assert channel.publisher def test_transport_options(self, channel): assert channel.transport_options def test_bulk_max_messages_default(self, channel): assert channel.bulk_max_messages == channel.transport_options.get( 'bulk_max_messages' ) def test_close(self, channel): channel._tmp_subscriptions = {'sub1', 'sub2'} channel._n_channels.dec.return_value = 0 with patch.object( Channel._unacked_extender, 'join' ) as mock_join, patch( 'kombu.transport.virtual.Channel.close' ) as mock_super_close: channel.close() channel.subscriber.delete_subscription.assert_has_calls( [ call(request={"subscription": 'sub1'}), call(request={"subscription": 'sub2'}), ], any_order=True, ) channel._stop_extender.set.assert_called_once() mock_join.assert_called_once() mock_super_close.assert_called_once() @pytest.fixture def transport(): return Transport(client=MagicMock()) class test_Transport: def test_driver_version(self, transport): assert transport.driver_version() def test_as_uri(self, transport): result = transport.as_uri('gcpubsub://') assert result == 'gcpubsub://' def test_drain_events_timeout(self, transport): transport.polling_interval = 4 with patch.object( transport, '_drain_from_active_queues', side_effect=Empty ), patch( 'kombu.transport.gcpubsub.monotonic', side_effect=[0, 1, 2, 3, 4, 5], ), patch( 'kombu.transport.gcpubsub.sleep' ) as mock_sleep: with pytest.raises(socket_timeout): transport.drain_events(None, timeout=3) mock_sleep.assert_called() def test_drain_events_no_timeout(self, transport): with patch.object( transport, '_drain_from_active_queues', side_effect=[Empty, None] ), patch( 'kombu.transport.gcpubsub.monotonic', side_effect=[0, 1] ), patch( 'kombu.transport.gcpubsub.sleep' ) as mock_sleep: transport.drain_events(None, timeout=None) mock_sleep.assert_called() def test_drain_events_polling_interval(self, transport): transport.polling_interval = 2 with patch.object( transport, '_drain_from_active_queues', side_effect=[Empty, None] ), patch( 'kombu.transport.gcpubsub.monotonic', side_effect=[0, 1, 2] ), patch( 'kombu.transport.gcpubsub.sleep' ) as mock_sleep: transport.drain_events(None, timeout=5) mock_sleep.assert_called_with(2) def test_drain_from_active_queues_empty(self, transport): with patch.object( transport, '_rm_empty_bulk_requests' ) as mock_rm_empty, patch.object( transport, '_submit_get_bulk_requests' ) as mock_submit, patch( 'kombu.transport.gcpubsub.wait', return_value=(set(), set()) ) 
as mock_wait: with pytest.raises(Empty): transport._drain_from_active_queues(timeout=10) mock_rm_empty.assert_called_once() mock_submit.assert_called_once_with(timeout=10) mock_wait.assert_called_once() def test_drain_from_active_queues_done(self, transport): future = Future() future.set_result(('queue', [{'properties': {'delivery_info': {}}}])) with patch.object( transport, '_rm_empty_bulk_requests' ) as mock_rm_empty, patch.object( transport, '_submit_get_bulk_requests' ) as mock_submit, patch( 'kombu.transport.gcpubsub.wait', return_value=({future}, set()) ) as mock_wait, patch.object( transport, '_deliver' ) as mock_deliver: transport._callbacks = {'queue'} transport._drain_from_active_queues(timeout=10) mock_rm_empty.assert_called_once() mock_submit.assert_called_once_with(timeout=10) mock_wait.assert_called_once() mock_deliver.assert_called_once_with( {'properties': {'delivery_info': {}}}, 'queue' ) mock_deliver_call_count = mock_deliver.call_count transport._callbacks = {} transport._drain_from_active_queues(timeout=10) assert mock_deliver_call_count == mock_deliver.call_count def test_drain_from_active_queues_exception(self, transport): future = Future() future.set_exception(Exception("Test exception")) with patch.object( transport, '_rm_empty_bulk_requests' ) as mock_rm_empty, patch.object( transport, '_submit_get_bulk_requests' ) as mock_submit, patch( 'kombu.transport.gcpubsub.wait', return_value=({future}, set()) ) as mock_wait: with pytest.raises(Empty): transport._drain_from_active_queues(timeout=10) mock_rm_empty.assert_called_once() mock_submit.assert_called_once_with(timeout=10) mock_wait.assert_called_once() def test_rm_empty_bulk_requests(self, transport): # Create futures with exceptions to simulate empty requests future_with_exception = Future() future_with_exception.set_exception(Exception("Test exception")) transport._get_bulk_future_to_queue = { future_with_exception: 'queue1', } transport._rm_empty_bulk_requests() # Assert that the future with exception is removed assert ( future_with_exception not in transport._get_bulk_future_to_queue ) def test_submit_get_bulk_requests(self, transport): channel_mock = MagicMock(spec=Channel) channel_mock._active_queues = ['queue1', 'queue2'] transport.channels = [channel_mock] with patch.object( transport._pool, 'submit', return_value=MagicMock() ) as mock_submit: transport._submit_get_bulk_requests(timeout=10) # Check that submit was called twice, once for each queue assert mock_submit.call_count == 2 mock_submit.assert_any_call(channel_mock._get_bulk, 'queue1', 10) mock_submit.assert_any_call(channel_mock._get_bulk, 'queue2', 10) def test_submit_get_bulk_requests_with_existing_futures(self, transport): channel_mock = MagicMock(spec=Channel) channel_mock._active_queues = ['queue1', 'queue2'] transport.channels = [channel_mock] # Simulate existing futures future_mock = MagicMock() transport._get_bulk_future_to_queue = {future_mock: 'queue1'} with patch.object( transport._pool, 'submit', return_value=MagicMock() ) as mock_submit: transport._submit_get_bulk_requests(timeout=10) # Check that submit was called only once for the new queue assert mock_submit.call_count == 1 mock_submit.assert_called_with( channel_mock._get_bulk, 'queue2', 10 ) kombu-5.5.3/t/unit/transport/test_librabbitmq.py000066400000000000000000000114421477772317200220430ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock, patch import pytest pytest.importorskip('librabbitmq') from kombu.transport import librabbitmq # noqa 
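# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original kombu test suite: the classes
# below drive the C-based librabbitmq transport through mocks, but the public
# kombu API they exercise is transport-agnostic.  Assuming only the
# Connection/Producer/Consumer interface already used throughout these tests,
# a minimal publish/consume round trip looks roughly like the helper below;
# the in-memory transport is used so it runs without a broker (against a real
# RabbitMQ one would pass e.g. 'pyamqp://guest:guest@localhost//' instead).
# The helper name and the queue/exchange names are made up for illustration,
# and nothing in the suite calls it.
def _example_publish_consume_roundtrip():
    from kombu import Connection, Consumer, Exchange, Producer, Queue

    exchange = Exchange('example_exchange', type='direct')
    queue = Queue('example_queue', exchange=exchange,
                  routing_key='example_key')
    received = []

    with Connection('memory://') as conn:
        channel = conn.channel()
        # declare the queue (and its binding) up front, as the transport
        # tests in this section do with e.g. self.q2(channel).declare()
        queue(channel).declare()

        producer = Producer(channel, exchange)
        producer.publish({'hello': 'world'}, routing_key='example_key')

        consumer = Consumer(channel, queue, no_ack=True)
        consumer.register_callback(
            lambda body, message: received.append(body))
        with consumer:
            conn.drain_events(timeout=1)

    assert received == [{'hello': 'world'}]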
class test_Message:

    def test_init(self):
        chan = Mock(name='channel')
        message = librabbitmq.Message(
            chan, {'prop': 42}, {'delivery_tag': 337}, 'body',
        )
        assert message.body == 'body'
        assert message.delivery_tag == 337
        assert message.properties['prop'] == 42


class test_Channel:

    def test_prepare_message(self):
        conn = Mock(name='connection')
        chan = librabbitmq.Channel(conn, 1)
        assert chan

        body = 'the quick brown fox...'
        properties = {'name': 'Elaine M.'}

        body2, props2 = chan.prepare_message(
            body, properties=properties,
            priority=999,
            content_type='ctype',
            content_encoding='cenc',
            headers={'H': 2},
        )
        assert props2['name'] == 'Elaine M.'
        assert props2['priority'] == 999
        assert props2['content_type'] == 'ctype'
        assert props2['content_encoding'] == 'cenc'
        assert props2['headers'] == {'H': 2}
        assert body2 == body

        body3, props3 = chan.prepare_message(body, priority=777)
        assert props3['priority'] == 777
        assert body3 == body


class test_Transport:

    def setup_method(self):
        self.client = Mock(name='client')
        self.T = librabbitmq.Transport(self.client)

    def test_driver_version(self):
        assert self.T.driver_version()

    def test_create_channel(self):
        conn = Mock(name='connection')
        chan = self.T.create_channel(conn)
        assert chan
        conn.channel.assert_called_with()

    def test_drain_events(self):
        conn = Mock(name='connection')
        self.T.drain_events(conn, timeout=1.33)
        conn.drain_events.assert_called_with(timeout=1.33)

    def test_establish_connection_SSL_not_supported(self):
        self.client.ssl = True
        with pytest.raises(NotImplementedError):
            self.T.establish_connection()

    def test_establish_connection(self):
        self.T.Connection = Mock(name='Connection')
        self.T.client.ssl = False
        self.T.client.port = None
        self.T.client.transport_options = {}
        conn = self.T.establish_connection()
        assert self.T.client.port == self.T.default_connection_params['port']
        assert conn.client == self.T.client
        assert self.T.client.drain_events == conn.drain_events

    def test_collect__no_conn(self):
        self.T.client.drain_events = 1234
        self.T._collect(None)
        assert self.client.drain_events is None
        assert self.T.client is None

    def test_collect__with_conn(self):
        self.T.client.drain_events = 1234
        conn = Mock(name='connection')
        chans = conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')}
        conn.callbacks = {'foo': Mock(name='cb1'), 'bar': Mock(name='cb2')}
        for i, chan in enumerate(conn.channels.values()):
            chan.connection = i

        with patch('os.close') as close:
            self.T._collect(conn)
            close.assert_called_with(conn.fileno())

        assert not conn.channels
        assert not conn.callbacks
        for chan in chans.values():
            assert chan.connection is None
        assert self.client.drain_events is None
        assert self.T.client is None

        with patch('os.close') as close:
            self.T.client = self.client
            close.side_effect = OSError()
            self.T._collect(conn)
            close.assert_called_with(conn.fileno())

    def test_collect__with_fileno_raising_value_error(self):
        conn = Mock(name='connection')
        conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')}
        with patch('os.close') as close:
            self.T.client = self.client
            conn.fileno.side_effect = ValueError("Socket not connected")
            self.T._collect(conn)
        close.assert_not_called()
        conn.fileno.assert_called_with()
        assert self.client.drain_events is None
        assert self.T.client is None

    def test_register_with_event_loop(self):
        conn = Mock(name='conn')
        loop = Mock(name='loop')
        self.T.register_with_event_loop(conn, loop)
        loop.add_reader.assert_called_with(
            conn.fileno(), self.T.on_readable, conn, loop,
        )

    def test_verify_connection(self):
        conn = Mock(name='connection')
        conn.connected = True
assert self.T.verify_connection(conn) def test_close_connection(self): conn = Mock(name='connection') self.client.drain_events = 1234 self.T.close_connection(conn) assert self.client.drain_events is None conn.close.assert_called_with() kombu-5.5.3/t/unit/transport/test_memory.py000066400000000000000000000130221477772317200210570ustar00rootroot00000000000000from __future__ import annotations import socket import pytest from kombu import Connection, Consumer, Exchange, Producer, Queue class test_MemoryTransport: def setup_method(self): self.c = Connection(transport='memory') self.e = Exchange('test_transport_memory') self.q = Queue('test_transport_memory', exchange=self.e, routing_key='test_transport_memory') self.q2 = Queue('test_transport_memory2', exchange=self.e, routing_key='test_transport_memory2') self.fanout = Exchange('test_transport_memory_fanout', type='fanout') self.q3 = Queue('test_transport_memory_fanout1', exchange=self.fanout) self.q4 = Queue('test_transport_memory_fanout2', exchange=self.fanout) def test_driver_version(self): assert self.c.transport.driver_version() def test_produce_consume_noack(self): channel = self.c.channel() producer = Producer(channel, self.e) consumer = Consumer(channel, self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume_fanout(self): producer = self.c.Producer() consumer = self.c.Consumer([self.q3, self.q4]) producer.publish( {'hello': 'world'}, declare=consumer.queues, exchange=self.fanout, ) assert self.q3(self.c).get().payload == {'hello': 'world'} assert self.q4(self.c).get().payload == {'hello': 'world'} assert self.q3(self.c).get() is None assert self.q4(self.c).get() is None def test_produce_consume(self): channel = self.c.channel() producer = Producer(channel, self.e) consumer1 = Consumer(channel, self.q) consumer2 = Consumer(channel, self.q2) self.q2(channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_memory', compression='zlib') m = self.q(channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') assert self.q(channel).get() self.q(channel).delete() self.q(channel).declare() assert self.q(channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory2') assert self.q2(channel).get() self.q2(channel).purge() assert self.q2(channel).get() is None def test_drain_events(self): with pytest.raises(socket.timeout): self.c.drain_events(timeout=0.1) c1 = self.c.channel() c2 = self.c.channel() with pytest.raises(socket.timeout): 
self.c.drain_events(timeout=0.1) del c1 # so pyflakes doesn't complain. del c2 def test_drain_events_unregistered_queue(self): c1 = self.c.channel() producer = self.c.Producer() consumer = self.c.Consumer([self.q2]) producer.publish( {'hello': 'world'}, declare=consumer.queues, routing_key=self.q2.routing_key, exchange=self.q2.exchange, ) message = consumer.queues[0].get()._raw class Cycle: def get(self, callback, timeout=None): return (message, 'foo'), c1 self.c.transport.cycle = Cycle() self.c.drain_events() def test_queue_for(self): chan = self.c.channel() chan.queues.clear() x = chan._queue_for('foo') assert x assert chan._queue_for('foo') is x # see the issue # https://github.com/celery/kombu/issues/1050 def test_producer_on_return(self): def on_return(_exception, _exchange, _routing_key, _message): pass channel = self.c.channel() producer = Producer(channel, on_return=on_return) consumer = self.c.Consumer([self.q3]) producer.publish( {'hello': 'on return'}, declare=consumer.queues, exchange=self.fanout, ) assert self.q3(self.c).get().payload == {'hello': 'on return'} assert self.q3(self.c).get() is None kombu-5.5.3/t/unit/transport/test_mongodb.py000066400000000000000000000473621477772317200212120ustar00rootroot00000000000000from __future__ import annotations import datetime from queue import Empty from unittest.mock import MagicMock, call, patch import pytest from kombu import Connection pymongo = pytest.importorskip('pymongo') # must import following after above validation to avoid error # and skip tests if missing # these are used to define real spec of the corresponding mocks, # to ensure called methods exist in real objects # pylint: disable=C0413 from pymongo.collection import Collection # isort:skip # noqa: E402 from pymongo.database import Database # isort:skip # noqa: E402 from kombu.transport.mongodb import BroadcastCursor # isort:skip # noqa: E402 def _create_mock_connection(url='', **kwargs): from kombu.transport import mongodb class _Channel(mongodb.Channel): # reset _fanout_queues for each instance _fanout_queues = {} collections = {} now = datetime.datetime.utcnow() def _create_client(self): # not really a 'MongoClient', # but an actual pre-established Database connection mock = MagicMock(name='client', spec=Database) # we need new mock object for every collection def get_collection(name): try: return self.collections[name] except KeyError: mock = self.collections[name] = MagicMock( name='collection:%s' % name, spec=Collection, ) return mock mock.__getitem__.side_effect = get_collection return mock def get_now(self): return self.now class Transport(mongodb.Transport): Channel = _Channel return Connection(url, transport=Transport, **kwargs) class test_mongodb_uri_parsing: def test_defaults(self): url = 'mongodb://' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'kombu_default' assert hostname == 'mongodb://127.0.0.1' def test_custom_host(self): url = 'mongodb://localhost' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'kombu_default' def test_custom_port(self): url = 'mongodb://localhost:27018' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert hostname == 'mongodb://localhost:27018' def test_replicaset_hosts(self): url = 'mongodb://mongodb1.example.com:27317,mongodb2.example.com:27017/?replicaSet=test_rs' channel = _create_mock_connection(url).default_channel 
hostname, dbname, options = channel._parse_uri() assert hostname == 'mongodb://mongodb1.example.com:27317,mongodb2.example.com:27017/?replicaSet=test_rs' # noqa assert options['replicaset'] == 'test_rs' def test_custom_database(self): url = 'mongodb://localhost/dbname' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'dbname' def test_custom_credentials(self): url = 'mongodb://localhost/dbname' channel = _create_mock_connection( url, userid='foo', password='bar').default_channel hostname, dbname, options = channel._parse_uri() assert hostname == 'mongodb://foo:bar@localhost/dbname' assert dbname == 'dbname' def test_correct_readpreference(self): url = 'mongodb://localhost/dbname?readpreference=nearest' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert options['readpreference'] == 'nearest' def test_normalizes_params_from_uri_only_once(self): channel = _create_mock_connection('mongodb://localhost/?serverselectiontimeoutms=1000').default_channel def server_info(self): return {'version': '3.6.0-rc'} with patch.object(pymongo.MongoClient, 'server_info', server_info): database = channel._open() client_options = database.client.options assert client_options.server_selection_timeout == 1.0 class BaseMongoDBChannelCase: def _get_method(self, cname, mname): collection = getattr(self.channel, cname) method = getattr(collection, mname.split('.', 1)[0]) for bit in mname.split('.')[1:]: method = getattr(method.return_value, bit) return method def set_operation_return_value(self, cname, mname, *values): method = self._get_method(cname, mname) if len(values) == 1: method.return_value = values[0] else: method.side_effect = values def declare_broadcast_queue(self, queue): self.channel.exchange_declare('fanout_exchange', type='fanout') self.channel._queue_bind('fanout_exchange', 'foo', '*', queue) assert queue in self.channel._broadcast_cursors def get_broadcast(self, queue): return self.channel._broadcast_cursors[queue] def set_broadcast_return_value(self, queue, *values): self.declare_broadcast_queue(queue) cursor = MagicMock(name='cursor', spec=BroadcastCursor) cursor.__iter__.return_value = iter(values) self.channel._broadcast_cursors[queue]._cursor = iter(cursor) def assert_collection_accessed(self, *collections): self.channel.client.__getitem__.assert_has_calls( [call(c) for c in collections], any_order=True) def assert_operation_has_calls(self, cname, mname, calls, any_order=False): method = self._get_method(cname, mname) method.assert_has_calls(calls, any_order=any_order) def assert_operation_called_with(self, cname, mname, *args, **kwargs): self.assert_operation_has_calls(cname, mname, [call(*args, **kwargs)]) class test_mongodb_channel(BaseMongoDBChannelCase): def setup_method(self): self.connection = _create_mock_connection() self.channel = self.connection.default_channel # Tests for "public" channel interface def test_new_queue(self): self.channel._new_queue('foobar') self.channel.client.assert_not_called() def test_get(self): self.set_operation_return_value('messages', 'find_one_and_delete', { '_id': 'docId', 'payload': '{"some": "data"}', }) event = self.channel._get('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'find_one_and_delete', {'queue': 'foobar'}, sort=[ ('priority', pymongo.ASCENDING), ], ) assert event == {'some': 'data'} self.set_operation_return_value( 'messages', 'find_one_and_delete', None, ) 
with pytest.raises(Empty): self.channel._get('foobar') def test_get_fanout(self): self.set_broadcast_return_value('foobar', { '_id': 'docId1', 'payload': '{"some": "data"}', }) event = self.channel._get('foobar') self.assert_collection_accessed('messages.broadcast') assert event == {'some': 'data'} with pytest.raises(Empty): self.channel._get('foobar') def test_put(self): self.channel._put('foobar', {'some': 'data'}) self.assert_collection_accessed('messages') self.assert_operation_called_with('messages', 'insert_one', { 'queue': 'foobar', 'priority': 9, 'payload': '{"some": "data"}', }) def test_put_fanout(self): self.declare_broadcast_queue('foobar') self.channel._put_fanout('foobar', {'some': 'data'}, 'foo') self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with('broadcast', 'insert_one', { 'queue': 'foobar', 'payload': '{"some": "data"}', }) def test_size(self): self.set_operation_return_value('messages', 'count_documents', 77) result = self.channel._size('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'count_documents', {'queue': 'foobar'}, ) assert result == 77 def test_size_fanout(self): self.declare_broadcast_queue('foobar') cursor = MagicMock(name='cursor', spec=BroadcastCursor) cursor.get_size.return_value = 77 self.channel._broadcast_cursors['foobar'] = cursor result = self.channel._size('foobar') assert result == 77 def test_purge(self): self.set_operation_return_value('messages', 'count_documents', 77) result = self.channel._purge('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'delete_many', {'queue': 'foobar'}, ) assert result == 77 def test_purge_fanout(self): self.declare_broadcast_queue('foobar') cursor = MagicMock(name='cursor', spec=BroadcastCursor) cursor.get_size.return_value = 77 self.channel._broadcast_cursors['foobar'] = cursor result = self.channel._purge('foobar') cursor.purge.assert_any_call() assert result == 77 def test_get_table(self): state_table = [('foo', '*', 'foo')] stored_table = [('bar', '*', 'bar')] self.channel.exchange_declare('test_exchange') self.channel.state.exchanges['test_exchange']['table'] = state_table self.set_operation_return_value('routing', 'find', [{ '_id': 'docId', 'routing_key': stored_table[0][0], 'pattern': stored_table[0][1], 'queue': stored_table[0][2], }]) result = self.channel.get_table('test_exchange') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'find', {'exchange': 'test_exchange'}, ) assert set(result) == frozenset(state_table) | frozenset(stored_table) def test_queue_bind(self): self.channel._queue_bind('test_exchange', 'foo', '*', 'foo') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'update_one', {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}, {'$set': {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}}, upsert=True, ) def test_queue_delete(self): self.channel.queue_delete('foobar') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'delete_many', {'queue': 'foobar'}, ) def test_queue_delete_fanout(self): self.declare_broadcast_queue('foobar') cursor = MagicMock(name='cursor', spec=BroadcastCursor) self.channel._broadcast_cursors['foobar'] = cursor self.channel.queue_delete('foobar') cursor.close.assert_any_call() assert 'foobar' not in self.channel._broadcast_cursors 
assert 'foobar' not in self.channel._fanout_queues # Tests for channel internals def test_create_broadcast(self): self.channel._create_broadcast(self.channel.client) self.channel.client.create_collection.assert_called_with( 'messages.broadcast', capped=True, size=100000, ) def test_create_broadcast_exists(self): # simulate already created collection self.channel.client.list_collection_names.return_value = [ 'messages.broadcast' ] broadcast = self.channel._create_broadcast(self.channel.client) self.channel.client.create_collection.assert_not_called() assert broadcast is None # not returned since not created def test_get_broadcast_cursor_created(self): self.channel._fanout_queues['foobar'] = 'fanout_exchange' created_cursor = self.channel._get_broadcast_cursor('foobar') cached_cursor = self.channel._broadcast_cursors['foobar'] assert cached_cursor is created_cursor def test_get_broadcast_cursor_exists(self): self.declare_broadcast_queue('foobar') cached_cursor = self.channel._broadcast_cursors['foobar'] getter_cursor = self.channel._get_broadcast_cursor('foobar') assert cached_cursor is getter_cursor def test_ensure_indexes(self): self.channel._ensure_indexes(self.channel.client) self.assert_operation_called_with( 'messages', 'create_index', [('queue', 1), ('priority', 1), ('_id', 1)], background=True, ) self.assert_operation_called_with( 'broadcast', 'create_index', [('queue', 1)], ) self.assert_operation_called_with( 'routing', 'create_index', [('queue', 1), ('exchange', 1)], ) def test_create_broadcast_cursor(self): with patch.object(pymongo, 'version_tuple', (2, )): self.channel._create_broadcast_cursor( 'fanout_exchange', 'foo', '*', 'foobar', ) self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with( 'broadcast', 'find', tailable=True, query={'queue': 'fanout_exchange'}, ) if pymongo.version_tuple >= (3, ): self.channel._create_broadcast_cursor( 'fanout_exchange1', 'foo', '*', 'foobar', ) self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with( 'broadcast', 'find', cursor_type=pymongo.CursorType.TAILABLE, filter={'queue': 'fanout_exchange1'}, ) def test_open_rc_version(self): def server_info(self): return {'version': '3.6.0-rc'} with patch.object(pymongo.MongoClient, 'server_info', server_info): self.channel._open() class test_mongodb_channel_ttl(BaseMongoDBChannelCase): def setup_method(self): self.connection = _create_mock_connection( transport_options={'ttl': True}, ) self.channel = self.connection.default_channel self.expire_at = ( self.channel.get_now() + datetime.timedelta(milliseconds=777)) # Tests def test_new_queue(self): self.channel._new_queue('foobar') self.assert_operation_called_with( 'queues', 'update_one', {'_id': 'foobar'}, {'$set': {'_id': 'foobar', 'options': {}, 'expire_at': None}}, upsert=True, ) def test_get(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.set_operation_return_value('messages', 'find_one_and_delete', { '_id': 'docId', 'payload': '{"some": "data"}', }) self.channel._get('foobar') self.assert_collection_accessed('messages', 'messages.queues') self.assert_operation_called_with( 'messages', 'find_one_and_delete', {'queue': 'foobar'}, sort=[ ('priority', pymongo.ASCENDING), ], ) self.assert_operation_called_with( 'routing', 'update_many', {'queue': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, ) def test_put(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': 
{'arguments': {'x-message-ttl': 777}}, }) self.channel._put('foobar', {'some': 'data'}) self.assert_collection_accessed('messages') self.assert_operation_called_with('messages', 'insert_one', { 'queue': 'foobar', 'priority': 9, 'payload': '{"some": "data"}', 'expire_at': self.expire_at, }) def test_queue_bind(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.channel._queue_bind('test_exchange', 'foo', '*', 'foo') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'update_one', {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}, {'$set': { 'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange', 'expire_at': self.expire_at }}, upsert=True, ) def test_queue_delete(self): self.channel.queue_delete('foobar') self.assert_collection_accessed('messages.queues') self.assert_operation_called_with( 'queues', 'delete_one', {'_id': 'foobar'}) def test_ensure_indexes(self): self.channel._ensure_indexes(self.channel.client) self.assert_operation_called_with( 'messages', 'create_index', [('expire_at', 1)], expireAfterSeconds=0) self.assert_operation_called_with( 'routing', 'create_index', [('expire_at', 1)], expireAfterSeconds=0) self.assert_operation_called_with( 'queues', 'create_index', [('expire_at', 1)], expireAfterSeconds=0) def test_get_queue_expire(self): result = self.channel._get_queue_expire( {'arguments': {'x-expires': 777}}, 'x-expires') self.channel.client.assert_not_called() assert result == self.expire_at self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) result = self.channel._get_queue_expire('foobar', 'x-expires') assert result == self.expire_at def test_get_message_expire(self): assert self.channel._get_message_expire({ 'properties': {'expiration': 777}, }) == self.expire_at assert self.channel._get_message_expire({}) is None def test_update_queues_expire(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.channel._update_queues_expire('foobar') self.assert_collection_accessed('messages.routing', 'messages.queues') self.assert_operation_called_with( 'routing', 'update_many', {'queue': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, ) self.assert_operation_called_with( 'queues', 'update_many', {'_id': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, ) class test_mongodb_channel_calc_queue_size(BaseMongoDBChannelCase): def setup_method(self): self.connection = _create_mock_connection( transport_options={'calc_queue_size': False}) self.channel = self.connection.default_channel self.expire_at = ( self.channel.get_now() + datetime.timedelta(milliseconds=777)) # Tests def test_size(self): self.set_operation_return_value('messages', 'count_documents', 77) result = self.channel._size('foobar') self.assert_operation_has_calls('messages', 'find', []) assert result == 0 class test_mongodb_transport(BaseMongoDBChannelCase): def setup_method(self): self.connection = _create_mock_connection() def test_driver_version(self): version = self.connection.transport.driver_version() assert version == pymongo.__version__ kombu-5.5.3/t/unit/transport/test_native_delayed_delivery.py000066400000000000000000000122541477772317200244350ustar00rootroot00000000000000from __future__ import annotations import logging from unittest.mock import Mock, call import pytest from 
kombu.transport.native_delayed_delivery import ( CELERY_DELAYED_DELIVERY_EXCHANGE, bind_queue_to_native_delayed_delivery_exchange, calculate_routing_key, declare_native_delayed_delivery_exchanges_and_queues, level_name) class test_native_delayed_delivery_level_name: def test_level_name_with_negative_level(self): with pytest.raises(ValueError, match="level must be a non-negative number"): level_name(-1) def test_level_name_with_level_0(self): assert level_name(0) == 'celery_delayed_0' def test_level_name_with_level_1(self): assert level_name(1) == 'celery_delayed_1' class test_declare_native_delayed_delivery_exchanges_and_queues: def test_invalid_queue_type(self): with pytest.raises(ValueError, match="queue_type must be either classic or quorum"): declare_native_delayed_delivery_exchanges_and_queues(Mock(), 'foo') def test_classic_queue_type(self): declare_native_delayed_delivery_exchanges_and_queues(Mock(), 'classic') def test_quorum_queue_type(self): declare_native_delayed_delivery_exchanges_and_queues(Mock(), 'quorum') class test_bind_queue_to_native_delayed_delivery_exchange: def test_bind_to_direct_exchange(self, caplog): with caplog.at_level(logging.WARNING): queue_mock = Mock() queue_mock.bind().exchange.bind().type = 'direct' queue_mock.bind().exchange.bind().name = 'foo' bind_queue_to_native_delayed_delivery_exchange(Mock(), queue_mock) assert len(caplog.records) == 1 record = caplog.records[0] assert (record.message == "Exchange foo is a direct exchange " "and native delayed delivery do not support direct exchanges.\n" "ETA tasks published to this exchange will " "block the worker until the ETA arrives.") def test_bind_to_topic_exchange(self): queue_mock = Mock() queue_mock.bind().exchange.bind().type = 'topic' queue_mock.bind().exchange.bind().name = 'foo' queue_mock.bind().routing_key = 'foo' bind_queue_to_native_delayed_delivery_exchange(Mock(), queue_mock) queue_mock.bind().exchange.bind().bind_to.assert_called_once_with( CELERY_DELAYED_DELIVERY_EXCHANGE, routing_key="#.foo" ) queue_mock.bind().bind_to.assert_called_once_with( 'foo', routing_key="#.foo" ) def test_bind_to_topic_exchange_with_multiple_routing_keys(self): queue_mock = Mock() queue_mock.bind().exchange = None binding1 = Mock() binding1.exchange.bind().type = 'topic' binding1.exchange.bind().name = 'exchange1' binding1.routing_key = 'test1' binding2 = Mock() binding2.exchange.bind().type = 'topic' binding2.exchange.bind().name = 'exchange2' binding2.routing_key = 'test2' queue_mock.bind().bindings = [binding1, binding2] bind_queue_to_native_delayed_delivery_exchange(Mock(), queue_mock) queue_mock.bind().bind_to.assert_has_calls([ call('exchange1', routing_key='#.test1'), call('exchange2', routing_key='#.test2') ], any_order=True) queue_mock.bind().bindings[0].exchange.bind().bind_to.assert_called_once_with( CELERY_DELAYED_DELIVERY_EXCHANGE, routing_key="#.test1" ) queue_mock.bind().bindings[1].exchange.bind().bind_to.assert_called_once_with( CELERY_DELAYED_DELIVERY_EXCHANGE, routing_key="#.test2" ) def test_bind_to_topic_exchange_with_wildcard_routing_key(self): queue_mock = Mock() queue_mock.bind().exchange.bind().type = 'topic' queue_mock.bind().exchange.bind().name = 'foo' queue_mock.bind().routing_key = '#.foo' bind_queue_to_native_delayed_delivery_exchange(Mock(), queue_mock) queue_mock.bind().exchange.bind().bind_to.assert_called_once_with( CELERY_DELAYED_DELIVERY_EXCHANGE, routing_key="#.foo" ) queue_mock.bind().bind_to.assert_called_once_with( 'foo', routing_key="#.foo" ) class test_calculate_routing_key: 
def test_calculate_routing_key(self): assert (calculate_routing_key(1, 'destination') == '0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.destination') def test_negative_countdown(self): with pytest.raises(ValueError, match="countdown must be a positive number"): calculate_routing_key(-1, 'foo') def test_zero_countdown(self): with pytest.raises(ValueError, match="countdown must be a positive number"): calculate_routing_key(0, 'foo') def test_empty_routing_key(self): with pytest.raises(ValueError, match="routing_key must be non-empty"): calculate_routing_key(1, '') def test_none_routing_key(self): with pytest.raises(ValueError, match="routing_key must be non-empty"): calculate_routing_key(1, None) kombu-5.5.3/t/unit/transport/test_pyamqp.py000066400000000000000000000162041477772317200210630ustar00rootroot00000000000000from __future__ import annotations import sys from itertools import count from unittest.mock import MagicMock, Mock, patch import pytest from kombu import Connection from kombu.transport import pyamqp def test_amqps_connection(): conn = Connection('amqps://') assert conn.transport # evaluate transport, don't connect assert conn.ssl class MockConnection(dict): def __setattr__(self, key, value): self[key] = value def connect(self): pass class test_Channel: def setup_method(self): class Channel(pyamqp.Channel): wait_returns = [] def _x_open(self, *args, **kwargs): pass def wait(self, *args, **kwargs): return self.wait_returns def _send_method(self, *args, **kwargs): pass self.conn = Mock() self.conn._get_free_channel_id.side_effect = count(0).__next__ self.conn.channels = {} self.channel = Channel(self.conn, 0) def test_init(self): assert not self.channel.no_ack_consumers def test_prepare_message(self): assert self.channel.prepare_message( 'foobar', 10, 'application/data', 'utf-8', properties={}, ) def test_message_to_python(self): message = Mock() message.headers = {} message.properties = {} assert self.channel.message_to_python(message) def test_close_resolves_connection_cycle(self): assert self.channel.connection is not None self.channel.close() assert self.channel.connection is None def test_basic_consume_registers_ack_status(self): self.channel.wait_returns = ['my-consumer-tag'] self.channel.basic_consume('foo', no_ack=True) assert 'my-consumer-tag' in self.channel.no_ack_consumers self.channel.wait_returns = ['other-consumer-tag'] self.channel.basic_consume('bar', no_ack=False) assert 'other-consumer-tag' not in self.channel.no_ack_consumers self.channel.basic_cancel('my-consumer-tag') assert 'my-consumer-tag' not in self.channel.no_ack_consumers def test_consume_registers_cancel_callback(self): on_cancel = Mock() self.channel.wait_returns = ['my-consumer-tag'] self.channel.basic_consume('foo', on_cancel=on_cancel) assert self.channel.cancel_callbacks['my-consumer-tag'] == on_cancel class test_Transport: def setup_method(self): self.connection = Connection('pyamqp://') self.transport = self.connection.transport def test_create_channel(self): connection = Mock() self.transport.create_channel(connection) connection.channel.assert_called_with() def test_ssl_cert_passed(self): ssl_dict = { 'ca_certs': '/etc/pki/tls/certs/something.crt', 'cert_reqs': "ssl.CERT_REQUIRED", } ssl_dict_copy = {k: ssl_dict[k] for k in ssl_dict} connection = Connection('amqps://', ssl=ssl_dict_copy) assert connection.transport.client.ssl == ssl_dict def test_driver_version(self): assert self.transport.driver_version() def test_drain_events(self): connection = Mock() 
self.transport.drain_events(connection, timeout=10.0) connection.drain_events.assert_called_with(timeout=10.0) def test_dnspython_localhost_resolve_bug(self): class Conn: def __init__(self, **kwargs): vars(self).update(kwargs) def connect(self): pass self.transport.Connection = Conn self.transport.client.hostname = 'localhost' conn1 = self.transport.establish_connection() assert conn1.host == '127.0.0.1:5672' self.transport.client.hostname = 'example.com' conn2 = self.transport.establish_connection() assert conn2.host == 'example.com:5672' def test_close_connection(self): connection = Mock() connection.client = Mock() self.transport.close_connection(connection) assert connection.client is None connection.close.assert_called_with() @pytest.mark.masked_modules('ssl') def test_import_no_ssl(self, mask_modules): pm = sys.modules.pop('amqp.connection') try: from amqp.connection import SSLError assert SSLError.__module__ == 'amqp.connection' finally: if pm is not None: sys.modules['amqp.connection'] = pm class test_pyamqp: def test_default_port(self): class Transport(pyamqp.Transport): Connection = MockConnection c = Connection(port=None, transport=Transport).connect() assert c['host'] == f'127.0.0.1:{Transport.default_port}' def test_custom_port(self): class Transport(pyamqp.Transport): Connection = MockConnection c = Connection(port=1337, transport=Transport).connect() assert c['host'] == '127.0.0.1:1337' def test_ssl(self): # Test setting TLS by ssl=True. class Transport(pyamqp.Transport): Connection = MagicMock() Connection(transport=Transport, ssl=True).connect() Transport.Connection.assert_called_once() _, kwargs = Transport.Connection.call_args assert kwargs['ssl'] is True def test_ssl_dict(self): # Test setting TLS by setting ssl as dict. class Transport(pyamqp.Transport): Connection = MagicMock() Connection(transport=Transport, ssl={'a': 1, 'b': 2}).connect() Transport.Connection.assert_called_once() _, kwargs = Transport.Connection.call_args assert kwargs['ssl'] == {'a': 1, 'b': 2} @pytest.mark.parametrize( 'hostname', [ 'broker.example.com', 'amqp://broker.example.com/0', 'amqps://broker.example.com/0', 'amqp://guest:guest@broker.example.com/0', 'amqp://broker.example.com;broker2.example.com' ]) def test_ssl_server_hostname(self, hostname): # Test setting server_hostname from URI. 
class Transport(pyamqp.Transport): Connection = MagicMock() Connection( hostname, transport=Transport, ssl={'server_hostname': None} ).connect() Transport.Connection.assert_called_once() _, kwargs = Transport.Connection.call_args assert kwargs['ssl'] == {'server_hostname': 'broker.example.com'} def test_register_with_event_loop(self): t = pyamqp.Transport(Mock()) conn = Mock(name='conn') loop = Mock(name='loop') t.register_with_event_loop(conn, loop) loop.add_reader.assert_called_with( conn.sock, t.on_readable, conn, loop, ) def test_heartbeat_check(self): t = pyamqp.Transport(Mock()) conn = Mock() t.heartbeat_check(conn, rate=4.331) conn.heartbeat_tick.assert_called_with(rate=4.331) def test_get_manager(self): with patch('kombu.transport.pyamqp.get_manager') as get_manager: t = pyamqp.Transport(Mock()) t.get_manager(1, kw=2) get_manager.assert_called_with(t.client, 1, kw=2) kombu-5.5.3/t/unit/transport/test_pyro.py000066400000000000000000000055231477772317200205470ustar00rootroot00000000000000from __future__ import annotations import socket import pytest from kombu import Connection, Consumer, Exchange, Producer, Queue class test_PyroTransport: def setup_method(self): self.c = Connection(transport='pyro', virtual_host="kombu.broker") self.e = Exchange('test_transport_pyro') self.q = Queue('test_transport_pyro', exchange=self.e, routing_key='test_transport_pyro') self.q2 = Queue('test_transport_pyro2', exchange=self.e, routing_key='test_transport_pyro2') self.fanout = Exchange('test_transport_pyro_fanout', type='fanout') self.q3 = Queue('test_transport_pyro_fanout1', exchange=self.fanout) self.q4 = Queue('test_transport_pyro_fanout2', exchange=self.fanout) def test_driver_version(self): assert self.c.transport.driver_version() @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker") def test_produce_consume_noack(self): channel = self.c.channel() producer = Producer(channel, self.e) consumer = Consumer(channel, self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_pyro') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_drain_events(self): with pytest.raises(socket.timeout): self.c.drain_events(timeout=0.1) c1 = self.c.channel() c2 = self.c.channel() with pytest.raises(socket.timeout): self.c.drain_events(timeout=0.1) del c1 # so pyflakes doesn't complain. 
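        # Editorial note: the assertions above rely on the documented
        # behaviour that Connection.drain_events raises socket.timeout when
        # no message arrives within the timeout; a hedged usage sketch:
        #     try:
        #         conn.drain_events(timeout=0.1)
        #     except socket.timeout:
        #         pass   # nothing to consume right now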
del c2 @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker") def test_drain_events_unregistered_queue(self): c1 = self.c.channel() producer = self.c.Producer() consumer = self.c.Consumer([self.q2]) producer.publish( {'hello': 'world'}, declare=consumer.queues, routing_key=self.q2.routing_key, exchange=self.q2.exchange, ) message = consumer.queues[0].get()._raw class Cycle: def get(self, callback, timeout=None): return (message, 'foo'), c1 self.c.transport.cycle = Cycle() self.c.drain_events() @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker") def test_queue_for(self): chan = self.c.channel() x = chan._queue_for('foo') assert x assert chan._queue_for('foo') is x kombu-5.5.3/t/unit/transport/test_qpid.py000066400000000000000000002127401477772317200205140ustar00rootroot00000000000000from __future__ import annotations import select import socket import ssl import sys import time import uuid from collections.abc import Callable from itertools import count from queue import Empty from unittest.mock import Mock, call, patch import pytest from kombu.transport.qpid import (AuthenticationFailure, Channel, Connection, ConnectionError, Message, NotFound, QoS, Transport) from kombu.transport.virtual import Base64 QPID_MODULE = 'kombu.transport.qpid' @pytest.fixture def disable_runtime_dependency_check(patching): mock_dependency_is_none = patching(QPID_MODULE + '.dependency_is_none') mock_dependency_is_none.return_value = False return mock_dependency_is_none class QpidException(Exception): """ An object used to mock Exceptions provided by qpid.messaging.exceptions """ def __init__(self, code=None, text=None): super().__init__(self) self.code = code self.text = text class BreakOutException(Exception): pass @pytest.mark.skip(reason='Not supported in Python3') class test_QoS__init__: def setup_method(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def test__init__prefetch_default_set_correct_without_prefetch_value(self): assert self.qos.prefetch_count == 1 def test__init__prefetch_is_hard_set_to_one(self): qos_limit_two = QoS(self.mock_session) assert qos_limit_two.prefetch_count == 1 def test__init___not_yet_acked_is_initialized(self): assert isinstance(self.qos._not_yet_acked, dict) @pytest.mark.skip(reason='Not supported in Python3') class test_QoS_can_consume: def setup_method(self): session = Mock() self.qos = QoS(session) def test_True_when_prefetch_limit_is_zero(self): self.qos.prefetch_count = 0 self.qos._not_yet_acked = [] assert self.qos.can_consume() def test_True_when_len_of__not_yet_acked_is_lt_prefetch_count(self): self.qos.prefetch_count = 3 self.qos._not_yet_acked = ['a', 'b'] assert self.qos.can_consume() def test_False_when_len_of__not_yet_acked_is_eq_prefetch_count(self): self.qos.prefetch_count = 3 self.qos._not_yet_acked = ['a', 'b', 'c'] assert not self.qos.can_consume() @pytest.mark.skip(reason='Not supported in Python3') class test_QoS_can_consume_max_estimate: def setup_method(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def test_return_one_when_prefetch_count_eq_zero(self): self.qos.prefetch_count = 0 assert self.qos.can_consume_max_estimate() == 1 def test_return_prefetch_count_sub_len__not_yet_acked(self): self.qos._not_yet_acked = ['a', 'b'] self.qos.prefetch_count = 4 assert self.qos.can_consume_max_estimate() == 2 @pytest.mark.skip(reason='Not supported in Python3') class test_QoS_ack: def setup_method(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def 
test_ack_pops__not_yet_acked(self): message = Mock() self.qos.append(message, 1) assert 1 in self.qos._not_yet_acked self.qos.ack(1) assert 1 not in self.qos._not_yet_acked def test_ack_calls_session_acknowledge_with_message(self): message = Mock() self.qos.append(message, 1) self.qos.ack(1) self.qos.session.acknowledge.assert_called_with(message=message) @pytest.mark.skip(reason='Not supported in Python3') class test_QoS_reject: @pytest.fixture(autouse=True) def setup_qpid(self, patching): self.mock_qpid = patching(QPID_MODULE + '.qpid') self.mock_Disposition = self.mock_qpid.messaging.Disposition self.mock_RELEASED = self.mock_qpid.messaging.RELEASED self.mock_REJECTED = self.mock_qpid.messaging.REJECTED def setup_method(self): self.mock_session = Mock() self.mock_message = Mock() self.qos = QoS(self.mock_session) def test_reject_pops__not_yet_acked(self): self.qos.append(self.mock_message, 1) assert 1 in self.qos._not_yet_acked self.qos.reject(1) assert 1 not in self.qos._not_yet_acked def test_reject_requeue_true(self): self.qos.append(self.mock_message, 1) self.qos.reject(1, requeue=True) self.mock_Disposition.assert_called_with(self.mock_RELEASED) self.qos.session.acknowledge.assert_called_with( message=self.mock_message, disposition=self.mock_Disposition.return_value, ) def test_reject_requeue_false(self): message = Mock() self.qos.append(message, 1) self.qos.reject(1, requeue=False) self.mock_Disposition.assert_called_with(self.mock_REJECTED) self.qos.session.acknowledge.assert_called_with( message=message, disposition=self.mock_Disposition.return_value, ) @pytest.mark.skip(reason='Not supported in Python3') class test_QoS: def mock_message_factory(self): """Create and return a mock message tag and delivery_tag.""" m_delivery_tag = self.delivery_tag_generator.next() m = f'message {m_delivery_tag}' return m, m_delivery_tag def add_n_messages_to_qos(self, n, qos): """Add N mock messages into the passed in qos object""" for i in range(n): self.add_message_to_qos(qos) def add_message_to_qos(self, qos): """Add a single mock message into the passed in qos object. Uses the mock_message_factory() to create the message and delivery_tag. 
""" m, m_delivery_tag = self.mock_message_factory() qos.append(m, m_delivery_tag) def setup_method(self): self.mock_session = Mock() self.qos_no_limit = QoS(self.mock_session) self.qos_limit_2 = QoS(self.mock_session, prefetch_count=2) self.delivery_tag_generator = count(1) def test_append(self): """Append two messages and check inside the QoS object that they were put into the internal data structures correctly """ qos = self.qos_no_limit m1, m1_tag = self.mock_message_factory() m2, m2_tag = self.mock_message_factory() qos.append(m1, m1_tag) length_not_yet_acked = len(qos._not_yet_acked) assert length_not_yet_acked == 1 checked_message1 = qos._not_yet_acked[m1_tag] assert m1 is checked_message1 qos.append(m2, m2_tag) length_not_yet_acked = len(qos._not_yet_acked) assert length_not_yet_acked == 2 checked_message2 = qos._not_yet_acked[m2_tag] assert m2 is checked_message2 def test_get(self): """Append two messages, and use get to receive them""" qos = self.qos_no_limit m1, m1_tag = self.mock_message_factory() m2, m2_tag = self.mock_message_factory() qos.append(m1, m1_tag) qos.append(m2, m2_tag) message1 = qos.get(m1_tag) message2 = qos.get(m2_tag) assert m1 is message1 assert m2 is message2 @pytest.mark.skip(reason='Not supported in Python3') class ConnectionTestBase: @patch(QPID_MODULE + '.qpid') def setup_method(self, mock_qpid): self.connection_options = { 'host': 'localhost', 'port': 5672, 'transport': 'tcp', 'timeout': 10, 'sasl_mechanisms': 'ANONYMOUS', } self.mock_qpid_connection = mock_qpid.messaging.Connection self.conn = Connection(**self.connection_options) @pytest.mark.skip(reason='Not supported in Python3') class test_Connection__init__(ConnectionTestBase): def test_stores_connection_options(self): # ensure that only one mech was passed into connection. 
The other # options should all be passed through as-is modified_conn_opts = self.connection_options assert modified_conn_opts == self.conn.connection_options def test_class_variables(self): assert isinstance(self.conn.channels, list) assert isinstance(self.conn._callbacks, dict) def test_establishes_connection(self): modified_conn_opts = self.connection_options self.mock_qpid_connection.establish.assert_called_with( **modified_conn_opts ) def test_saves_established_connection(self): created_conn = self.mock_qpid_connection.establish.return_value assert self.conn._qpid_conn is created_conn @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_mutates_ConnError_by_message(self, mock_qpid, mock_exc_info): text = 'connection-forced: Authentication failed(320)' my_conn_error = QpidException(text=text) mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_mutates_ConnError_by_code(self, mock_qpid, mock_exc_info): my_conn_error = QpidException(code=320, text='someothertext') mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_connection__init__mutates_ConnError_by_message2(self, mock_qpid, mock_exc_info): """ Test for PLAIN connection via python-saslwrapper, sans cyrus-sasl-plain This test is specific for what is returned when we attempt to connect with PLAIN mech and python-saslwrapper is installed, but cyrus-sasl-plain is not installed. 
""" my_conn_error = QpidException() my_conn_error.text = 'Error in sasl_client_start (-4) SASL(-4): no '\ 'mechanism available' mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = ('a', 'b', None) try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_unknown_connection_error(self, mock_qpid, mock_exc_info): # If we get a connection error that we don't understand, # bubble it up as-is my_conn_error = QpidException(code=999, text='someothertext') mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except Exception as error: assert error.code == 999 else: self.fail('Connection should have thrown an exception') @patch.object(Transport, 'channel_errors', new=(QpidException, )) @patch(QPID_MODULE + '.qpid') @patch(QPID_MODULE + '.ConnectionError', new=IOError) def test_non_qpid_error_raises(self, mock_qpid): mock_Qpid_Connection = mock_qpid.messaging.Connection my_conn_error = SyntaxError() my_conn_error.text = 'some non auth related error message' mock_Qpid_Connection.establish.side_effect = my_conn_error with pytest.raises(SyntaxError): Connection(**self.connection_options) @patch(QPID_MODULE + '.qpid') @patch(QPID_MODULE + '.ConnectionError', new=IOError) def test_non_auth_conn_error_raises(self, mock_qpid): mock_Qpid_Connection = mock_qpid.messaging.Connection my_conn_error = IOError() my_conn_error.text = 'some non auth related error message' mock_Qpid_Connection.establish.side_effect = my_conn_error with pytest.raises(IOError): Connection(**self.connection_options) @pytest.mark.skip(reason='Not supported in Python3') class test_Connection_class_attributes(ConnectionTestBase): def test_connection_verify_class_attributes(self): assert Channel == Connection.Channel @pytest.mark.skip(reason='Not supported in Python3') class test_Connection_get_Qpid_connection(ConnectionTestBase): def test_connection_get_qpid_connection(self): self.conn._qpid_conn = Mock() returned_connection = self.conn.get_qpid_connection() assert self.conn._qpid_conn is returned_connection @pytest.mark.skip(reason='Not supported in Python3') class test_Connection_close(ConnectionTestBase): def test_connection_close(self): self.conn._qpid_conn = Mock() self.conn.close() self.conn._qpid_conn.close.assert_called_once_with() @pytest.mark.skip(reason='Not supported in Python3') class test_Connection_close_channel(ConnectionTestBase): def setup_method(self): super().setup_method() self.conn.channels = Mock() def test_connection_close_channel_removes_channel_from_channel_list(self): mock_channel = Mock() self.conn.close_channel(mock_channel) self.conn.channels.remove.assert_called_once_with(mock_channel) def test_connection_close_channel_handles_ValueError_being_raised(self): self.conn.channels.remove = Mock(side_effect=ValueError()) self.conn.close_channel(Mock()) def test_connection_close_channel_set_channel_connection_to_None(self): mock_channel = Mock() mock_channel.connection = False self.conn.channels.remove = Mock(side_effect=ValueError()) self.conn.close_channel(mock_channel) assert 
mock_channel.connection is None @pytest.mark.skip(reason='Not supported in Python3') class ChannelTestBase: @pytest.fixture(autouse=True) def setup_channel(self, patching): self.mock_qpidtoollibs = patching(QPID_MODULE + '.qpidtoollibs') self.mock_broker_agent = self.mock_qpidtoollibs.BrokerAgent self.conn = Mock() self.transport = Mock() self.channel = Channel(self.conn, self.transport) @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_purge(ChannelTestBase): def setup_method(self): self.mock_queue = Mock() def test_gets_queue(self): self.channel._purge(self.mock_queue) getQueue = self.mock_broker_agent.return_value.getQueue getQueue.assert_called_once_with(self.mock_queue) def test_does_not_call_purge_if_message_count_is_zero(self): values = {'msgDepth': 0} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values self.channel._purge(self.mock_queue) assert not queue_obj.purge.called def test_purges_all_messages_from_queue(self): values = {'msgDepth': 5} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values self.channel._purge(self.mock_queue) queue_obj.purge.assert_called_with(5) def test_returns_message_count(self): values = {'msgDepth': 5} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values result = self.channel._purge(self.mock_queue) assert result == 5 @patch(QPID_MODULE + '.NotFound', new=QpidException) def test_raises_channel_error_if_queue_does_not_exist(self): self.mock_broker_agent.return_value.getQueue.return_value = None with pytest.raises(QpidException): self.channel._purge(self.mock_queue) @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_put(ChannelTestBase): @patch(QPID_MODULE + '.qpid') def test_channel__put_onto_queue(self, mock_qpid): routing_key = 'routingkey' mock_message = Mock() mock_Message_cls = mock_qpid.messaging.Message self.channel._put(routing_key, mock_message) address_str = '{0}; {{assert: always, node: {{type: queue}}}}'.format( routing_key, ) self.transport.session.sender.assert_called_with(address_str) mock_Message_cls.assert_called_with( content=mock_message, subject=None, durable=True ) mock_sender = self.transport.session.sender.return_value mock_sender.send.assert_called_with( mock_Message_cls.return_value, sync=True, ) mock_sender.close.assert_called_with() @patch(QPID_MODULE + '.qpid') def test_channel__put_onto_exchange(self, mock_qpid): mock_routing_key = 'routingkey' mock_exchange_name = 'myexchange' mock_message = Mock() mock_Message_cls = mock_qpid.messaging.Message self.channel._put(mock_routing_key, mock_message, mock_exchange_name) addrstr = '{0}/{1}; {{assert: always, node: {{type: topic}}}}'.format( mock_exchange_name, mock_routing_key, ) self.transport.session.sender.assert_called_with(addrstr) mock_Message_cls.assert_called_with( content=mock_message, subject=mock_routing_key, durable=True ) mock_sender = self.transport.session.sender.return_value mock_sender.send.assert_called_with( mock_Message_cls.return_value, sync=True, ) mock_sender.close.assert_called_with() @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_get(ChannelTestBase): def test_channel__get(self): mock_queue = Mock() result = self.channel._get(mock_queue) self.transport.session.receiver.assert_called_once_with(mock_queue) mock_rx = self.transport.session.receiver.return_value mock_rx.fetch.assert_called_once_with(timeout=0) mock_rx.close.assert_called_once_with() assert 
mock_rx.fetch.return_value is result @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_close(ChannelTestBase): @pytest.fixture(autouse=True) def setup_basic_cancel(self, patching, setup_channel): self.mock_basic_cancel = patching.object(self.channel, 'basic_cancel') self.channel.closed = False @pytest.fixture(autouse=True) def setup_receivers(self, setup_channel): self.mock_receiver1 = Mock() self.mock_receiver2 = Mock() self.channel._receivers = { 1: self.mock_receiver1, 2: self.mock_receiver2, } def test_channel_close_sets_close_attribute(self): self.channel.close() assert self.channel.closed def test_channel_close_calls_basic_cancel_on_all_receivers(self): self.channel.close() self.mock_basic_cancel.assert_has_calls([call(1), call(2)]) def test_channel_close_calls_close_channel_on_connection(self): self.channel.close() self.conn.close_channel.assert_called_once_with(self.channel) def test_channel_close_calls_close_on_broker_agent(self): self.channel.close() self.channel._broker.close.assert_called_once_with() def test_channel_close_does_nothing_if_already_closed(self): self.channel.closed = True self.channel.close() self.mock_basic_cancel.assert_not_called() def test_channel_close_does_not_call_close_channel_if_conn_is_None(self): self.channel.connection = None self.channel.close() self.conn.close_channel.assert_not_called() @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_basic_qos(ChannelTestBase): def test_channel_basic_qos_always_returns_one(self): self.channel.basic_qos(2) assert self.channel.qos.prefetch_count == 1 @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_basic_get(ChannelTestBase): @pytest.fixture(autouse=True) def setup_channel_attributes(self, setup_channel): self.channel.Message = Mock() self.channel._get = Mock() def test_channel_basic_get_calls__get_with_queue(self): mock_queue = Mock() self.channel.basic_get(mock_queue) self.channel._get.assert_called_once_with(mock_queue) def test_channel_basic_get_creates_Message_correctly(self): mock_queue = Mock() self.channel.basic_get(mock_queue) mock_raw_message = self.channel._get.return_value.content self.channel.Message.assert_called_once_with( mock_raw_message, channel=self.channel, ) def test_channel_basic_get_acknowledges_message_by_default(self): mock_queue = Mock() self.channel.basic_get(mock_queue) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_acknowledges_message_with_no_ack_False(self): mock_queue = Mock() self.channel.basic_get(mock_queue, no_ack=False) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_acknowledges_message_with_no_ack_True(self): mock_queue = Mock() self.channel.basic_get(mock_queue, no_ack=True) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_returns_correct_message(self): mock_queue = Mock() basic_get_result = self.channel.basic_get(mock_queue) expected_message = self.channel.Message.return_value assert expected_message is basic_get_result def test_basic_get_returns_None_when_channel__get_raises_Empty(self): mock_queue = Mock() self.channel._get = Mock(side_effect=Empty) basic_get_result = 
self.channel.basic_get(mock_queue) assert self.channel.Message.call_count == 0 assert basic_get_result is None @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_basic_cancel(ChannelTestBase): @pytest.fixture(autouse=True) def setup_receivers(self, setup_channel): self.channel._receivers = {1: Mock()} def test_channel_basic_cancel_no_error_if_consumer_tag_not_found(self): self.channel.basic_cancel(2) def test_channel_basic_cancel_pops_receiver(self): self.channel.basic_cancel(1) assert 1 not in self.channel._receivers def test_channel_basic_cancel_closes_receiver(self): mock_receiver = self.channel._receivers[1] self.channel.basic_cancel(1) mock_receiver.close.assert_called_once_with() def test_channel_basic_cancel_pops__tag_to_queue(self): self.channel._tag_to_queue = Mock() self.channel.basic_cancel(1) self.channel._tag_to_queue.pop.assert_called_once_with(1, None) def test_channel_basic_cancel_pops_connection__callbacks(self): self.channel._tag_to_queue = Mock() self.channel.basic_cancel(1) mock_queue = self.channel._tag_to_queue.pop.return_value self.conn._callbacks.pop.assert_called_once_with(mock_queue, None) @pytest.mark.skip(reason='Not supported in Python3') class test_Channel__init__(ChannelTestBase): def test_channel___init__sets_variables_as_expected(self): assert self.conn is self.channel.connection assert self.transport is self.channel.transport assert not self.channel.closed self.conn.get_qpid_connection.assert_called_once_with() expected_broker_agent = self.mock_broker_agent.return_value assert self.channel._broker is expected_broker_agent assert self.channel._tag_to_queue == {} assert self.channel._receivers == {} assert self.channel._qos is None @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_basic_consume(ChannelTestBase): @pytest.fixture(autouse=True) def setup_callbacks(self, setup_channel): self.conn._callbacks = {} def test_channel_basic_consume_adds_queue_to__tag_to_queue(self): mock_tag = Mock() mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), mock_tag) expected_dict = {mock_tag: mock_queue} assert expected_dict == self.channel._tag_to_queue def test_channel_basic_consume_adds_entry_to_connection__callbacks(self): mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) assert mock_queue in self.conn._callbacks assert isinstance(self.conn._callbacks[mock_queue], Callable) def test_channel_basic_consume_creates_new_receiver(self): mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) self.transport.session.receiver.assert_called_once_with(mock_queue) def test_channel_basic_consume_saves_new_receiver(self): mock_tag = Mock() self.channel.basic_consume(Mock(), Mock(), Mock(), mock_tag) new_mock_receiver = self.transport.session.receiver.return_value expected_dict = {mock_tag: new_mock_receiver} assert expected_dict == self.channel._receivers def test_channel_basic_consume_sets_capacity_on_new_receiver(self): mock_prefetch_count = Mock() self.channel.qos.prefetch_count = mock_prefetch_count self.channel.basic_consume(Mock(), Mock(), Mock(), Mock()) new_receiver = self.transport.session.receiver.return_value assert new_receiver.capacity is mock_prefetch_count def get_callback(self, no_ack=Mock(), original_cb=Mock()): self.channel.Message = Mock() mock_queue = Mock() self.channel.basic_consume(mock_queue, no_ack, original_cb, Mock()) return self.conn._callbacks[mock_queue] def test_channel_basic_consume_callback_creates_Message_correctly(self): 
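        # Editorial summary (not from the kombu sources): the preceding tests
        # pin down the basic_consume contract that the callback tests below
        # build on, roughly:
        #     channel.basic_consume(queue, no_ack, callback, consumer_tag)
        #       -> receiver = session.receiver(queue)
        #       -> receiver.capacity = channel.qos.prefetch_count
        #       -> connection._callbacks[queue] wraps `callback` so every
        #          fetched qpid message is converted to a kombu Message first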
callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_content = mock_qpid_message.content self.channel.Message.assert_called_once_with( mock_content, channel=self.channel, ) def test_channel_basic_consume_callback_adds_message_to_QoS(self): self.channel._qos = Mock() callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_delivery_tag = self.channel.Message.return_value.delivery_tag self.channel._qos.append.assert_called_once_with( mock_qpid_message, mock_delivery_tag, ) def test_channel_basic_consume_callback_gratuitously_acks(self): self.channel.basic_ack = Mock() callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_delivery_tag = self.channel.Message.return_value.delivery_tag self.channel.basic_ack.assert_called_once_with(mock_delivery_tag) def test_channel_basic_consume_callback_does_not_ack_when_needed(self): self.channel.basic_ack = Mock() callback = self.get_callback(no_ack=False) mock_qpid_message = Mock() callback(mock_qpid_message) self.channel.basic_ack.assert_not_called() def test_channel_basic_consume_callback_calls_real_callback(self): self.channel.basic_ack = Mock() mock_original_callback = Mock() callback = self.get_callback(original_cb=mock_original_callback) mock_qpid_message = Mock() callback(mock_qpid_message) expected_message = self.channel.Message.return_value mock_original_callback.assert_called_once_with(expected_message) @pytest.mark.skip(reason='Not supported in Python3') class test_Channel_queue_delete(ChannelTestBase): @pytest.fixture(autouse=True) def setup_channel_patches(self, patching, setup_channel): self.mock__has_queue = patching.object(self.channel, '_has_queue') self.mock__size = patching.object(self.channel, '_size') self.mock__delete = patching.object(self.channel, '_delete') self.mock_queue = Mock() def test_checks_if_queue_exists(self): self.channel.queue_delete(self.mock_queue) self.mock__has_queue.assert_called_once_with(self.mock_queue) def test_does_nothing_if_queue_does_not_exist(self): self.mock__has_queue.return_value = False self.channel.queue_delete(self.mock_queue) self.mock__delete.assert_not_called() def test_not_empty_and_if_empty_True_no_delete(self): self.mock__size.return_value = 1 self.channel.queue_delete(self.mock_queue, if_empty=True) mock_broker = self.mock_broker_agent.return_value mock_broker.getQueue.assert_not_called() def test_calls_get_queue(self): self.channel.queue_delete(self.mock_queue) getQueue = self.mock_broker_agent.return_value.getQueue getQueue.assert_called_once_with(self.mock_queue) def test_gets_queue_attribute(self): self.channel.queue_delete(self.mock_queue) queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.getAttributes.assert_called_once_with() def test_queue_in_use_and_if_unused_no_delete(self): queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.getAttributes.return_value = {'consumerCount': 1} self.channel.queue_delete(self.mock_queue, if_unused=True) self.mock__delete.assert_not_called() def test_calls__delete_with_queue(self): self.channel.queue_delete(self.mock_queue) self.mock__delete.assert_called_once_with(self.mock_queue) @pytest.mark.skip(reason='Not supported in Python3') class test_Channel: @patch(QPID_MODULE + '.qpidtoollibs') def setup_method(self, mock_qpidtoollibs): self.mock_connection = Mock() self.mock_qpid_connection = Mock() self.mock_qpid_session = Mock() self.mock_qpid_connection.session = Mock( 
return_value=self.mock_qpid_session, ) self.mock_connection.get_qpid_connection = Mock( return_value=self.mock_qpid_connection, ) self.mock_transport = Mock() self.mock_broker = Mock() self.mock_Message = Mock() self.mock_BrokerAgent = mock_qpidtoollibs.BrokerAgent self.mock_BrokerAgent.return_value = self.mock_broker self.my_channel = Channel( self.mock_connection, self.mock_transport, ) self.my_channel.Message = self.mock_Message def test_verify_QoS_class_attribute(self): """Verify that the class attribute QoS refers to the QoS object""" assert QoS is Channel.QoS def test_verify_Message_class_attribute(self): """Verify that the class attribute Message refers to the Message object.""" assert Message is Channel.Message def test_body_encoding_class_attribute(self): """Verify that the class attribute body_encoding is set to base64""" assert Channel.body_encoding == 'base64' def test_codecs_class_attribute(self): """Verify that the codecs class attribute has a correct key and value.""" assert isinstance(Channel.codecs, dict) assert 'base64' in Channel.codecs assert isinstance(Channel.codecs['base64'], Base64) def test_size(self): """Test getting the number of messages in a queue specified by name and returning them.""" message_count = 5 mock_queue = Mock() mock_queue_to_check = Mock() mock_queue_to_check.values = {'msgDepth': message_count} self.mock_broker.getQueue.return_value = mock_queue_to_check result = self.my_channel._size(mock_queue) self.mock_broker.getQueue.assert_called_with(mock_queue) assert message_count == result def test_delete(self): """Test deleting a queue calls purge and delQueue with queue name.""" mock_queue = Mock() self.my_channel._purge = Mock() result = self.my_channel._delete(mock_queue) self.my_channel._purge.assert_called_with(mock_queue) self.mock_broker.delQueue.assert_called_with(mock_queue) assert result is None def test_has_queue_true(self): """Test checking if a queue exists, and it does.""" mock_queue = Mock() self.mock_broker.getQueue.return_value = True result = self.my_channel._has_queue(mock_queue) assert result def test_has_queue_false(self): """Test checking if a queue exists, and it does not.""" mock_queue = Mock() self.mock_broker.getQueue.return_value = False result = self.my_channel._has_queue(mock_queue) assert not result @patch('amqp.protocol.queue_declare_ok_t') def test_queue_declare_with_exception_raised(self, mock_queue_declare_ok_t): """Test declare_queue, where an exception is raised and silenced.""" mock_queue = Mock() mock_passive = Mock() mock_durable = Mock() mock_exclusive = Mock() mock_auto_delete = Mock() mock_nowait = Mock() mock_arguments = Mock() mock_msg_count = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False options = { 'passive': mock_passive, 'durable': mock_durable, 'exclusive': mock_exclusive, 'auto-delete': mock_auto_delete, 'arguments': mock_arguments, } mock_consumer_count = Mock() mock_return_value = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict exception_to_raise = Exception('The foo object already exists.') self.mock_broker.addQueue.side_effect = exception_to_raise self.mock_broker.getQueue.return_value = mock_queue_data mock_queue_declare_ok_t.return_value = mock_return_value result = self.my_channel.queue_declare( mock_queue, passive=mock_passive, durable=mock_durable, exclusive=mock_exclusive, auto_delete=mock_auto_delete, nowait=mock_nowait, 
arguments=mock_arguments, ) self.mock_broker.addQueue.assert_called_with( mock_queue, options=options, ) mock_queue_declare_ok_t.assert_called_with( mock_queue, mock_msg_count, mock_consumer_count, ) assert mock_return_value is result def test_queue_declare_set_ring_policy_for_celeryev(self): """Test declare_queue sets ring_policy for celeryev.""" mock_queue = Mock() mock_queue.startswith.return_value = True mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, 'qpid.policy_type': 'ring', } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.startswith.assert_called_with('celeryev') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_set_ring_policy_for_pidbox(self): """Test declare_queue sets ring_policy for pidbox.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = True expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, 'qpid.policy_type': 'ring', } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.endswith.assert_called_with('pidbox') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_ring_policy_not_set_as_expected(self): """Test declare_queue does not set ring_policy as expected.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.startswith.assert_called_with('celeryev') mock_queue.endswith.assert_called_with('pidbox') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_test_defaults(self): """Test declare_queue defaults.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) self.mock_broker.addQueue.assert_called_with( 
mock_queue, options=expected_default_options, ) def test_queue_declare_raises_exception_not_silenced(self): unique_exception = Exception('This exception should not be silenced') mock_queue = Mock() self.mock_broker.addQueue.side_effect = unique_exception with pytest.raises(unique_exception.__class__): self.my_channel.queue_declare(mock_queue) self.mock_broker.addQueue.assert_called_once_with( mock_queue, options={ 'exclusive': False, 'durable': False, 'qpid.policy_type': 'ring', 'passive': False, 'arguments': None, 'auto-delete': True }) def test_exchange_declare_raises_exception_and_silenced(self): """Create exchange where an exception is raised and then silenced""" self.mock_broker.addExchange.side_effect = Exception( 'The foo object already exists.', ) self.my_channel.exchange_declare() def test_exchange_declare_raises_exception_not_silenced(self): """Create Exchange where an exception is raised and not silenced.""" unique_exception = Exception('This exception should not be silenced') self.mock_broker.addExchange.side_effect = unique_exception with pytest.raises(unique_exception.__class__): self.my_channel.exchange_declare() def test_exchange_declare(self): """Create Exchange where an exception is NOT raised.""" mock_exchange = Mock() mock_type = Mock() mock_durable = Mock() options = {'durable': mock_durable} result = self.my_channel.exchange_declare( mock_exchange, mock_type, mock_durable, ) self.mock_broker.addExchange.assert_called_with( mock_type, mock_exchange, options, ) assert result is None def test_exchange_delete(self): """Test the deletion of an exchange by name.""" mock_exchange = Mock() result = self.my_channel.exchange_delete(mock_exchange) self.mock_broker.delExchange.assert_called_with(mock_exchange) assert result is None def test_queue_bind(self): """Test binding a queue to an exchange using a routing key.""" mock_queue = Mock() mock_exchange = Mock() mock_routing_key = Mock() self.my_channel.queue_bind( mock_queue, mock_exchange, mock_routing_key, ) self.mock_broker.bind.assert_called_with( mock_exchange, mock_queue, mock_routing_key, ) def test_queue_unbind(self): """Test unbinding a queue from an exchange using a routing key.""" mock_queue = Mock() mock_exchange = Mock() mock_routing_key = Mock() self.my_channel.queue_unbind( mock_queue, mock_exchange, mock_routing_key, ) self.mock_broker.unbind.assert_called_with( mock_exchange, mock_queue, mock_routing_key, ) def test_queue_purge(self): """Test purging a queue by name.""" mock_queue = Mock() purge_result = Mock() self.my_channel._purge = Mock(return_value=purge_result) result = self.my_channel.queue_purge(mock_queue) self.my_channel._purge.assert_called_with(mock_queue) assert purge_result is result @patch(QPID_MODULE + '.Channel.qos') def test_basic_ack(self, mock_qos): """Test that basic_ack calls the QoS object properly.""" mock_delivery_tag = Mock() self.my_channel.basic_ack(mock_delivery_tag) mock_qos.ack.assert_called_with(mock_delivery_tag) @patch(QPID_MODULE + '.Channel.qos') def test_basic_reject(self, mock_qos): """Test that basic_reject calls the QoS object properly.""" mock_delivery_tag = Mock() mock_requeue_value = Mock() self.my_channel.basic_reject(mock_delivery_tag, mock_requeue_value) mock_qos.reject.assert_called_with( mock_delivery_tag, requeue=mock_requeue_value, ) def test_qos_manager_is_none(self): """Test the qos property if the QoS object did not already exist.""" self.my_channel._qos = None result = self.my_channel.qos assert isinstance(result, QoS) assert result == self.my_channel._qos 
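    # Editorial sketch (assumed usage, not part of the test suite): the two
    # tests around this point verify that Channel.qos is created lazily and
    # then cached per channel, i.e. roughly:
    #     assert channel._qos is None     # nothing built yet
    #     qos = channel.qos               # first access constructs a QoS
    #     assert channel.qos is qos       # later accesses reuse the instance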
def test_qos_manager_already_exists(self): """Test the qos property if the QoS object already exists.""" mock_existing_qos = Mock() self.my_channel._qos = mock_existing_qos result = self.my_channel.qos assert mock_existing_qos is result def test_prepare_message(self): """Test that prepare_message() returns the correct result.""" mock_body = Mock() mock_priority = Mock() mock_content_encoding = Mock() mock_content_type = Mock() mock_header1 = Mock() mock_header2 = Mock() mock_properties1 = Mock() mock_properties2 = Mock() headers = {'header1': mock_header1, 'header2': mock_header2} properties = {'properties1': mock_properties1, 'properties2': mock_properties2} result = self.my_channel.prepare_message( mock_body, priority=mock_priority, content_type=mock_content_type, content_encoding=mock_content_encoding, headers=headers, properties=properties) assert mock_body is result['body'] assert mock_content_encoding is result['content-encoding'] assert mock_content_type is result['content-type'] assert headers == result['headers'] assert properties == result['properties'] assert (mock_priority is result['properties']['delivery_info']['priority']) @patch('__builtin__.buffer') @patch(QPID_MODULE + '.Channel.body_encoding') @patch(QPID_MODULE + '.Channel.encode_body') @patch(QPID_MODULE + '.Channel._put') def test_basic_publish(self, mock_put, mock_encode_body, mock_body_encoding, mock_buffer): """Test basic_publish().""" mock_original_body = Mock() mock_encoded_body = 'this is my encoded body' mock_message = {'body': mock_original_body, 'properties': {'delivery_info': {}}} mock_encode_body.return_value = ( mock_encoded_body, mock_body_encoding, ) mock_exchange = Mock() mock_routing_key = Mock() mock_encoded_buffered_body = Mock() mock_buffer.return_value = mock_encoded_buffered_body self.my_channel.basic_publish( mock_message, mock_exchange, mock_routing_key, ) mock_encode_body.assert_called_once_with( mock_original_body, mock_body_encoding, ) mock_buffer.assert_called_once_with(mock_encoded_body) assert mock_message['body'] is mock_encoded_buffered_body assert (mock_message['properties']['body_encoding'] is mock_body_encoding) assert isinstance( mock_message['properties']['delivery_tag'], uuid.UUID) assert (mock_message['properties']['delivery_info']['exchange'] is mock_exchange) assert (mock_message['properties']['delivery_info']['routing_key'] is mock_routing_key) mock_put.assert_called_with( mock_routing_key, mock_message, mock_exchange, ) @patch(QPID_MODULE + '.Channel.codecs') def test_encode_body_expected_encoding(self, mock_codecs): """Test if encode_body() works when encoding is set correctly""" mock_body = Mock() mock_encoder = Mock() mock_encoded_result = Mock() mock_codecs.get.return_value = mock_encoder mock_encoder.encode.return_value = mock_encoded_result result = self.my_channel.encode_body(mock_body, encoding='base64') expected_result = (mock_encoded_result, 'base64') assert expected_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_encode_body_not_expected_encoding(self, mock_codecs): """Test if encode_body() works when encoding is not set correctly.""" mock_body = Mock() result = self.my_channel.encode_body(mock_body, encoding=None) expected_result = mock_body, None assert expected_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_decode_body_expected_encoding(self, mock_codecs): """Test if decode_body() works when encoding is set correctly.""" mock_body = Mock() mock_decoder = Mock() mock_decoded_result = Mock() mock_codecs.get.return_value = 
mock_decoder mock_decoder.decode.return_value = mock_decoded_result result = self.my_channel.decode_body(mock_body, encoding='base64') assert mock_decoded_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_decode_body_not_expected_encoding(self, mock_codecs): """Test if decode_body() works when encoding is not set correctly.""" mock_body = Mock() result = self.my_channel.decode_body(mock_body, encoding=None) assert mock_body == result def test_typeof_exchange_exists(self): """Test that typeof() finds an exchange that already exists.""" mock_exchange = Mock() mock_qpid_exchange = Mock() mock_attributes = {} mock_type = Mock() mock_attributes['type'] = mock_type mock_qpid_exchange.getAttributes.return_value = mock_attributes self.mock_broker.getExchange.return_value = mock_qpid_exchange result = self.my_channel.typeof(mock_exchange) assert mock_type is result def test_typeof_exchange_does_not_exist(self): """Test that typeof() finds an exchange that does not exists.""" mock_exchange = Mock() mock_default = Mock() self.mock_broker.getExchange.return_value = None result = self.my_channel.typeof(mock_exchange, default=mock_default) assert mock_default is result @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport__init__: @pytest.fixture(autouse=True) def mock_verify_runtime_environment(self, patching): self.mock_verify_runtime_environment = patching.object( Transport, 'verify_runtime_environment') @pytest.fixture(autouse=True) def mock_transport_init(self, patching): self.mock_base_Transport__init__ = patching( QPID_MODULE + '.base.Transport.__init__') def test_Transport___init___calls_verify_runtime_environment(self): Transport(Mock()) self.mock_verify_runtime_environment.assert_called_once_with() def test_transport___init___calls_parent_class___init__(self): m = Mock() Transport(m) self.mock_base_Transport__init__.assert_called_once_with(m) def test_transport___init___sets_use_async_interface_False(self): transport = Transport(Mock()) assert not transport.use_async_interface @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_drain_events: @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): # ^^ disable_runtime.. must be called before this fixture. 
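        # Editorial note: pytest honours that ordering only because the
        # fixture requests ``disable_runtime_dependency_check`` as an
        # argument; declaring the dependency is what guarantees it runs
        # first, e.g.:
        #     @pytest.fixture(autouse=True)
        #     def setup_self(self, disable_runtime_dependency_check):
        #         ...   # executed after the dependency-check fixture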
self.transport = Transport(Mock()) self.transport.session = Mock() self.mock_queue = Mock() self.mock_message = Mock() self.mock_conn = Mock() self.mock_callback = Mock() self.mock_conn._callbacks = {self.mock_queue: self.mock_callback} def mock_next_receiver(self, timeout): time.sleep(0.3) mock_receiver = Mock() mock_receiver.source = self.mock_queue mock_receiver.fetch.return_value = self.mock_message return mock_receiver def test_socket_timeout_raised_when_all_receivers_empty(self): with patch(QPID_MODULE + '.QpidEmpty', new=QpidException): self.transport.session.next_receiver.side_effect = QpidException() with pytest.raises(socket.timeout): self.transport.drain_events(Mock()) def test_socket_timeout_raised_when_by_timeout(self): self.transport.session.next_receiver = self.mock_next_receiver with pytest.raises(socket.timeout): self.transport.drain_events(self.mock_conn, timeout=1) def test_timeout_returns_no_earlier_then_asked_for(self): self.transport.session.next_receiver = self.mock_next_receiver start_time = time.monotonic() try: self.transport.drain_events(self.mock_conn, timeout=1) except socket.timeout: pass elapsed_time_in_s = time.monotonic() - start_time assert elapsed_time_in_s >= 1.0 def test_callback_is_called(self): self.transport.session.next_receiver = self.mock_next_receiver try: self.transport.drain_events(self.mock_conn, timeout=1) except socket.timeout: pass self.mock_callback.assert_called_with(self.mock_message) @pytest.mark.skip(reason='Not supported in Python3') class test_Transport_create_channel: @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): # ^^ disable runtime MUST be called before this fixture self.transport = Transport(Mock()) self.mock_conn = Mock() self.mock_new_channel = Mock() self.mock_conn.Channel.return_value = self.mock_new_channel self.returned_channel = self.transport.create_channel(self.mock_conn) def test_new_channel_created_from_connection(self): assert self.mock_new_channel is self.returned_channel self.mock_conn.Channel.assert_called_with( self.mock_conn, self.transport, ) def test_new_channel_added_to_connection_channel_list(self): append_method = self.mock_conn.channels.append append_method.assert_called_with(self.mock_new_channel) @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_establish_connection: @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): class MockClient: pass self.client = MockClient() self.client.connect_timeout = 4 self.client.ssl = False self.client.transport_options = {} self.client.userid = None self.client.password = None self.client.login_method = None self.transport = Transport(self.client) self.mock_conn = Mock() self.transport.Connection = self.mock_conn def test_transport_establish_conn_new_option_overwrites_default(self): self.client.userid = 'new-userid' self.client.password = 'new-password' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username=self.client.userid, password=self.client.password, sasl_mechanisms='PLAIN', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_empty_client_is_default(self): self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_additional_transport_option(self): new_param_value = 'mynewparam' 
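        # Editorial note (hypothetical usage, not from the kombu sources):
        # entries placed in client.transport_options are expected to be
        # forwarded verbatim as extra keyword arguments to the qpid
        # Connection, so something like
        #     Connection('qpid://', transport_options={'new_param': 'x'})
        # should surface below as Connection(..., new_param='x').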
self.client.transport_options['new_param'] = new_param_value self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, new_param=new_param_value, port=5672, transport='tcp', ) def test_transport_establish_conn_transform_localhost_to_127_0_0_1(self): self.client.hostname = 'localhost' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_password_no_userid_raises_exception(self): self.client.password = 'somepass' with pytest.raises(Exception): self.transport.establish_connection() def test_transport_userid_no_password_raises_exception(self): self.client.userid = 'someusername' with pytest.raises(Exception): self.transport.establish_connection() def test_transport_overrides_sasl_mech_from_login_method(self): self.client.login_method = 'EXTERNAL' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='EXTERNAL', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_overrides_sasl_mech_has_username(self): self.client.userid = 'new-userid' self.client.login_method = 'EXTERNAL' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username=self.client.userid, sasl_mechanisms='EXTERNAL', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_set_password(self): self.client.userid = 'someuser' self.client.password = 'somepass' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username='someuser', password='somepass', sasl_mechanisms='PLAIN', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_no_ssl_sets_transport_tcp(self): self.client.ssl = False self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_with_ssl_with_hostname_check(self): self.client.ssl = { 'keyfile': 'my_keyfile', 'certfile': 'my_certfile', 'ca_certs': 'my_cacerts', 'cert_reqs': ssl.CERT_REQUIRED, } self.transport.establish_connection() self.mock_conn.assert_called_once_with( ssl_certfile='my_certfile', ssl_trustfile='my_cacerts', timeout=4, ssl_skip_hostname_check=False, sasl_mechanisms='ANONYMOUS', host='localhost', ssl_keyfile='my_keyfile', port=5672, transport='ssl', ) def test_transport_establish_conn_with_ssl_skip_hostname_check(self): self.client.ssl = { 'keyfile': 'my_keyfile', 'certfile': 'my_certfile', 'ca_certs': 'my_cacerts', 'cert_reqs': ssl.CERT_OPTIONAL, } self.transport.establish_connection() self.mock_conn.assert_called_once_with( ssl_certfile='my_certfile', ssl_trustfile='my_cacerts', timeout=4, ssl_skip_hostname_check=True, sasl_mechanisms='ANONYMOUS', host='localhost', ssl_keyfile='my_keyfile', port=5672, transport='ssl', ) def test_transport_establish_conn_sets_client_on_connection_object(self): self.transport.establish_connection() assert self.mock_conn.return_value.client is self.client def test_transport_establish_conn_creates_session_on_transport(self): self.transport.establish_connection() qpid_conn = self.mock_conn.return_value.get_qpid_connection new_mock_session = qpid_conn.return_value.session.return_value assert self.transport.session is new_mock_session def test_transport_establish_conn_returns_new_connection_object(self): new_conn = 
self.transport.establish_connection() assert new_conn is self.mock_conn.return_value def test_transport_establish_conn_uses_hostname_if_not_default(self): self.client.hostname = 'some_other_hostname' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='some_other_hostname', timeout=4, port=5672, transport='tcp', ) def test_transport_sets_qpid_message_ready_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection mock_session = qpid_conn_call.return_value.session.return_value mock_set_callback = mock_session.set_message_received_notify_handler expected_msg_callback = self.transport._qpid_message_ready_handler mock_set_callback.assert_called_once_with(expected_msg_callback) def test_transport_sets_session_exception_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection mock_session = qpid_conn_call.return_value.session.return_value mock_set_callback = mock_session.set_async_exception_notify_handler exc_callback = self.transport._qpid_async_exception_notify_handler mock_set_callback.assert_called_once_with(exc_callback) def test_transport_sets_connection_exception_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection qpid_conn = qpid_conn_call.return_value mock_set_callback = qpid_conn.set_async_exception_notify_handler exc_callback = self.transport._qpid_async_exception_notify_handler mock_set_callback.assert_called_once_with(exc_callback) @pytest.mark.skip(reason='Not supported in Python3') class test_Transport_class_attributes: def test_verify_Connection_attribute(self): assert Connection is Transport.Connection def test_verify_polling_disabled(self): assert Transport.polling_interval is None def test_verify_driver_type_and_name(self): assert Transport.driver_type == 'qpid' assert Transport.driver_name == 'qpid' def test_verify_implements_exchange_types(self): assert 'fanout' in Transport.implements.exchange_type assert 'direct' in Transport.implements.exchange_type assert 'topic' in Transport.implements.exchange_type assert 'frobnitz' not in Transport.implements.exchange_type def test_transport_verify_recoverable_connection_errors(self): connection_errors = Transport.recoverable_connection_errors assert ConnectionError in connection_errors assert select.error in connection_errors def test_transport_verify_recoverable_channel_errors(self): channel_errors = Transport.recoverable_channel_errors assert NotFound in channel_errors def test_transport_verify_pre_kombu_3_0_exception_labels(self): assert (Transport.recoverable_channel_errors == Transport.channel_errors) assert (Transport.recoverable_connection_errors == Transport.connection_errors) @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_register_with_event_loop: def test_transport_register_with_event_loop_calls_add_reader(self): transport = Transport(Mock()) mock_connection = Mock() mock_loop = Mock() transport.register_with_event_loop(mock_connection, mock_loop) mock_loop.add_reader.assert_called_with( transport.r, transport.on_readable, mock_connection, mock_loop, ) @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_Qpid_callback_handlers_async: @pytest.fixture(autouse=True) def setup_self(self, patching, 
                   disable_runtime_dependency_check):
        self.mock_os_write = patching(QPID_MODULE + '.os.write')
        self.transport = Transport(Mock())
        self.transport.register_with_event_loop(Mock(), Mock())

    def test__qpid_message_ready_handler_writes_symbol_to_fd(self):
        self.transport._qpid_message_ready_handler(Mock())
        self.mock_os_write.assert_called_once_with(self.transport._w, '0')

    def test__qpid_async_exception_notify_handler_writes_symbol_to_fd(self):
        self.transport._qpid_async_exception_notify_handler(Mock(), Mock())
        self.mock_os_write.assert_called_once_with(self.transport._w, 'e')


@pytest.mark.skip(reason='Not supported in Python3')
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_Qpid_callback_handlers_sync:

    @pytest.fixture(autouse=True)
    def setup_method(self, patching, disable_runtime_dependency_check):
        self.mock_os_write = patching(QPID_MODULE + '.os.write')
        self.transport = Transport(Mock())

    def test__qpid_message_ready_handler_does_not_write(self):
        self.transport._qpid_message_ready_handler(Mock())
        self.mock_os_write.assert_not_called()

    def test__qpid_async_exception_notify_handler_does_not_write(self):
        self.transport._qpid_async_exception_notify_handler(Mock(), Mock())
        self.mock_os_write.assert_not_called()


@pytest.mark.skip(reason='Not supported in Python3')
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_on_readable:

    @pytest.fixture(autouse=True)
    def setup_self(self, patching, disable_runtime_dependency_check):
        self.mock_os_read = patching(QPID_MODULE + '.os.read')
        self.mock_drain_events = patching.object(Transport, 'drain_events')
        self.transport = Transport(Mock())
        self.transport.register_with_event_loop(Mock(), Mock())

    def test_transport_on_readable_reads_symbol_from_fd(self):
        self.transport.on_readable(Mock(), Mock())
        self.mock_os_read.assert_called_once_with(self.transport.r, 1)

    def test_transport_on_readable_calls_drain_events(self):
        mock_connection = Mock()
        self.transport.on_readable(mock_connection, Mock())
        self.mock_drain_events.assert_called_with(mock_connection)

    def test_transport_on_readable_catches_socket_timeout(self):
        self.mock_drain_events.side_effect = socket.timeout()
        self.transport.on_readable(Mock(), Mock())

    def test_transport_on_readable_ignores_non_socket_timeout_exception(self):
        self.mock_drain_events.side_effect = IOError()
        with pytest.raises(IOError):
            self.transport.on_readable(Mock(), Mock())


@pytest.mark.skip(reason='Not supported in Python3')
@pytest.mark.usefixtures('disable_runtime_dependency_check')
class test_Transport_verify_runtime_environment:

    @pytest.fixture(autouse=True)
    def setup_self(self, patching):
        self.verify_runtime_environment = Transport.verify_runtime_environment
        patching.object(Transport, 'verify_runtime_environment')
        self.transport = Transport(Mock())

    @patch(QPID_MODULE + '.PY3', new=True)
    def test_raises_exception_for_Python3(self):
        with pytest.raises(RuntimeError):
            self.verify_runtime_environment(self.transport)

    @patch('__builtin__.getattr')
    def test_raises_exc_for_PyPy(self, mock_getattr):
        mock_getattr.return_value = True
        with pytest.raises(RuntimeError):
            self.verify_runtime_environment(self.transport)

    @patch(QPID_MODULE + '.dependency_is_none')
    def test_raises_exc_dep_missing(self, mock_dep_is_none):
        mock_dep_is_none.return_value = True
        with pytest.raises(RuntimeError):
            self.verify_runtime_environment(self.transport)

    @patch(QPID_MODULE + '.dependency_is_none')
    def test_calls_dependency_is_none(self, mock_dep_is_none):
        mock_dep_is_none.return_value = False
self.verify_runtime_environment(self.transport) mock_dep_is_none.assert_called() def test_raises_no_exception(self): self.verify_runtime_environment(self.transport) @pytest.mark.skip(reason='Not supported in Python3') @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport: def setup_method(self): """Creates a mock_client to be used in testing.""" self.mock_client = Mock() def test_supports_ev(self): """Test that the transport claims to support async event loop""" assert Transport(self.mock_client).supports_ev def test_close_connection(self): """Test that close_connection calls close on the connection.""" my_transport = Transport(self.mock_client) mock_connection = Mock() my_transport.close_connection(mock_connection) mock_connection.close.assert_called_once_with() def test_default_connection_params(self): """Test that the default_connection_params are correct""" correct_params = { 'hostname': 'localhost', 'port': 5672, } my_transport = Transport(self.mock_client) result_params = my_transport.default_connection_params assert correct_params == result_params @patch(QPID_MODULE + '.os.close') def test_del_sync(self, close): my_transport = Transport(self.mock_client) my_transport.__del__() close.assert_not_called() @patch(QPID_MODULE + '.os.close') def test_del_async(self, close): my_transport = Transport(self.mock_client) my_transport.register_with_event_loop(Mock(), Mock()) my_transport.__del__() close.assert_called() @patch(QPID_MODULE + '.os.close') def test_del_async_failed(self, close): close.side_effect = OSError() my_transport = Transport(self.mock_client) my_transport.register_with_event_loop(Mock(), Mock()) my_transport.__del__() close.assert_called() kombu-5.5.3/t/unit/transport/test_redis.py000066400000000000000000002070761477772317200206730ustar00rootroot00000000000000from __future__ import annotations import base64 import copy import socket import types from collections import defaultdict from itertools import count from queue import Empty from queue import Queue as _Queue from typing import TYPE_CHECKING from unittest.mock import ANY, Mock, call, patch import pytest from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.exceptions import VersionMismatch from kombu.transport import virtual from kombu.utils import eventio # patch poll from kombu.utils.json import dumps if TYPE_CHECKING: from types import TracebackType def _redis_modules(): class ConnectionError(Exception): pass class AuthenticationError(Exception): pass class InvalidData(Exception): pass class InvalidResponse(Exception): pass class ResponseError(Exception): pass exceptions = types.ModuleType('redis.exceptions') exceptions.ConnectionError = ConnectionError exceptions.AuthenticationError = AuthenticationError exceptions.InvalidData = InvalidData exceptions.InvalidResponse = InvalidResponse exceptions.ResponseError = ResponseError class Redis: pass myredis = types.ModuleType('redis') myredis.exceptions = exceptions myredis.Redis = Redis return myredis, exceptions class _poll(eventio._select): def register(self, fd, flags): if flags & eventio.READ: self._rfd.add(fd) def poll(self, timeout): events = [] for fd in self._rfd: if fd.data: events.append((fd.fileno(), eventio.READ)) return events eventio.poll = _poll pytest.importorskip('redis') # must import after poller patch, pep8 complains from kombu.transport import redis # noqa class ResponseError(Exception): pass class Client: queues = {} sets = defaultdict(set) hashes = defaultdict(dict) shard_hint = None def 
__init__(self, db=None, port=None, connection_pool=None, **kwargs): self._called = [] self._connection = None self.bgsave_raises_ResponseError = False self.connection = self._sconnection(self) def bgsave(self): self._called.append('BGSAVE') if self.bgsave_raises_ResponseError: raise ResponseError() def delete(self, key): self.queues.pop(key, None) def exists(self, key): return key in self.queues or key in self.sets def hset(self, key, k, v): self.hashes[key][k] = v def hget(self, key, k): return self.hashes[key].get(k) def hdel(self, key, k): self.hashes[key].pop(k, None) def sadd(self, key, member, *args): self.sets[key].add(member) def zadd(self, key, *args): if redis.redis.VERSION[0] >= 3: (mapping,) = args for item in mapping: self.sets[key].add(item) else: # TODO: remove me when we drop support for Redis-py v2 (score1, member1) = args self.sets[key].add(member1) def smembers(self, key): return self.sets.get(key, set()) def ping(self, *args, **kwargs): return True def srem(self, key, *args): self.sets.pop(key, None) zrem = srem def llen(self, key): try: return self.queues[key].qsize() except KeyError: return 0 def lpush(self, key, value): self.queues[key].put_nowait(value) def parse_response(self, connection, type, **options): cmd, queues = self.connection._sock.data.pop() queues = list(queues) assert cmd == type self.connection._sock.data = [] if type == 'BRPOP': timeout = queues.pop() item = self.brpop(queues, timeout) if item: return item raise Empty() def brpop(self, keys, timeout=None): for key in keys: try: item = self.queues[key].get_nowait() except Empty: pass else: return key, item def rpop(self, key): try: return self.queues[key].get_nowait() except (KeyError, Empty): pass def __contains__(self, k): return k in self._called def pipeline(self): return Pipeline(self) def encode(self, value): return str(value) def _new_queue(self, key): self.queues[key] = _Queue() class _sconnection: disconnected = False class _socket: blocking = True filenos = count(30) def __init__(self, *args): self._fileno = next(self.filenos) self.data = [] def fileno(self): return self._fileno def setblocking(self, blocking): self.blocking = blocking def __init__(self, client): self.client = client self._sock = self._socket() def disconnect(self): self.disconnected = True def send_command(self, cmd, *args): self._sock.data.append((cmd, args)) def info(self): return {'foo': 1} def pubsub(self, *args, **kwargs): connection = self.connection class ConnectionPool: def get_connection(self, *args, **kwargs): return connection self.connection_pool = ConnectionPool() return self class Pipeline: def __init__(self, client): self.client = client self.stack = [] def __enter__(self): return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None ) -> None: pass def __getattr__(self, key): if key not in self.__dict__: def _add(*args, **kwargs): self.stack.append((getattr(self.client, key), args, kwargs)) return self return _add return self.__dict__[key] def execute(self): stack = list(self.stack) self.stack[:] = [] return [fun(*args, **kwargs) for fun, args, kwargs in stack] class Channel(redis.Channel): def _get_client(self): return Client def _get_pool(self, asynchronous=False): return Mock() def _get_response_error(self): return ResponseError def _new_queue(self, queue, **kwargs): for pri in self.priority_steps: self.client._new_queue(self._q_for_pri(queue, pri)) def pipeline(self): return Pipeline(Client()) class Transport(redis.Transport): Channel = 
Channel connection_errors = (KeyError,) channel_errors = (IndexError,) class test_Channel: def setup_method(self): self.connection = self.create_connection() self.channel = self.connection.default_channel def create_connection(self, **kwargs): kwargs.setdefault('transport_options', {'fanout_patterns': True}) return Connection(transport=Transport, **kwargs) def _get_one_delivery_tag(self, n='test_uniq_tag'): with self.create_connection() as conn1: chan = conn1.default_channel chan.exchange_declare(n) chan.queue_declare(n) chan.queue_bind(n, n, n) msg = chan.prepare_message('quick brown fox') chan.basic_publish(msg, n, n) payload = chan._get(n) assert payload pymsg = chan.message_to_python(payload) return pymsg.delivery_tag def test_delivery_tag_is_uuid(self): seen = set() for i in range(100): tag = self._get_one_delivery_tag() assert tag not in seen seen.add(tag) with pytest.raises(ValueError): int(tag) assert len(tag) == 36 def test_disable_ack_emulation(self): conn = Connection(transport=Transport, transport_options={ 'ack_emulation': False, }) chan = conn.channel() assert not chan.ack_emulation assert chan.QoS == virtual.QoS def test_redis_ping_raises(self): pool = Mock(name='pool') pool_at_init = [pool] client = Mock(name='client') class XChannel(Channel): def __init__(self, *args, **kwargs): self._pool = pool_at_init[0] super().__init__(*args, **kwargs) def _get_client(self): return lambda *_, **__: client class XTransport(Transport): Channel = XChannel conn = Connection(transport=XTransport) conn.transport.cycle = Mock(name='cycle') client.ping.side_effect = RuntimeError() with pytest.raises(RuntimeError): conn.channel() pool.disconnect.assert_called_with() pool.disconnect.reset_mock() # Ensure that the channel without ensured connection to Redis # won't be added to the cycle. 
conn.transport.cycle.add.assert_not_called() assert len(conn.transport.channels) == 0 pool_at_init = [None] with pytest.raises(RuntimeError): conn.channel() pool.disconnect.assert_not_called() def test_redis_connection_added_to_cycle_if_ping_succeeds(self): """Test should check the connection is added to the cycle only if the ping to Redis was finished successfully.""" # given: mock pool and client pool = Mock(name='pool') client = Mock(name='client') # override channel class with given mocks class XChannel(Channel): def __init__(self, *args, **kwargs): self._pool = pool super().__init__(*args, **kwargs) def _get_client(self): return lambda *_, **__: client # override Channel in Transport with given channel class XTransport(Transport): Channel = XChannel # when: create connection with overridden transport conn = Connection(transport=XTransport) conn.transport.cycle = Mock(name='cycle') # create the channel chan = conn.channel() # then: check if ping was called client.ping.assert_called_once() # the connection was added to the cycle conn.transport.cycle.add.assert_called_once() assert len(conn.transport.channels) == 1 # the channel was flagged as registered into poller assert chan._registered def test_redis_on_disconnect_channel_only_if_was_registered(self): """Test should check if the _on_disconnect method is called only if the channel was registered into the poller.""" # given: mock pool and client pool = Mock(name='pool') client = Mock( name='client', ping=Mock(return_value=True) ) # create RedisConnectionMock class # for the possibility to run disconnect method class RedisConnectionMock: def disconnect(self, *args): pass # override Channel method with given mocks class XChannel(Channel): connection_class = RedisConnectionMock def __init__(self, *args, **kwargs): self._pool = pool # counter to check if the method was called self.on_disconect_count = 0 super().__init__(*args, **kwargs) def _get_client(self): return lambda *_, **__: client def _on_connection_disconnect(self, connection): # increment the counter when the method is called self.on_disconect_count += 1 # create the channel chan = XChannel(Mock( _used_channel_ids=[], channel_max=1, channels=[], client=Mock( transport_options={}, hostname="127.0.0.1", virtual_host=None))) # create the _connparams with overridden connection_class connparams = chan._connparams(asynchronous=True) # create redis.Connection redis_connection = connparams['connection_class']() # the connection was added to the cycle chan.connection.cycle.add.assert_called_once() # and the ping was called client.ping.assert_called_once() # the channel was registered assert chan._registered # than disconnect the Redis connection redis_connection.disconnect() # the on_disconnect counter should be incremented assert chan.on_disconect_count == 1 def test_redis__on_disconnect_should_not_be_called_if_not_registered(self): """Test should check if the _on_disconnect method is not called because the connection to Redis isn't established properly.""" # given: mock pool pool = Mock(name='pool') # client mock with ping method which return ConnectionError from redis.exceptions import ConnectionError client = Mock( name='client', ping=Mock(side_effect=ConnectionError()) ) # create RedisConnectionMock # for the possibility to run disconnect method class RedisConnectionMock: def disconnect(self, *args): pass # override Channel method with given mocks class XChannel(Channel): connection_class = RedisConnectionMock def __init__(self, *args, **kwargs): self._pool = pool # counter to check 
if the method was called self.on_disconect_count = 0 super().__init__(*args, **kwargs) def _get_client(self): return lambda *_, **__: client def _on_connection_disconnect(self, connection): # increment the counter when the method is called self.on_disconect_count += 1 # then: exception was risen with pytest.raises(ConnectionError): # when: create the channel chan = XChannel(Mock( _used_channel_ids=[], channel_max=1, channels=[], client=Mock( transport_options={}, hostname="127.0.0.1", virtual_host=None))) # create the _connparams with overridden connection_class connparams = chan._connparams(asynchronous=True) # create redis.Connection redis_connection = connparams['connection_class']() # the connection wasn't added to the cycle chan.connection.cycle.add.assert_not_called() # the ping was called once with the exception client.ping.assert_called_once() # the channel was not registered assert not chan._registered # then: disconnect the Redis connection redis_connection.disconnect() # the on_disconnect counter shouldn't be incremented assert chan.on_disconect_count == 0 def test_get_redis_ConnectionError(self): from redis.exceptions import ConnectionError from kombu.transport.redis import get_redis_ConnectionError connection_error = get_redis_ConnectionError() assert connection_error == ConnectionError def test_after_fork_cleanup_channel(self): from kombu.transport.redis import _after_fork_cleanup_channel channel = Mock() _after_fork_cleanup_channel(channel) channel._after_fork.assert_called_once() def test_after_fork(self): self.channel._pool = None self.channel._after_fork() pool = self.channel._pool = Mock(name='pool') self.channel._after_fork() pool.disconnect.assert_called_with() def test_next_delivery_tag(self): assert (self.channel._next_delivery_tag() != self.channel._next_delivery_tag()) def test_do_restore_message(self): client = Mock(name='client') pl1 = {'body': 'BODY'} spl1 = dumps(pl1) lookup = self.channel._lookup = Mock(name='_lookup') lookup.return_value = {'george', 'elaine'} self.channel._do_restore_message( pl1, 'ex', 'rkey', client, ) client.rpush.assert_has_calls([ call('george', spl1), call('elaine', spl1), ], any_order=True) client = Mock(name='client') pl2 = {'body': 'BODY2', 'headers': {'x-funny': 1}} headers_after = dict(pl2['headers'], redelivered=True) spl2 = dumps(dict(pl2, headers=headers_after)) self.channel._do_restore_message( pl2, 'ex', 'rkey', client, ) client.rpush.assert_any_call('george', spl2) client.rpush.assert_any_call('elaine', spl2) client.rpush.side_effect = KeyError() with patch('kombu.transport.redis.crit') as crit: self.channel._do_restore_message( pl2, 'ex', 'rkey', client, ) crit.assert_called() def test_do_restore_message_celery(self): # Payload value from real Celery project payload = { "body": base64.b64encode(dumps([ [], {}, { "callbacks": None, "errbacks": None, "chain": None, "chord": None, }, ]).encode()).decode(), "content-encoding": "utf-8", "content-type": "application/json", "headers": { "lang": "py", "task": "common.tasks.test_task", "id": "980ad2bf-104c-4ce0-8643-67d1947173f6", "shadow": None, "eta": None, "expires": None, "group": None, "group_index": None, "retries": 0, "timelimit": [None, None], "root_id": "980ad2bf-104c-4ce0-8643-67d1947173f6", "parent_id": None, "argsrepr": "()", "kwargsrepr": "{}", "origin": "gen3437@Desktop", "ignore_result": False, }, "properties": { "correlation_id": "980ad2bf-104c-4ce0-8643-67d1947173f6", "reply_to": "512f2489-ca40-3585-bc10-9b801a981782", "delivery_mode": 2, "delivery_info": { 
"exchange": "", "routing_key": "celery", }, "priority": 3, "body_encoding": "base64", "delivery_tag": "badb725e-9c3e-45be-b0a4-07e44630519f", }, } result_payload = copy.deepcopy(payload) result_payload['headers']['redelivered'] = True result_payload['properties']['delivery_info']['redelivered'] = True queue = 'celery' client = Mock(name='client') lookup = self.channel._lookup = Mock(name='_lookup') lookup.return_value = [queue] self.channel._do_restore_message( payload, 'exchange', 'routing_key', client, ) client.rpush.assert_called_with(self.channel._q_for_pri(queue, 3), dumps(result_payload)) def test_restore_no_messages(self): message = Mock(name='message') with patch('kombu.transport.redis.loads') as loads: def transaction_handler(restore_transaction, unacked_key): assert unacked_key == self.channel.unacked_key pipe = Mock(name='pipe') pipe.hget.return_value = None restore_transaction(pipe) pipe.multi.assert_called_once_with() pipe.hdel.assert_called_once_with( unacked_key, message.delivery_tag) loads.assert_not_called() client = self.channel._create_client = Mock(name='client') client = client() client.transaction.side_effect = transaction_handler self.channel._restore(message) client.transaction.assert_called() def test_restore_messages(self): message = Mock(name='message') with patch('kombu.transport.redis.loads') as loads: def transaction_handler(restore_transaction, unacked_key): assert unacked_key == self.channel.unacked_key restore = self.channel._do_restore_message = Mock( name='_do_restore_message', ) result = Mock(name='result') loads.return_value = 'M', 'EX', 'RK' pipe = Mock(name='pipe') pipe.hget.return_value = result restore_transaction(pipe) loads.assert_called_with(result) pipe.multi.assert_called_once_with() pipe.hdel.assert_called_once_with( unacked_key, message.delivery_tag) loads.assert_called() restore.assert_called_with('M', 'EX', 'RK', pipe, False) client = self.channel._create_client = Mock(name='client') client = client() client.transaction.side_effect = transaction_handler self.channel._restore(message) def test_qos_restore_visible(self): client = self.channel._create_client = Mock(name='client') client = client() def pipe(*args, **kwargs): return Pipeline(client) client.pipeline = pipe client.zrevrangebyscore.return_value = [ (1, 10), (2, 20), (3, 30), ] qos = redis.QoS(self.channel) restore = qos.restore_by_tag = Mock(name='restore_by_tag') qos._vrestore_count = 1 qos.restore_visible() client.zrevrangebyscore.assert_not_called() assert qos._vrestore_count == 2 qos._vrestore_count = 0 qos.restore_visible() restore.assert_has_calls([ call(1, client), call(2, client), call(3, client), ]) assert qos._vrestore_count == 1 qos._vrestore_count = 0 restore.reset_mock() client.zrevrangebyscore.return_value = [] qos.restore_visible() restore.assert_not_called() assert qos._vrestore_count == 1 qos._vrestore_count = 0 client.setnx.side_effect = redis.MutexHeld() qos.restore_visible() def test_basic_consume_when_fanout_queue(self): self.channel.exchange_declare(exchange='txconfan', type='fanout') self.channel.queue_declare(queue='txconfanq') self.channel.queue_bind(queue='txconfanq', exchange='txconfan') assert 'txconfanq' in self.channel._fanout_queues self.channel.basic_consume('txconfanq', False, None, 1) assert 'txconfanq' in self.channel.active_fanout_queues assert self.channel._fanout_to_queue.get('txconfan') == 'txconfanq' def test_basic_cancel_unknown_delivery_tag(self): assert self.channel.basic_cancel('txaseqwewq') is None def test_subscribe_no_queues(self): 
self.channel.subclient = Mock() self.channel.active_fanout_queues.clear() self.channel._subscribe() self.channel.subclient.subscribe.assert_not_called() def test_subscribe(self): self.channel.subclient = Mock() self.channel.active_fanout_queues.add('a') self.channel.active_fanout_queues.add('b') self.channel._fanout_queues.update(a=('a', ''), b=('b', '')) self.channel._subscribe() self.channel.subclient.psubscribe.assert_called() s_args, _ = self.channel.subclient.psubscribe.call_args assert sorted(s_args[0]) == ['/{db}.a', '/{db}.b'] self.channel.subclient.connection._sock = None self.channel._subscribe() self.channel.subclient.connection.connect.assert_called_with() def test_handle_unsubscribe_message(self): s = self.channel.subclient s.subscribed = True self.channel._handle_message(s, ['unsubscribe', 'a', 0]) assert not s.subscribed def test_handle_pmessage_message(self): res = self.channel._handle_message( self.channel.subclient, ['pmessage', 'pattern', 'channel', 'data'], ) assert res == { 'type': 'pmessage', 'pattern': 'pattern', 'channel': 'channel', 'data': 'data', } def test_handle_message(self): res = self.channel._handle_message( self.channel.subclient, ['type', 'channel', 'data'], ) assert res == { 'type': 'type', 'pattern': None, 'channel': 'channel', 'data': 'data', } def test_brpop_start_but_no_queues(self): assert self.channel._brpop_start() is None def test_receive(self): s = self.channel.subclient = Mock() self.channel._fanout_to_queue['a'] = 'b' self.channel.connection._deliver = Mock(name='_deliver') message = { 'body': 'hello', 'properties': { 'delivery_tag': 1, 'delivery_info': {'exchange': 'E', 'routing_key': 'R'}, }, } s.parse_response.return_value = ['message', 'a', dumps(message)] self.channel._receive_one(self.channel.subclient) self.channel.connection._deliver.assert_called_once_with( message, 'b', ) def test_receive_raises_for_connection_error(self): self.channel._in_listen = True s = self.channel.subclient = Mock() s.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._receive_one(self.channel.subclient) assert not self.channel._in_listen def test_receive_empty(self): s = self.channel.subclient = Mock() s.parse_response.return_value = None assert self.channel._receive_one(self.channel.subclient) is None def test_receive_different_message_Type(self): s = self.channel.subclient = Mock() s.parse_response.return_value = ['message', '/foo/', 0, 'data'] assert self.channel._receive_one(self.channel.subclient) is None def test_receive_invalid_response_type(self): s = self.channel.subclient = Mock() for resp in ['foo', None]: s.parse_response.return_value = resp assert self.channel._receive_one(self.channel.subclient) is None def test_receive_connection_has_gone(self): def _receive_one(c): c.connection = None _receive_one.called = True return True _receive_one.called = False self.channel._receive_one = _receive_one assert self.channel._receive() assert _receive_one.called def test_brpop_read_raises(self): c = self.channel.client = Mock() c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._brpop_read() c.connection.disconnect.assert_called_with() def test_brpop_read_gives_None(self): c = self.channel.client = Mock() c.parse_response.return_value = None with pytest.raises(redis.Empty): self.channel._brpop_read() def test_poll_error(self): c = self.channel.client = Mock() c.parse_response = Mock() self.channel._poll_error('BRPOP') c.parse_response.assert_called_with(c.connection, 'BRPOP') 
c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._poll_error('BRPOP') def test_poll_error_on_type_LISTEN(self): c = self.channel.subclient = Mock() c.parse_response = Mock() self.channel._poll_error('LISTEN') c.parse_response.assert_called_with() c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._poll_error('LISTEN') def test_put_fanout(self): self.channel._in_poll = False c = self.channel._create_client = Mock() body = {'hello': 'world'} self.channel._put_fanout('exchange', body, '') c().publish.assert_called_with('/{db}.exchange', dumps(body)) def test_put_priority(self): client = self.channel._create_client = Mock(name='client') msg1 = {'properties': {'priority': 3}} self.channel._put('george', msg1) client().lpush.assert_called_with( self.channel._q_for_pri('george', 3), dumps(msg1), ) msg2 = {'properties': {'priority': 313}} self.channel._put('george', msg2) client().lpush.assert_called_with( self.channel._q_for_pri('george', 9), dumps(msg2), ) msg3 = {'properties': {}} self.channel._put('george', msg3) client().lpush.assert_called_with( self.channel._q_for_pri('george', 0), dumps(msg3), ) def test_delete(self): x = self.channel x._create_client = Mock() x._create_client.return_value = x.client delete = x.client.delete = Mock() srem = x.client.srem = Mock() x._delete('queue', 'exchange', 'routing_key', None) delete.assert_has_calls([ call(x._q_for_pri('queue', pri)) for pri in redis.PRIORITY_STEPS ]) srem.assert_called_with(x.keyprefix_queue % ('exchange',), x.sep.join(['routing_key', '', 'queue'])) def test_has_queue(self): self.channel._create_client = Mock() self.channel._create_client.return_value = self.channel.client exists = self.channel.client.exists = Mock() exists.return_value = True assert self.channel._has_queue('foo') exists.assert_has_calls([ call(self.channel._q_for_pri('foo', pri)) for pri in redis.PRIORITY_STEPS ]) exists.return_value = False assert not self.channel._has_queue('foo') def test_close_when_closed(self): self.channel.closed = True self.channel.close() def test_close_deletes_autodelete_fanout_queues(self): self.channel._fanout_queues = {'foo': ('foo', ''), 'bar': ('bar', '')} self.channel.auto_delete_queues = ['foo'] self.channel.queue_delete = Mock(name='queue_delete') client = self.channel.client self.channel.close() self.channel.queue_delete.assert_has_calls([ call('foo', client=client), ]) def test_close_client_close_raises(self): c = self.channel.client = Mock() connection = c.connection connection.disconnect.side_effect = self.channel.ResponseError() self.channel.close() connection.disconnect.assert_called_with() def test_invalid_database_raises_ValueError(self): with pytest.raises(ValueError): self.channel.connection.client.virtual_host = 'dwqeq' self.channel._connparams() def test_connparams_allows_slash_in_db(self): self.channel.connection.client.virtual_host = '/123' assert self.channel._connparams()['db'] == 123 def test_connparams_db_can_be_int(self): self.channel.connection.client.virtual_host = 124 assert self.channel._connparams()['db'] == 124 def test_new_queue_with_auto_delete(self): redis.Channel._new_queue(self.channel, 'george', auto_delete=False) assert 'george' not in self.channel.auto_delete_queues redis.Channel._new_queue(self.channel, 'elaine', auto_delete=True) assert 'elaine' in self.channel.auto_delete_queues def test_connparams_regular_hostname(self): self.channel.connection.client.hostname = 'george.vandelay.com' assert 
            self.channel._connparams()['host'] == 'george.vandelay.com'

    def test_connparams_username(self):
        self.channel.connection.client.userid = 'kombu'
        assert self.channel._connparams()['username'] == 'kombu'

    def test_connparams_client_credentials(self):
        self.channel.connection.client.hostname = \
            'redis://foo:bar@127.0.0.1:6379/0'
        connection_parameters = self.channel._connparams()
        assert connection_parameters['username'] == 'foo'
        assert connection_parameters['password'] == 'bar'

    def test_connparams_password_for_unix_socket(self):
        self.channel.connection.client.hostname = \
            'socket://:foo@/var/run/redis.sock'
        connection_parameters = self.channel._connparams()
        password = connection_parameters['password']
        path = connection_parameters['path']
        assert (password, path) == ('foo', '/var/run/redis.sock')
        self.channel.connection.client.hostname = \
            'socket://@/var/run/redis.sock'
        connection_parameters = self.channel._connparams()
        password = connection_parameters['password']
        path = connection_parameters['path']
        assert (password, path) == (None, '/var/run/redis.sock')

    def test_connparams_health_check_interval_not_supported(self):
        with patch('kombu.transport.redis.Channel._create_client'):
            with Connection('redis+socket:///tmp/redis.sock') as conn:
                conn.default_channel.connection_class = \
                    Mock(name='connection_class')
                connparams = conn.default_channel._connparams()
                assert 'health_check_interval' not in connparams

    def test_connparams_health_check_interval_supported(self):
        with patch('kombu.transport.redis.Channel._create_client'):
            with Connection('redis+socket:///tmp/redis.sock') as conn:
                connparams = conn.default_channel._connparams()
                assert connparams['health_check_interval'] == 25

    def test_rotate_cycle_ValueError(self):
        cycle = self.channel._queue_cycle
        cycle.update(['kramer', 'jerry'])
        cycle.rotate('kramer')
        assert cycle.items == ['jerry', 'kramer']
        cycle.rotate('elaine')

    def test_get_client(self):
        import redis as R
        KombuRedis = redis.Channel._get_client(self.channel)
        assert isinstance(KombuRedis(), R.StrictRedis)
        Rv = getattr(R, 'VERSION', None)
        try:
            R.VERSION = (2, 4, 0)
            with pytest.raises(VersionMismatch):
                redis.Channel._get_client(self.channel)
        finally:
            if Rv is not None:
                R.VERSION = Rv

    def test_get_prefixed_client(self):
        from kombu.transport.redis import PrefixedStrictRedis
        self.channel.global_keyprefix = "test_"
        PrefixedRedis = redis.Channel._get_client(self.channel)
        assert isinstance(PrefixedRedis(), PrefixedStrictRedis)

    def test_get_response_error(self):
        from redis.exceptions import ResponseError
        assert redis.Channel._get_response_error(self.channel) is ResponseError

    def test_avail_client(self):
        self.channel._pool = Mock()
        cc = self.channel._create_client = Mock()
        with self.channel.conn_or_acquire():
            pass
        cc.assert_called_with()

    def test_register_with_event_loop(self):
        transport = self.connection.transport
        transport.cycle = Mock(name='cycle')
        transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
        conn = Mock(name='conn')
        conn.client = Mock(name='client', transport_options={})
        loop = Mock(name='loop')
        redis.Transport.register_with_event_loop(transport, conn, loop)
        transport.cycle.on_poll_init.assert_called_with(loop.poller)
        loop.call_repeatedly.assert_has_calls([
            call(10, transport.cycle.maybe_restore_messages),
            call(25, transport.cycle.maybe_check_subclient_health),
        ])
        loop.on_tick.add.assert_called()
        on_poll_start = loop.on_tick.add.call_args[0][0]
        on_poll_start()
        transport.cycle.on_poll_start.assert_called_with()
        loop.add_reader.assert_has_calls([
            call(12, transport.on_readable, 12),
            call(13,
                 transport.on_readable, 13),
        ])

    @pytest.mark.parametrize('fds', [{12: 'LISTEN', 13: 'BRPOP'}, {}])
    def test_register_with_event_loop__on_disconnect__loop_cleanup(self, fds):
        """Ensure event loop polling stops on disconnect (if started)."""
        transport = self.connection.transport
        self.connection._sock = None
        transport.cycle = Mock(name='cycle')
        transport.cycle.fds = fds
        conn = Mock(name='conn')
        conn.client = Mock(name='client', transport_options={})
        loop = Mock(name='loop')
        loop.on_tick = set()
        redis.Transport.register_with_event_loop(transport, conn, loop)
        assert len(loop.on_tick) == 1
        transport.cycle._on_connection_disconnect(self.connection)
        if fds:
            assert len(loop.on_tick) == 0
        else:
            # on_tick shouldn't be cleared when polling hasn't started
            assert len(loop.on_tick) == 1

    def test_configurable_health_check(self):
        transport = self.connection.transport
        transport.cycle = Mock(name='cycle')
        transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
        conn = Mock(name='conn')
        conn.client = Mock(name='client', transport_options={
            'health_check_interval': 15,
        })
        loop = Mock(name='loop')
        redis.Transport.register_with_event_loop(transport, conn, loop)
        transport.cycle.on_poll_init.assert_called_with(loop.poller)
        loop.call_repeatedly.assert_has_calls([
            call(10, transport.cycle.maybe_restore_messages),
            call(15, transport.cycle.maybe_check_subclient_health),
        ])
        loop.on_tick.add.assert_called()
        on_poll_start = loop.on_tick.add.call_args[0][0]
        on_poll_start()
        transport.cycle.on_poll_start.assert_called_with()
        loop.add_reader.assert_has_calls([
            call(12, transport.on_readable, 12),
            call(13, transport.on_readable, 13),
        ])

    def test_transport_on_readable(self):
        transport = self.connection.transport
        cycle = transport.cycle = Mock(name='cycle')
        cycle.on_readable.return_value = None
        redis.Transport.on_readable(transport, 13)
        cycle.on_readable.assert_called_with(13)

    def test_transport_connection_errors(self):
        """Ensure connection_errors are populated."""
        assert redis.Transport.connection_errors

    def test_transport_channel_errors(self):
        """Ensure channel_errors are populated."""
        assert redis.Transport.channel_errors

    def test_transport_driver_version(self):
        assert redis.Transport.driver_version(self.connection.transport)

    def test_transport_errors_when_InvalidData_used(self):
        from redis import exceptions
        from kombu.transport.redis import get_redis_error_classes

        class ID(Exception):
            pass

        DataError = getattr(exceptions, 'DataError', None)
        InvalidData = getattr(exceptions, 'InvalidData', None)
        exceptions.InvalidData = ID
        exceptions.DataError = None
        try:
            errors = get_redis_error_classes()
            assert errors
            assert ID in errors[1]
        finally:
            if DataError is not None:
                exceptions.DataError = DataError
            if InvalidData is not None:
                exceptions.InvalidData = InvalidData

    def test_empty_queues_key(self):
        channel = self.channel
        channel._in_poll = False
        key = channel.keyprefix_queue % 'celery'

        # Everything is fine, there is a list of queues.
        channel.client.sadd(key, 'celery\x06\x16\x06\x16celery')
        assert channel.get_table('celery') == [
            ('celery', '', 'celery'),
        ]

        # Remove the last queue from the exchange. After this call no queue
        # is bound to the exchange.
channel.client.srem(key) # get_table() should return empty list of queues assert self.channel.get_table('celery') == [] def test_socket_connection(self): with patch('kombu.transport.redis.Channel._create_client'): with Connection('redis+socket:///tmp/redis.sock') as conn: connparams = conn.default_channel._connparams() assert issubclass( connparams['connection_class'], redis.redis.UnixDomainSocketConnection, ) assert connparams['path'] == '/tmp/redis.sock' def test_ssl_argument__dict(self): with patch('kombu.transport.redis.Channel._create_client'): # Expected format for redis-py's SSLConnection class ssl_params = { 'ssl_cert_reqs': 2, 'ssl_ca_certs': '/foo/ca.pem', 'ssl_certfile': '/foo/cert.crt', 'ssl_keyfile': '/foo/pkey.key' } with Connection('redis://', ssl=ssl_params) as conn: params = conn.default_channel._connparams() assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs'] assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs'] assert params['ssl_certfile'] == ssl_params['ssl_certfile'] assert params['ssl_keyfile'] == ssl_params['ssl_keyfile'] assert params.get('ssl') is None def test_ssl_connection(self): with patch('kombu.transport.redis.Channel._create_client'): with Connection('redis://', ssl={'ssl_cert_reqs': 2}) as conn: connparams = conn.default_channel._connparams() assert issubclass( connparams['connection_class'], redis.redis.SSLConnection, ) def test_rediss_connection(self): with patch('kombu.transport.redis.Channel._create_client'): with Connection('rediss://') as conn: connparams = conn.default_channel._connparams() assert issubclass( connparams['connection_class'], redis.redis.SSLConnection, ) def test_sep_transport_option(self): with Connection(transport=Transport, transport_options={ 'sep': ':', }) as conn: key = conn.default_channel.keyprefix_queue % 'celery' conn.default_channel.client.sadd(key, 'celery::celery') assert conn.default_channel.sep == ':' assert conn.default_channel.get_table('celery') == [ ('celery', '', 'celery'), ] @patch("redis.StrictRedis.execute_command") def test_global_keyprefix(self, mock_execute_command): from kombu.transport.redis import PrefixedStrictRedis with Connection(transport=Transport) as conn: client = PrefixedStrictRedis(global_keyprefix='foo_') channel = conn.channel() channel._create_client = Mock() channel._create_client.return_value = client body = {'hello': 'world'} channel._put_fanout('exchange', body, '') mock_execute_command.assert_called_with( 'PUBLISH', 'foo_/{db}.exchange', dumps(body) ) @patch("redis.StrictRedis.execute_command") def test_global_keyprefix_queue_bind(self, mock_execute_command): from kombu.transport.redis import PrefixedStrictRedis with Connection(transport=Transport) as conn: client = PrefixedStrictRedis(global_keyprefix='foo_') channel = conn.channel() channel._create_client = Mock() channel._create_client.return_value = client channel._queue_bind('default', '', None, 'queue') mock_execute_command.assert_called_with( 'SADD', 'foo__kombu.binding.default', '\x06\x16\x06\x16queue' ) @patch("redis.client.PubSub.execute_command") def test_global_keyprefix_pubsub(self, mock_execute_command): from kombu.transport.redis import PrefixedStrictRedis with Connection(transport=Transport) as conn: client = PrefixedStrictRedis(global_keyprefix='foo_') channel = conn.channel() channel.global_keyprefix = 'foo_' channel._create_client = Mock() channel._create_client.return_value = client channel.subclient.connection = Mock() channel.active_fanout_queues.add('a') channel._subscribe() 
mock_execute_command.assert_called_with( 'PSUBSCRIBE', 'foo_/{db}.a', ) @patch("redis.client.Pipeline.execute_command") def test_global_keyprefix_transaction(self, mock_execute_command): from kombu.transport.redis import PrefixedStrictRedis with Connection(transport=Transport) as conn: def pipeline(transaction=True, shard_hint=None): pipeline_obj = original_pipeline( transaction=transaction, shard_hint=shard_hint ) mock_execute_command.side_effect = [ None, None, pipeline_obj, pipeline_obj ] return pipeline_obj client = PrefixedStrictRedis(global_keyprefix='foo_') original_pipeline = client.pipeline client.pipeline = pipeline channel = conn.channel() channel._create_client = Mock() channel._create_client.return_value = client channel.qos.restore_by_tag('test-tag') assert mock_execute_command is not None # https://github.com/redis/redis-py/pull/3038 (redis>=5.1.0a1) # adds keyword argument `keys` to redis client. # To be compatible with all supported redis versions, # take into account only `call.args`. call_args = [call.args for call in mock_execute_command.mock_calls] assert call_args == [ ('WATCH', 'foo_unacked'), ('HGET', 'foo_unacked', 'test-tag'), ('ZREM', 'foo_unacked_index', 'test-tag'), ('HDEL', 'foo_unacked', 'test-tag') ] class test_Redis: def setup_method(self): self.connection = Connection(transport=Transport) self.exchange = Exchange('test_Redis', type='direct') self.queue = Queue('test_Redis', self.exchange, 'test_Redis') def teardown_method(self): self.connection.close() @pytest.mark.replace_module_value(redis.redis, 'VERSION', [3, 0, 0]) def test_publish__get_redispyv3(self, replace_module_value): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() producer.publish({'hello': 'world'}) assert self.queue(channel).get().payload == {'hello': 'world'} assert self.queue(channel).get() is None assert self.queue(channel).get() is None assert self.queue(channel).get() is None @pytest.mark.replace_module_value(redis.redis, 'VERSION', [2, 5, 10]) def test_publish__get_redispyv2(self, replace_module_value): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() producer.publish({'hello': 'world'}) assert self.queue(channel).get().payload == {'hello': 'world'} assert self.queue(channel).get() is None assert self.queue(channel).get() is None assert self.queue(channel).get() is None def test_publish__consume(self): connection = Connection(transport=Transport) channel = connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') consumer = Consumer(channel, queues=[self.queue]) producer.publish({'hello2': 'world2'}) _received = [] def callback(message_data, message): _received.append(message_data) message.ack() consumer.register_callback(callback) consumer.consume() assert channel in channel.connection.cycle._channels try: connection.drain_events(timeout=1) assert _received with pytest.raises(socket.timeout): connection.drain_events(timeout=0.01) finally: channel.close() def test_purge(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() for i in range(10): producer.publish({'hello': f'world-{i}'}) assert channel._size('test_Redis') == 10 assert self.queue(channel).purge() == 10 channel.close() def test_db_values(self): Connection(virtual_host=1, transport=Transport).channel() Connection(virtual_host='1', 
transport=Transport).channel() Connection(virtual_host='/1', transport=Transport).channel() with pytest.raises(Exception): Connection('redis:///foo').channel() def test_db_port(self): c1 = Connection(port=None, transport=Transport).channel() c1.close() c2 = Connection(port=9999, transport=Transport).channel() c2.close() def test_close_poller_not_active(self): c = Connection(transport=Transport).channel() cycle = c.connection.cycle c.client.connection c.close() assert c not in cycle._channels def test_close_ResponseError(self): c = Connection(transport=Transport).channel() c.client.bgsave_raises_ResponseError = True c.close() def test_close_disconnects(self): c = Connection(transport=Transport).channel() conn1 = c.client.connection conn2 = c.subclient.connection c.close() assert conn1.disconnected assert conn2.disconnected def test_close_in_poll(self): c = Connection(transport=Transport).channel() conn1 = c.client.connection conn1._sock.data = [('BRPOP', ('test_Redis',))] c._in_poll = True c.close() assert conn1.disconnected assert conn1._sock.data == [] def test_get__Empty(self): channel = self.connection.channel() with pytest.raises(Empty): channel._get('does-not-exist') channel.close() @pytest.mark.ensured_modules(*_redis_modules()) def test_get_client(self, module_exists): # with module_exists(*_redis_modules()): conn = Connection(transport=Transport) chan = conn.channel() assert chan.Client assert chan.ResponseError assert conn.transport.connection_errors assert conn.transport.channel_errors def test_check_at_least_we_try_to_connect_and_fail(self): import redis connection = Connection('redis://localhost:65534/') with pytest.raises(redis.exceptions.ConnectionError): chan = connection.channel() chan._size('some_queue') class test_MultiChannelPoller: def setup_method(self): self.Poller = redis.MultiChannelPoller def test_on_poll_start(self): p = self.Poller() p._channels = [] p.on_poll_start() p._register_BRPOP = Mock(name='_register_BRPOP') p._register_LISTEN = Mock(name='_register_LISTEN') chan1 = Mock(name='chan1') p._channels = [chan1] chan1.active_queues = [] chan1.active_fanout_queues = [] p.on_poll_start() chan1.active_queues = ['q1'] chan1.active_fanout_queues = ['q2'] chan1.qos.can_consume.return_value = False p.on_poll_start() p._register_LISTEN.assert_called_with(chan1) p._register_BRPOP.assert_not_called() chan1.qos.can_consume.return_value = True p._register_LISTEN.reset_mock() p.on_poll_start() p._register_BRPOP.assert_called_with(chan1) p._register_LISTEN.assert_called_with(chan1) def test_on_poll_init(self): p = self.Poller() chan1 = Mock(name='chan1') p._channels = [] poller = Mock(name='poller') p.on_poll_init(poller) assert p.poller is poller p._channels = [chan1] p.on_poll_init(poller) chan1.qos.restore_visible.assert_called_with( num=chan1.unacked_restore_limit, ) def test_handle_event(self): p = self.Poller() chan = Mock(name='chan') p._fd_to_chan[13] = chan, 'BRPOP' chan.handlers = {'BRPOP': Mock(name='BRPOP')} chan.qos.can_consume.return_value = False p.handle_event(13, redis.READ) chan.handlers['BRPOP'].assert_not_called() chan.qos.can_consume.return_value = True p.handle_event(13, redis.READ) chan.handlers['BRPOP'].assert_called_with() p.handle_event(13, redis.ERR) chan._poll_error.assert_called_with('BRPOP') p.handle_event(13, ~(redis.READ | redis.ERR)) def test_fds(self): p = self.Poller() p._fd_to_chan = {1: 2} assert p.fds == p._fd_to_chan def test_close_unregisters_fds(self): p = self.Poller() poller = p.poller = Mock() p._chan_to_sock.update({1: 1, 2: 2, 3: 
3}) p.close() assert poller.unregister.call_count == 3 u_args = poller.unregister.call_args_list assert sorted(u_args) == [ ((1,), {}), ((2,), {}), ((3,), {}), ] def test_close_when_unregister_raises_KeyError(self): p = self.Poller() p.poller = Mock() p._chan_to_sock.update({1: 1}) p.poller.unregister.side_effect = KeyError(1) p.close() def test_close_resets_state(self): p = self.Poller() p.poller = Mock() p._channels = Mock() p._fd_to_chan = Mock() p._chan_to_sock = Mock() p._chan_to_sock.itervalues.return_value = [] p._chan_to_sock.values.return_value = [] # py3k p.close() p._channels.clear.assert_called_with() p._fd_to_chan.clear.assert_called_with() p._chan_to_sock.clear.assert_called_with() def test_register_when_registered_reregisters(self): p = self.Poller() p.poller = Mock() channel, client, type = Mock(), Mock(), Mock() sock = client.connection._sock = Mock() sock.fileno.return_value = 10 p._chan_to_sock = {(channel, client, type): 6} p._register(channel, client, type) p.poller.unregister.assert_called_with(6) assert p._fd_to_chan[10] == (channel, type) assert p._chan_to_sock[(channel, client, type)] == sock p.poller.register.assert_called_with(sock, p.eventflags) # when client not connected yet client.connection._sock = None def after_connected(): client.connection._sock = Mock() client.connection.connect.side_effect = after_connected p._register(channel, client, type) client.connection.connect.assert_called_with() def test_register_BRPOP(self): p = self.Poller() channel = Mock() channel.client.connection._sock = None p._register = Mock() channel._in_poll = False p._register_BRPOP(channel) assert channel._brpop_start.call_count == 1 assert p._register.call_count == 1 channel.client.connection._sock = Mock() p._chan_to_sock[(channel, channel.client, 'BRPOP')] = True channel._in_poll = True p._register_BRPOP(channel) assert channel._brpop_start.call_count == 1 assert p._register.call_count == 1 def test_register_LISTEN(self): p = self.Poller() channel = Mock() channel.subclient.connection._sock = None channel._in_listen = False p._register = Mock() p._register_LISTEN(channel) p._register.assert_called_with(channel, channel.subclient, 'LISTEN') assert p._register.call_count == 1 assert channel._subscribe.call_count == 1 channel._in_listen = True p._chan_to_sock[(channel, channel.subclient, 'LISTEN')] = 3 channel.subclient.connection._sock = Mock() p._register_LISTEN(channel) assert p._register.call_count == 1 assert channel._subscribe.call_count == 1 def create_get(self, events=None, queues=None, fanouts=None): _pr = [] if events is None else events _aq = [] if queues is None else queues _af = [] if fanouts is None else fanouts p = self.Poller() p.poller = Mock() p.poller.poll.return_value = _pr p._register_BRPOP = Mock() p._register_LISTEN = Mock() channel = Mock() p._channels = [channel] channel.active_queues = _aq channel.active_fanout_queues = _af return p, channel def test_get_no_actions(self): p, channel = self.create_get() with pytest.raises(redis.Empty): p.get(Mock()) def test_qos_reject(self): p, channel = self.create_get() qos = redis.QoS(channel) qos._remove_from_indices = Mock(name='_remove_from_indices') qos.reject(1234) qos._remove_from_indices.assert_called_with(1234) def test_qos_requeue(self): p, channel = self.create_get() qos = redis.QoS(channel) qos.restore_by_tag = Mock(name='restore_by_tag') qos.reject(1234, True) qos.restore_by_tag.assert_called_with(1234, leftmost=True) def test_get_brpop_qos_allow(self): p, channel = self.create_get(queues=['a_queue']) 
channel.qos.can_consume.return_value = True with pytest.raises(redis.Empty): p.get(Mock()) p._register_BRPOP.assert_called_with(channel) def test_get_brpop_qos_disallow(self): p, channel = self.create_get(queues=['a_queue']) channel.qos.can_consume.return_value = False with pytest.raises(redis.Empty): p.get(Mock()) p._register_BRPOP.assert_not_called() def test_get_listen(self): p, channel = self.create_get(fanouts=['f_queue']) with pytest.raises(redis.Empty): p.get(Mock()) p._register_LISTEN.assert_called_with(channel) def test_get_receives_ERR(self): p, channel = self.create_get(events=[(1, eventio.ERR)]) p._fd_to_chan[1] = (channel, 'BRPOP') with pytest.raises(redis.Empty): p.get(Mock()) channel._poll_error.assert_called_with('BRPOP') def test_get_receives_multiple(self): p, channel = self.create_get(events=[(1, eventio.ERR), (1, eventio.ERR)]) p._fd_to_chan[1] = (channel, 'BRPOP') with pytest.raises(redis.Empty): p.get(Mock()) channel._poll_error.assert_called_with('BRPOP') class test_Mutex: def test_mutex(self, lock_id='xxx'): client = Mock(name='client') lock = client.lock.return_value = Mock(name='lock') # Won lock.acquire.return_value = True held = False with redis.Mutex(client, 'foo1', 100): held = True assert held lock.acquire.assert_called_with(blocking=False) client.lock.assert_called_with('foo1', timeout=100) client.reset_mock() lock.reset_mock() # Did not win lock.acquire.return_value = False held = False with pytest.raises(redis.MutexHeld): with redis.Mutex(client, 'foo1', 100): held = True assert not held lock.acquire.assert_called_with(blocking=False) client.lock.assert_called_with('foo1', timeout=100) client.reset_mock() lock.reset_mock() # Wins but raises LockNotOwnedError (and that is ignored) lock.acquire.return_value = True lock.release.side_effect = redis.redis.exceptions.LockNotOwnedError() held = False with redis.Mutex(client, 'foo1', 100): held = True assert held class test_RedisSentinel: def test_method_called(self): from kombu.transport.redis import SentinelChannel with patch.object(SentinelChannel, '_sentinel_managed_pool') as p: connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'master_name': 'not_important', }, ) connection.channel() p.assert_called() def test_keyprefix_fanout(self): from kombu.transport.redis import SentinelChannel with patch.object(SentinelChannel, '_sentinel_managed_pool'): connection = Connection( 'sentinel://localhost:65532/1', transport_options={ 'master_name': 'not_important', }, ) channel = connection.channel() assert channel.keyprefix_fanout == '/1.' 
def test_getting_master_from_sentinel(self): with patch('redis.sentinel.Sentinel') as patched: connection = Connection( 'sentinel://localhost/;' 'sentinel://localhost:65532/;' 'sentinel://user@localhost:65533/;' 'sentinel://:password@localhost:65534/;' 'sentinel://user:password@localhost:65535/;', transport_options={ 'master_name': 'not_important', }, ) connection.channel() patched.assert_called_once_with( [ ('localhost', 26379), ('localhost', 65532), ('localhost', 65533), ('localhost', 65534), ('localhost', 65535), ], connection_class=ANY, db=0, max_connections=10, min_other_sentinels=0, password=None, sentinel_kwargs=None, socket_connect_timeout=None, socket_keepalive=None, socket_keepalive_options=None, socket_timeout=None, username=None, retry_on_timeout=None) master_for = patched.return_value.master_for master_for.assert_called() master_for.assert_called_with('not_important', ANY) master_for().connection_pool.get_connection.assert_called() def test_getting_master_from_sentinel_single_node(self): with patch('redis.sentinel.Sentinel') as patched: connection = Connection( 'sentinel://localhost:65532/', transport_options={ 'master_name': 'not_important', }, ) connection.channel() patched.assert_called_once_with( [('localhost', 65532)], connection_class=ANY, db=0, max_connections=10, min_other_sentinels=0, password=None, sentinel_kwargs=None, socket_connect_timeout=None, socket_keepalive=None, socket_keepalive_options=None, socket_timeout=None, username=None, retry_on_timeout=None) master_for = patched.return_value.master_for master_for.assert_called() master_for.assert_called_with('not_important', ANY) master_for().connection_pool.get_connection.assert_called() def test_can_create_connection(self): from redis.exceptions import ConnectionError connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'master_name': 'not_important', }, ) with pytest.raises(ConnectionError): connection.channel() def test_missing_master_name_transport_option(self): connection = Connection( 'sentinel://localhost:65534/', ) with patch('redis.sentinel.Sentinel'), \ pytest.raises(ValueError) as excinfo: connection.connect() expected = "'master_name' transport option must be specified." 
assert expected == excinfo.value.args[0] def test_sentinel_with_ssl(self): ssl_params = { 'ssl_cert_reqs': 2, 'ssl_ca_certs': '/foo/ca.pem', 'ssl_certfile': '/foo/cert.crt', 'ssl_keyfile': '/foo/pkey.key' } with patch('redis.sentinel.Sentinel'): with Connection( 'sentinel://', transport_options={'master_name': 'not_important'}, ssl=ssl_params) as conn: params = conn.default_channel._connparams() assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs'] assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs'] assert params['ssl_certfile'] == ssl_params['ssl_certfile'] assert params['ssl_keyfile'] == ssl_params['ssl_keyfile'] assert params.get('ssl') is None from kombu.transport.redis import SentinelManagedSSLConnection assert (params['connection_class'] is SentinelManagedSSLConnection) def test_can_create_connection_with_global_keyprefix(self): from redis.exceptions import ConnectionError try: connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'global_keyprefix': 'some_prefix', 'master_name': 'not_important', }, ) with pytest.raises(ConnectionError): connection.channel() finally: connection.close() def test_can_create_correct_mixin_with_global_keyprefix(self): from kombu.transport.redis import GlobalKeyPrefixMixin with patch('redis.sentinel.Sentinel'): connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'global_keyprefix': 'some_prefix', 'master_name': 'not_important', }, ) assert isinstance( connection.channel().client, GlobalKeyPrefixMixin ) assert ( connection.channel().client.global_keyprefix == 'some_prefix' ) connection.close() class test_GlobalKeyPrefixMixin: from kombu.transport.redis import GlobalKeyPrefixMixin global_keyprefix = "prefix_" mixin = GlobalKeyPrefixMixin() mixin.global_keyprefix = global_keyprefix def test_prefix_simple_args(self): for command in self.mixin.PREFIXED_SIMPLE_COMMANDS: prefixed_args = self.mixin._prefix_args([command, "fake_key"]) assert prefixed_args == [ command, f"{self.global_keyprefix}fake_key" ] def test_prefix_delete_args(self): prefixed_args = self.mixin._prefix_args([ "DEL", "fake_key", "fake_key2", "fake_key3" ]) assert prefixed_args == [ "DEL", f"{self.global_keyprefix}fake_key", f"{self.global_keyprefix}fake_key2", f"{self.global_keyprefix}fake_key3", ] def test_prefix_brpop_args(self): prefixed_args = self.mixin._prefix_args([ "BRPOP", "fake_key", "fake_key2", "not_prefixed" ]) assert prefixed_args == [ "BRPOP", f"{self.global_keyprefix}fake_key", f"{self.global_keyprefix}fake_key2", "not_prefixed", ] def test_prefix_evalsha_args(self): prefixed_args = self.mixin._prefix_args([ "EVALSHA", "not_prefixed", "not_prefixed", "fake_key", "not_prefixed", ]) assert prefixed_args == [ "EVALSHA", "not_prefixed", "not_prefixed", f"{self.global_keyprefix}fake_key", "not_prefixed", ] kombu-5.5.3/t/unit/transport/test_sqlalchemy.py000066400000000000000000000035751477772317200217250ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock, patch import pytest from kombu import Connection from kombu.exceptions import OperationalError pytest.importorskip('sqlalchemy') class test_SqlAlchemy: def test_url_parser(self): with patch('kombu.transport.sqlalchemy.Channel._open'): url = 'sqlalchemy+sqlite:///celerydb.sqlite' Connection(url).connect() url = 'sqla+sqlite:///celerydb.sqlite' Connection(url).connect() url = 'sqlb+sqlite:///celerydb.sqlite' with pytest.raises(KeyError): Connection(url).connect() def test_simple_queueing(self): conn = Connection( 
'sqlalchemy+sqlite:///:memory:', transport_options={ "callback": Mock(), "errback": Mock(), "max_retries": 20, "interval_start": 1, "interval_step": 2, "interval_max": 30, "retry_errors": (OperationalError,) }) conn.connect() try: channel = conn.channel() assert channel.queue_cls.__table__.name == 'kombu_queue' assert channel.message_cls.__table__.name == 'kombu_message' channel._put('celery', 'DATA_SIMPLE_QUEUEING') assert channel._get('celery') == 'DATA_SIMPLE_QUEUEING' finally: conn.release() def test_clone(self): hostname = 'sqlite:///celerydb.sqlite' x = Connection('+'.join(['sqla', hostname])) try: assert x.uri_prefix == 'sqla' assert x.hostname == hostname clone = x.clone() try: assert clone.hostname == hostname assert clone.uri_prefix == 'sqla' finally: clone.release() finally: x.release() kombu-5.5.3/t/unit/transport/test_transport.py000066400000000000000000000017661477772317200216170ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock, patch from kombu import transport class test_supports_librabbitmq: def test_eventlet(self): with patch('kombu.transport._detect_environment') as de: de.return_value = 'eventlet' assert not transport.supports_librabbitmq() class test_transport: def test_resolve_transport(self): from kombu.transport.memory import Transport assert transport.resolve_transport( 'kombu.transport.memory:Transport') is Transport assert transport.resolve_transport(Transport) is Transport def test_resolve_transport_alias_callable(self): m = transport.TRANSPORT_ALIASES['George'] = Mock(name='lazyalias') try: transport.resolve_transport('George') m.assert_called_with() finally: transport.TRANSPORT_ALIASES.pop('George') def test_resolve_transport_alias(self): assert transport.resolve_transport('pyamqp') kombu-5.5.3/t/unit/transport/test_zookeeper.py000066400000000000000000000020571477772317200215600ustar00rootroot00000000000000from __future__ import annotations import pytest from kombu import Connection from kombu.transport import zookeeper pytest.importorskip('kazoo') class test_Channel: def setup_method(self): self.connection = self.create_connection() self.channel = self.connection.default_channel def create_connection(self, **kwargs): return Connection(transport=zookeeper.Transport, **kwargs) def teardown_method(self): self.connection.close() def test_put_puts_bytes_to_queue(self): class AssertQueue: def put(self, value, priority): assert isinstance(value, bytes) self.channel._queues['foo'] = AssertQueue() self.channel._put(queue='foo', message='bar') @pytest.mark.parametrize('input,expected', ( ('', '/'), ('/root', '/root'), ('/root/', '/root'), )) def test_virtual_host_normalization(self, input, expected): with self.create_connection(virtual_host=input) as conn: assert conn.default_channel._vhost == expected kombu-5.5.3/t/unit/transport/virtual/000077500000000000000000000000001477772317200176265ustar00rootroot00000000000000kombu-5.5.3/t/unit/transport/virtual/__init__.py000066400000000000000000000000001477772317200217250ustar00rootroot00000000000000kombu-5.5.3/t/unit/transport/virtual/test_base.py000066400000000000000000000514471477772317200221640ustar00rootroot00000000000000from __future__ import annotations import io import socket import warnings from array import array from time import monotonic from unittest.mock import MagicMock, Mock, patch import pytest from kombu import Connection from kombu.compression import compress from kombu.exceptions import ChannelError, ResourceError from kombu.transport import virtual from 
kombu.utils.uuid import uuid PRINT_FQDN = 'builtins.print' def client(**kwargs): return Connection(transport='kombu.transport.virtual:Transport', **kwargs) def memory_client(): return Connection(transport='memory') def test_BrokerState(): s = virtual.BrokerState() assert hasattr(s, 'exchanges') t = virtual.BrokerState(exchanges=16) assert t.exchanges == 16 class test_QoS: def setup_method(self): self.q = virtual.QoS(client().channel(), prefetch_count=10) def teardown_method(self): self.q._on_collect.cancel() def test_constructor(self): assert self.q.channel assert self.q.prefetch_count assert not self.q._delivered.restored assert self.q._on_collect def test_restore_visible__interface(self): qos = virtual.QoS(client().channel()) qos.restore_visible() def test_can_consume(self, stdouts): stderr = io.StringIO() _restored = [] class RestoreChannel(virtual.Channel): do_restore = True def _restore(self, message): _restored.append(message) assert self.q.can_consume() for i in range(self.q.prefetch_count - 1): self.q.append(i, uuid()) assert self.q.can_consume() self.q.append(i + 1, uuid()) assert not self.q.can_consume() tag1 = next(iter(self.q._delivered)) self.q.ack(tag1) assert self.q.can_consume() tag2 = uuid() self.q.append(i + 2, tag2) assert not self.q.can_consume() self.q.reject(tag2) assert self.q.can_consume() self.q.channel = RestoreChannel(self.q.channel.connection) tag3 = uuid() self.q.append(i + 3, tag3) self.q.reject(tag3, requeue=True) self.q._flush() assert self.q._delivered assert not self.q._delivered.restored self.q.restore_unacked_once(stderr=stderr) assert _restored == [11, 9, 8, 7, 6, 5, 4, 3, 2, 1] assert self.q._delivered.restored assert not self.q._delivered self.q.restore_unacked_once(stderr=stderr) self.q._delivered.restored = False self.q.restore_unacked_once(stderr=stderr) assert stderr.getvalue() assert not stdouts.stdout.getvalue() self.q.restore_at_shutdown = False self.q.restore_unacked_once() def test_get(self): self.q._delivered['foo'] = 1 assert self.q.get('foo') == 1 class test_Message: def test_create(self): c = client().channel() data = c.prepare_message('the quick brown fox...') tag = data['properties']['delivery_tag'] = uuid() message = c.message_to_python(data) assert isinstance(message, virtual.Message) assert message is c.message_to_python(message) if message.errors: message._reraise_error() assert message.body == b'the quick brown fox...' assert message.delivery_tag, tag def test_create_no_body(self): virtual.Message(channel=Mock(), payload={ 'body': None, 'properties': {'delivery_tag': 1}, }) def test_serializable(self): c = client().channel() body, content_type = compress('the quick brown fox...', 'gzip') data = c.prepare_message(body, headers={'compression': content_type}) tag = data['properties']['delivery_tag'] = uuid() message = c.message_to_python(data) dict_ = message.serializable() assert dict_['body'] == b'the quick brown fox...' 
assert dict_['properties']['delivery_tag'] == tag assert 'compression' not in dict_['headers'] class test_AbstractChannel: def test_get(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._get('queue') def test_put(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._put('queue', 'm') def test_size(self): assert virtual.AbstractChannel()._size('queue') == 0 def test_purge(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._purge('queue') def test_delete(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._delete('queue') def test_new_queue(self): assert virtual.AbstractChannel()._new_queue('queue') is None def test_has_queue(self): assert virtual.AbstractChannel()._has_queue('queue') def test_poll(self): cycle = Mock(name='cycle') assert virtual.AbstractChannel()._poll(cycle, Mock()) cycle.get.assert_called() class test_Channel: def setup_method(self): self.channel = client().channel() def teardown_method(self): if self.channel._qos is not None: self.channel._qos._on_collect.cancel() def test_get_free_channel_id(self): conn = client() channel = conn.channel() assert channel.channel_id == 1 assert channel._get_free_channel_id() == 2 def test_get_free_channel_id__exceeds_channel_max(self): conn = client() conn.transport.channel_max = 2 channel = conn.channel() channel._get_free_channel_id() with pytest.raises(ResourceError): channel._get_free_channel_id() def test_exchange_bind_interface(self): with pytest.raises(NotImplementedError): self.channel.exchange_bind('dest', 'src', 'key') def test_exchange_unbind_interface(self): with pytest.raises(NotImplementedError): self.channel.exchange_unbind('dest', 'src', 'key') def test_queue_unbind_interface(self): self.channel.queue_unbind('dest', 'ex', 'key') def test_management(self): m = self.channel.connection.client.get_manager() assert m m.get_bindings() m.close() def test_exchange_declare(self): c = self.channel with pytest.raises(ChannelError): c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True, passive=True) c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True) c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True, passive=True) assert 'test_exchange_declare' in c.state.exchanges # can declare again with same values c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True) assert 'test_exchange_declare' in c.state.exchanges # using different values raises NotEquivalentError with pytest.raises(virtual.NotEquivalentError): c.exchange_declare('test_exchange_declare', 'direct', durable=False, auto_delete=True) def test_exchange_delete(self, ex='test_exchange_delete'): class PurgeChannel(virtual.Channel): purged = [] def _purge(self, queue): self.purged.append(queue) c = PurgeChannel(self.channel.connection) c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) assert ex in c.state.exchanges assert not c.state.has_binding(ex, ex, ex) # no bindings yet c.exchange_delete(ex) assert ex not in c.state.exchanges c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) c.queue_declare(ex) c.queue_bind(ex, ex, ex) assert c.state.has_binding(ex, ex, ex) c.exchange_delete(ex) assert not c.state.has_binding(ex, ex, ex) assert ex in c.purged def test_queue_delete__if_empty(self, n='test_queue_delete__if_empty'): class PurgeChannel(virtual.Channel): purged = [] size = 30 def _purge(self, queue): self.purged.append(queue) def 
_size(self, queue): return self.size c = PurgeChannel(self.channel.connection) c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) # tests code path that returns if queue already bound. c.queue_bind(n, n, n) c.queue_delete(n, if_empty=True) assert c.state.has_binding(n, n, n) c.size = 0 c.queue_delete(n, if_empty=True) assert not c.state.has_binding(n, n, n) assert n in c.purged def test_queue_purge(self, n='test_queue_purge'): class PurgeChannel(virtual.Channel): purged = [] def _purge(self, queue): self.purged.append(queue) c = PurgeChannel(self.channel.connection) c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) c.queue_purge(n) assert n in c.purged def test_basic_publish__anon_exchange(self): c = memory_client().channel() msg = MagicMock(name='msg') c.encode_body = Mock(name='c.encode_body') c.encode_body.return_value = (1, 2) c._put = Mock(name='c._put') c.basic_publish(msg, None, 'rkey', kw=1) c._put.assert_called_with('rkey', msg, kw=1) def test_basic_publish_unique_delivery_tags(self, n='test_uniq_tag'): c1 = memory_client().channel() c2 = memory_client().channel() for c in (c1, c2): c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) m1 = c1.prepare_message('George Costanza') m2 = c2.prepare_message('Elaine Marie Benes') c1.basic_publish(m1, n, n) c2.basic_publish(m2, n, n) r1 = c1.message_to_python(c1.basic_get(n)) r2 = c2.message_to_python(c2.basic_get(n)) assert r1.delivery_tag != r2.delivery_tag with pytest.raises(ValueError): int(r1.delivery_tag) with pytest.raises(ValueError): int(r2.delivery_tag) def test_basic_publish__get__consume__restore(self, n='test_basic_publish'): c = memory_client().channel() c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) c.queue_declare(n + '2') c.queue_bind(n + '2', n, n) messages = [] c.connection._deliver = Mock(name='_deliver') def on_deliver(message, queue): messages.append(message) c.connection._deliver.side_effect = on_deliver m = c.prepare_message('nthex quick brown fox...') c.basic_publish(m, n, n) r1 = c.message_to_python(c.basic_get(n)) assert r1 assert r1.body == b'nthex quick brown fox...' assert c.basic_get(n) is None consumer_tag = uuid() c.basic_consume(n + '2', False, consumer_tag=consumer_tag, callback=lambda *a: None) assert n + '2' in c._active_queues c.drain_events() r2 = c.message_to_python(messages[-1]) assert r2.body == b'nthex quick brown fox...' assert r2.delivery_info['exchange'] == n assert r2.delivery_info['routing_key'] == n with pytest.raises(virtual.Empty): c.drain_events() c.basic_cancel(consumer_tag) c._restore(r2) r3 = c.message_to_python(c.basic_get(n)) assert r3 assert r3.body == b'nthex quick brown fox...' 
assert c.basic_get(n) is None def test_basic_ack(self): class MockQoS(virtual.QoS): was_acked = False def ack(self, delivery_tag): self.was_acked = True self.channel._qos = MockQoS(self.channel) self.channel.basic_ack('foo') assert self.channel._qos.was_acked def test_basic_recover__requeue(self): class MockQoS(virtual.QoS): was_restored = False def restore_unacked(self): self.was_restored = True self.channel._qos = MockQoS(self.channel) self.channel.basic_recover(requeue=True) assert self.channel._qos.was_restored def test_restore_unacked_raises_BaseException(self): q = self.channel.qos q._flush = Mock() q._delivered = {1: 1} q.channel._restore = Mock() q.channel._restore.side_effect = SystemExit errors = q.restore_unacked() assert isinstance(errors[0][0], SystemExit) assert errors[0][1] == 1 assert not q._delivered @patch('kombu.transport.virtual.base.emergency_dump_state') @patch(PRINT_FQDN) def test_restore_unacked_once_when_unrestored(self, print_, emergency_dump_state): q = self.channel.qos q._flush = Mock() class State(dict): restored = False q._delivered = State({1: 1}) ru = q.restore_unacked = Mock() exc = None try: raise KeyError() except KeyError as exc_: exc = exc_ ru.return_value = [(exc, 1)] self.channel.do_restore = True q.restore_unacked_once() print_.assert_called() emergency_dump_state.assert_called() def test_basic_recover(self): with pytest.raises(NotImplementedError): self.channel.basic_recover(requeue=False) def test_basic_reject(self): class MockQoS(virtual.QoS): was_rejected = False def reject(self, delivery_tag, requeue=False): self.was_rejected = True self.channel._qos = MockQoS(self.channel) self.channel.basic_reject('foo') assert self.channel._qos.was_rejected def test_basic_qos(self): self.channel.basic_qos(prefetch_count=128) assert self.channel._qos.prefetch_count == 128 def test_lookup__undeliverable(self, n='test_lookup__undeliverable'): warnings.resetwarnings() with warnings.catch_warnings(record=True) as log: assert self.channel._lookup(n, n, 'ae.undeliver') == [ 'ae.undeliver', ] assert log assert 'could not be delivered' in log[0].message.args[0] def test_context(self): with self.channel as x: assert x is self.channel assert x.closed def test_cycle_property(self): assert self.channel.cycle def test_flow(self): with pytest.raises(NotImplementedError): self.channel.flow(False) def test_close_when_no_connection(self): self.channel.connection = None self.channel.close() assert self.channel.closed def test_drain_events_has_get_many(self): c = self.channel c._get_many = Mock() c._poll = Mock() c._consumers = [1] c._qos = Mock() c._qos.can_consume.return_value = True c.drain_events(timeout=10.0) c._get_many.assert_called_with(c._active_queues, timeout=10.0) def test_get_exchanges(self): self.channel.exchange_declare(exchange='unique_name') assert self.channel.get_exchanges() def test_basic_cancel_not_in_active_queues(self): c = self.channel c._consumers.add('x') c._tag_to_queue['x'] = 'foo' c._active_queues = Mock() c._active_queues.remove.side_effect = ValueError() c.basic_cancel('x') c._active_queues.remove.assert_called_with('foo') def test_basic_cancel_unknown_ctag(self): assert self.channel.basic_cancel('unknown-tag') is None def test_list_bindings(self): c = self.channel c.exchange_declare(exchange='unique_name') c.queue_declare(queue='q') c.queue_bind(queue='q', exchange='unique_name', routing_key='rk') assert ('q', 'unique_name', 'rk') in list(c.list_bindings()) def test_after_reply_message_received(self): c = self.channel c.queue_delete = Mock() 
c.after_reply_message_received('foo') c.queue_delete.assert_called_with('foo') def test_queue_delete_unknown_queue(self): assert self.channel.queue_delete('xiwjqjwel') is None def test_queue_declare_passive(self): has_queue = self.channel._has_queue = Mock() has_queue.return_value = False with pytest.raises(ChannelError): self.channel.queue_declare(queue='21wisdjwqe', passive=True) def test_get_message_priority(self): def _message(priority): return self.channel.prepare_message( 'the message with priority', priority=priority, ) assert self.channel._get_message_priority(_message(5)) == 5 assert self.channel._get_message_priority( _message(self.channel.min_priority - 10) ) == self.channel.min_priority assert self.channel._get_message_priority( _message(self.channel.max_priority + 10), ) == self.channel.max_priority assert self.channel._get_message_priority( _message('foobar'), ) == self.channel.default_priority assert self.channel._get_message_priority( _message(2), reverse=True, ) == self.channel.max_priority - 2 class test_Transport: def setup_method(self): self.transport = client().transport def test_state_is_transport_specific(self): # Tests that each Transport of Connection instance # has own state attribute conn1 = client() conn2 = client() assert conn1.transport.state != conn2.transport.state def test_custom_polling_interval(self): x = client(transport_options={'polling_interval': 32.3}) assert x.transport.polling_interval == 32.3 def test_timeout_over_polling_interval(self): x = client(transport_options=dict(polling_interval=60)) start = monotonic() with pytest.raises(socket.timeout): x.transport.drain_events(x, timeout=.5) assert monotonic() - start < 60 def test_close_connection(self): c1 = self.transport.create_channel(self.transport) c2 = self.transport.create_channel(self.transport) assert len(self.transport.channels) == 2 self.transport.close_connection(self.transport) assert not self.transport.channels del c1 # so pyflakes doesn't complain del c2 def test_create_channel(self): """Ensure create_channel can create channels successfully.""" assert self.transport.channels == [] created_channel = self.transport.create_channel(self.transport) assert self.transport.channels == [created_channel] def test_close_channel(self): """Ensure close_channel actually removes the channel and updates _used_channel_ids. 
""" assert self.transport._used_channel_ids == array('H') created_channel = self.transport.create_channel(self.transport) assert self.transport._used_channel_ids == array('H', (1,)) self.transport.close_channel(created_channel) assert self.transport.channels == [] assert self.transport._used_channel_ids == array('H') def test_drain_channel(self): channel = self.transport.create_channel(self.transport) with pytest.raises(virtual.Empty): self.transport._drain_channel(channel, Mock()) def test__deliver__no_queue(self): with pytest.raises(KeyError): self.transport._deliver(Mock(name='msg'), queue=None) def test__reject_inbound_message(self): channel = Mock(name='channel') self.transport.channels = [None, channel] self.transport._reject_inbound_message({'foo': 'bar'}) channel.Message.assert_called_with({'foo': 'bar'}, channel=channel) channel.qos.append.assert_called_with( channel.Message(), channel.Message().delivery_tag, ) channel.basic_reject.assert_called_with( channel.Message().delivery_tag, requeue=True, ) def test_on_message_ready(self): channel = Mock(name='channel') msg = Mock(name='msg') callback = Mock(name='callback') self.transport._callbacks = {'q1': callback} self.transport.on_message_ready(channel, msg, queue='q1') callback.assert_called_with(msg) def test_on_message_ready__no_queue(self): with pytest.raises(KeyError): self.transport.on_message_ready( Mock(name='channel'), Mock(name='msg'), queue=None) def test_on_message_ready__no_callback(self): self.transport._callbacks = {} with pytest.raises(KeyError): self.transport.on_message_ready( Mock(name='channel'), Mock(name='msg'), queue='q1') kombu-5.5.3/t/unit/transport/virtual/test_exchange.py000066400000000000000000000126751477772317200230340ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu import Connection from kombu.transport.virtual import exchange from t.mocks import Transport class ExchangeCase: type = None def setup_method(self): if self.type: self.e = self.type(Connection(transport=Transport).channel()) class test_Direct(ExchangeCase): type = exchange.DirectExchange table = [('rFoo', None, 'qFoo'), ('rFoo', None, 'qFox'), ('rBar', None, 'qBar'), ('rBaz', None, 'qBaz')] @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'rFoo', None, {'qFoo', 'qFox'}), ('eMoz', 'rMoz', 'DEFAULT', set()), ('eBar', 'rBar', None, {'qBar'}), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e.lookup( self.table, exchange, routing_key, default) == expected class test_Fanout(ExchangeCase): type = exchange.FanoutExchange table = [(None, None, 'qFoo'), (None, None, 'qFox'), (None, None, 'qBar')] def test_lookup(self): assert self.e.lookup(self.table, 'eFoo', 'rFoo', None) == { 'qFoo', 'qFox', 'qBar', } def test_deliver_when_fanout_supported(self): self.e.channel = Mock() self.e.channel.supports_fanout = True message = Mock() self.e.deliver(message, 'exchange', 'rkey') self.e.channel._put_fanout.assert_called_with( 'exchange', message, 'rkey', ) def test_deliver_when_fanout_unsupported(self): self.e.channel = Mock() self.e.channel.supports_fanout = False self.e.deliver(Mock(), 'exchange', None) self.e.channel._put_fanout.assert_not_called() class test_Topic(ExchangeCase): type = exchange.TopicExchange table = [ ('stock.#', None, 'rFoo'), ('stock.us.*', None, 'rBar'), ] def setup_method(self): super().setup_method() self.table = [(rkey, self.e.key_to_pattern(rkey), queue) for rkey, _, queue in self.table] def 
test_prepare_bind(self): x = self.e.prepare_bind('qFoo', 'eFoo', 'stock.#', {}) assert x == ('stock.#', r'^stock\..*?$', 'qFoo') @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'stock.us.nasdaq', None, {'rFoo', 'rBar'}), ('eFoo', 'stock.europe.OSE', None, {'rFoo'}), ('eFoo', 'stockxeuropexOSE', None, set()), ('eFoo', 'candy.schleckpulver.snap_crackle', None, set()), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e.lookup( self.table, exchange, routing_key, default) == expected assert self.e._compiled def test_deliver(self): self.e.channel = Mock() self.e.channel._lookup.return_value = ('a', 'b') message = Mock() self.e.deliver(message, 'exchange', 'rkey') assert self.e.channel._put.call_args_list == [ (('a', message), {}), (('b', message), {}), ] class test_TopicMultibind(ExchangeCase): # Testing message delivery in case of multiple overlapping # bindings for the same queue. As AMQP states, in case of # overlapping bindings, a message must be delivered once to # each matching queue. type = exchange.TopicExchange table = [ ('stock', None, 'rFoo'), ('stock.#', None, 'rFoo'), ('stock.us.*', None, 'rFoo'), ('#', None, 'rFoo'), ] def setup_method(self): super().setup_method() self.table = [(rkey, self.e.key_to_pattern(rkey), queue) for rkey, _, queue in self.table] @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'stock.us.nasdaq', None, {'rFoo'}), ('eFoo', 'stock.europe.OSE', None, {'rFoo'}), ('eFoo', 'stockxeuropexOSE', None, {'rFoo'}), ('eFoo', 'candy.schleckpulver.snap_crackle', None, {'rFoo'}), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e._compiled assert self.e.lookup( self.table, exchange, routing_key, default) == expected class test_ExchangeType(ExchangeCase): type = exchange.ExchangeType def test_lookup(self): with pytest.raises(NotImplementedError): self.e.lookup([], 'eFoo', 'rFoo', None) def test_prepare_bind(self): assert self.e.prepare_bind('qFoo', 'eFoo', 'rFoo', {}) == ( 'rFoo', None, 'qFoo', ) e1 = { 'type': 'direct', 'durable': True, 'auto_delete': True, 'arguments': {}, } e2 = dict(e1, arguments={'expires': 3000}) @pytest.mark.parametrize('ex,eq,name,type,durable,auto_delete,arguments', [ (e1, True, 'eFoo', 'direct', True, True, {}), (e1, False, 'eFoo', 'topic', True, True, {}), (e1, False, 'eFoo', 'direct', False, True, {}), (e1, False, 'eFoo', 'direct', True, False, {}), (e1, False, 'eFoo', 'direct', True, True, {'expires': 3000}), (e2, True, 'eFoo', 'direct', True, True, {'expires': 3000}), (e2, False, 'eFoo', 'direct', True, True, {'expires': 6000}), ]) def test_equivalent( self, ex, eq, name, type, durable, auto_delete, arguments): is_eq = self.e.equivalent( ex, name, type, durable, auto_delete, arguments) assert is_eq if eq else not is_eq kombu-5.5.3/t/unit/utils/000077500000000000000000000000001477772317200152445ustar00rootroot00000000000000kombu-5.5.3/t/unit/utils/__init__.py000066400000000000000000000000001477772317200173430ustar00rootroot00000000000000kombu-5.5.3/t/unit/utils/test_amq_manager.py000066400000000000000000000024011477772317200211220ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import patch import pytest from kombu import Connection class test_get_manager: @pytest.mark.masked_modules('pyrabbit') def test_without_pyrabbit(self, mask_modules): with pytest.raises(ImportError): Connection('amqp://').get_manager() @pytest.mark.ensured_modules('pyrabbit') def test_with_pyrabbit(self, module_exists): 
with patch('pyrabbit.Client', create=True) as Client: manager = Connection('amqp://').get_manager() assert manager is not None Client.assert_called_with( 'localhost:15672', 'guest', 'guest', ) @pytest.mark.ensured_modules('pyrabbit') def test_transport_options(self, module_exists): with patch('pyrabbit.Client', create=True) as Client: manager = Connection('amqp://', transport_options={ 'manager_hostname': 'admin.mq.vandelay.com', 'manager_port': 808, 'manager_userid': 'george', 'manager_password': 'bosco', }).get_manager() assert manager is not None Client.assert_called_with( 'admin.mq.vandelay.com:808', 'george', 'bosco', ) kombu-5.5.3/t/unit/utils/test_compat.py000066400000000000000000000053661477772317200201520ustar00rootroot00000000000000from __future__ import annotations import socket import sys import types from unittest.mock import Mock, patch import pytest from kombu.utils import compat from kombu.utils.compat import entrypoints, maybe_fileno def test_entrypoints(): with patch( 'kombu.utils.compat.importlib_metadata.entry_points', create=True ) as iterep: eps = [Mock(), Mock()] iterep.return_value = ( {'kombu.test': eps} if sys.version_info < (3, 10) else eps) assert list(entrypoints('kombu.test')) if sys.version_info < (3, 10): iterep.assert_called_with() else: iterep.assert_called_with(group='kombu.test') eps[0].load.assert_called_with() eps[1].load.assert_called_with() def test_maybe_fileno(): assert maybe_fileno(3) == 3 f = Mock(name='file') assert maybe_fileno(f) is f.fileno() f.fileno.side_effect = ValueError() assert maybe_fileno(f) is None class test_detect_environment: def test_detect_environment(self): try: compat._environment = None X = compat.detect_environment() assert compat._environment == X Y = compat.detect_environment() assert Y == X finally: compat._environment = None @pytest.mark.ensured_modules('eventlet', 'eventlet.patcher') def test_detect_environment_eventlet(self, module_exists): with patch('eventlet.patcher.is_monkey_patched', create=True) as m: assert sys.modules['eventlet'] m.return_value = True env = compat._detect_environment() m.assert_called_with(socket) assert env == 'eventlet' @pytest.mark.ensured_modules('gevent') def test_detect_environment_gevent(self, module_exists): with patch('gevent.socket', create=True) as m: prev, socket.socket = socket.socket, m.socket try: assert sys.modules['gevent'] env = compat._detect_environment() assert env == 'gevent' finally: socket.socket = prev def test_detect_environment_no_eventlet_or_gevent(self): try: sys.modules['eventlet'] = types.ModuleType('eventlet') sys.modules['eventlet.patcher'] = types.ModuleType('patcher') assert compat._detect_environment() == 'default' finally: sys.modules.pop('eventlet.patcher', None) sys.modules.pop('eventlet', None) compat._detect_environment() try: sys.modules['gevent'] = types.ModuleType('gevent') assert compat._detect_environment() == 'default' finally: sys.modules.pop('gevent', None) compat._detect_environment() kombu-5.5.3/t/unit/utils/test_debug.py000066400000000000000000000031361477772317200177460ustar00rootroot00000000000000from __future__ import annotations import logging from unittest.mock import Mock, patch from kombu.utils.debug import Logwrapped, setup_logging class test_setup_logging: def test_adds_handlers_sets_level(self): with patch('kombu.utils.debug.get_logger') as get_logger: logger = get_logger.return_value = Mock() setup_logging(loggers=['kombu.test']) get_logger.assert_called_with('kombu.test') logger.addHandler.assert_called() 
logger.setLevel.assert_called_with(logging.DEBUG) class test_Logwrapped: def test_wraps(self): with patch('kombu.utils.debug.get_logger') as get_logger: logger = get_logger.return_value = Mock() W = Logwrapped(Mock(), 'kombu.test') get_logger.assert_called_with('kombu.test') assert W.instance is not None assert W.logger is logger W.instance.__repr__ = lambda s: 'foo' assert repr(W) == 'foo' W.instance.some_attr = 303 assert W.some_attr == 303 W.instance.some_method.__name__ = 'some_method' W.some_method(1, 2, kw=1) W.instance.some_method.assert_called_with(1, 2, kw=1) W.some_method() W.instance.some_method.assert_called_with() W.some_method(kw=1) W.instance.some_method.assert_called_with(kw=1) W.ident = 'ident' W.some_method(kw=1) logger.debug.assert_called() assert 'ident' in logger.debug.call_args[0][0] assert dir(W) == dir(W.instance) kombu-5.5.3/t/unit/utils/test_div.py000066400000000000000000000021541477772317200174410ustar00rootroot00000000000000from __future__ import annotations import pickle from io import BytesIO, StringIO from kombu.utils.div import emergency_dump_state class MyStringIO(StringIO): def close(self): pass class MyBytesIO(BytesIO): def close(self): pass class test_emergency_dump_state: def test_dump(self, stdouts): fh = MyBytesIO() stderr = StringIO() emergency_dump_state( {'foo': 'bar'}, open_file=lambda n, m: fh, stderr=stderr) assert pickle.loads(fh.getvalue()) == {'foo': 'bar'} assert stderr.getvalue() assert not stdouts.stdout.getvalue() def test_dump_second_strategy(self, stdouts): fh = MyStringIO() stderr = StringIO() def raise_something(*args, **kwargs): raise KeyError('foo') emergency_dump_state( {'foo': 'bar'}, open_file=lambda n, m: fh, dump=raise_something, stderr=stderr, ) assert 'foo' in fh.getvalue() assert 'bar' in fh.getvalue() assert stderr.getvalue() assert not stdouts.stdout.getvalue() kombu-5.5.3/t/unit/utils/test_encoding.py000066400000000000000000000056311477772317200204500ustar00rootroot00000000000000from __future__ import annotations import sys from contextlib import contextmanager from unittest.mock import patch from kombu.utils.encoding import (default_encoding, get_default_encoding_file, safe_str, set_default_encoding_file) @contextmanager def clean_encoding(): old_encoding = sys.modules.pop('kombu.utils.encoding', None) import kombu.utils.encoding try: yield kombu.utils.encoding finally: if old_encoding: sys.modules['kombu.utils.encoding'] = old_encoding class test_default_encoding: def test_set_default_file(self): prev = get_default_encoding_file() try: set_default_encoding_file('/foo.txt') assert get_default_encoding_file() == '/foo.txt' finally: set_default_encoding_file(prev) @patch('sys.getfilesystemencoding') def test_default(self, getdefaultencoding): getdefaultencoding.return_value = 'ascii' with clean_encoding() as encoding: enc = encoding.default_encoding() if sys.platform.startswith('java'): assert enc == 'utf-8' else: assert enc == 'ascii' getdefaultencoding.assert_called_with() class newbytes(bytes): """Mock class to simulate python-future newbytes class""" def __repr__(self): return 'b' + super().__repr__() def __str__(self): return 'b' + f"'{super().__str__()}'" class newstr(str): """Mock class to simulate python-future newstr class""" def encode(self, encoding=None, errors=None): return newbytes(super().encode(encoding, errors)) class test_safe_str: def setup_method(self): self._encoding = self.patching('sys.getfilesystemencoding') self._encoding.return_value = 'ascii' def test_when_bytes(self): assert safe_str('foo') == 
'foo' def test_when_newstr(self): """Simulates using python-future package under 2.7""" assert str(safe_str(newstr('foo'))) == 'foo' def test_when_unicode(self): assert isinstance(safe_str('foo'), str) def test_when_encoding_utf8(self): self._encoding.return_value = 'utf-8' assert default_encoding() == 'utf-8' s = 'The quiæk fåx jømps øver the lazy dåg' res = safe_str(s) assert isinstance(res, str) def test_when_containing_high_chars(self): self._encoding.return_value = 'ascii' s = 'The quiæk fåx jømps øver the lazy dåg' res = safe_str(s) assert isinstance(res, str) assert len(s) == len(res) def test_when_not_string(self): o = object() assert safe_str(o) == repr(o) def test_when_unrepresentable(self): class UnrepresentableObject: def __repr__(self): raise KeyError('foo') assert '= 10: return 42 raise Predicate() finally: self.calls += 1 fun = Fun() assert retry_over_time( fun, self.Predicate, max_retries=None, errback=None, interval_max=14) == 42 assert fun.calls == 11 @pytest.mark.parametrize('obj,expected', [ (None, None), (1, [1]), ([1, 2, 3], [1, 2, 3]), ]) def test_maybe_list(obj, expected): assert maybe_list(obj) == expected def test_fxrange__no_repeatlast(): assert list(fxrange(1.0, 3.0, 1.0)) == [1.0, 2.0, 3.0] @pytest.mark.parametrize('args,expected', [ ((1.0, 3.0, 1.0, 30.0), [1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), ((1.0, None, 1.0, 30.0), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]), ]) def test_fxrangemax(args, expected): assert list(fxrangemax(*args)) == expected def test_reprkwargs(): assert reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'}) def test_reprcall(): assert reprcall('add', (2, 2), {'copy': True}) class test_accepts_arg: def function(self, foo, bar, baz="baz"): pass def test_valid_argument(self): assert accepts_argument(self.function, 'self') assert accepts_argument(self.function, 'foo') assert accepts_argument(self.function, 'baz') def test_invalid_argument(self): assert not accepts_argument(self.function, 'random_argument') def test_raise_exception(self): with pytest.raises(Exception): accepts_argument(None, 'foo') kombu-5.5.3/t/unit/utils/test_imports.py000066400000000000000000000017071477772317200203570ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu import Exchange from kombu.utils.imports import symbol_by_name class test_symbol_by_name: def test_instance_returns_instance(self): instance = object() assert symbol_by_name(instance) is instance def test_returns_default(self): default = object() assert symbol_by_name( 'xyz.ryx.qedoa.weq:foz', default=default) is default def test_no_default(self): with pytest.raises(ImportError): symbol_by_name('xyz.ryx.qedoa.weq:foz') def test_imp_reraises_ValueError(self): imp = Mock() imp.side_effect = ValueError() with pytest.raises(ValueError): symbol_by_name('kombu.Connection', imp=imp) def test_package(self): assert symbol_by_name('.entity:Exchange', package='kombu') is Exchange assert symbol_by_name(':Consumer', package='kombu') kombu-5.5.3/t/unit/utils/test_json.py000066400000000000000000000111321477772317200176240ustar00rootroot00000000000000from __future__ import annotations import sys import uuid from collections import namedtuple from dataclasses import dataclass from datetime import datetime from decimal import Decimal import pytest from hypothesis import given, settings from hypothesis import strategies as st from kombu.utils.encoding import str_to_bytes from kombu.utils.json import (_register_default_types, dumps, loads, register_type) if 
sys.version_info >= (3, 9): from zoneinfo import ZoneInfo else: from backports.zoneinfo import ZoneInfo class Custom: def __init__(self, data): self.data = data def __json__(self): return self.data class test_JSONEncoder: @pytest.fixture(autouse=True) def reset_registered_types(self): _register_default_types() @pytest.mark.freeze_time("2015-10-21") def test_datetime(self): now = datetime.utcnow() now_utc = now.replace(tzinfo=ZoneInfo("UTC")) original = { 'datetime': now, 'tz': now_utc, 'date': now.date(), 'time': now.time(), } serialized = loads(dumps(original)) assert serialized == original @given(message=st.binary()) @settings(print_blob=True) def test_binary(self, message): serialized = loads(dumps({ 'args': (message,), })) assert serialized == { 'args': [message], } def test_Decimal(self): original = {'d': Decimal('3314132.13363235235324234123213213214134')} serialized = loads(dumps(original)) assert serialized == original def test_namedtuple(self): Foo = namedtuple('Foo', ['bar']) assert loads(dumps(Foo(123))) == [123] def test_UUID(self): constructors = [ uuid.uuid1, lambda: uuid.uuid3(uuid.NAMESPACE_URL, "https://example.org"), uuid.uuid4, lambda: uuid.uuid5(uuid.NAMESPACE_URL, "https://example.org"), # The uuids below correspond to v6, v7 and v8 respectively and were # generated using the package uuid6. lambda: uuid.UUID("1ee0b1e6-dd55-63d2-867f-88cb9205458f"), lambda: uuid.UUID("0188bcbb-8475-7605-a094-fe41c58df798"), lambda: uuid.UUID("0188bcbb-8cb2-8bf7-b3b5-fd1faa0431bd"), ] for constructor in constructors: id = constructor() loaded_value = loads(dumps({'u': id})) assert loaded_value == {'u': id} assert loaded_value["u"].version == id.version def test_register_type_overrides_defaults(self): # This type is already registered by default, let's override it register_type(uuid.UUID, "uuid", lambda o: "custom", lambda o: o) value = uuid.uuid4() loaded_value = loads(dumps({'u': value})) assert loaded_value == {'u': "custom"} def test_register_type_with_new_type(self): # Guaranteed never before seen type @dataclass() class SomeType: a: int register_type(SomeType, "some_type", lambda o: "custom", lambda o: o) value = SomeType(42) loaded_value = loads(dumps({'u': value})) assert loaded_value == {'u': "custom"} def test_register_type_with_empty_marker(self): register_type( datetime, None, lambda o: o.isoformat(), lambda o: "should never be used" ) now = datetime.utcnow() serialized_str = dumps({'now': now}) deserialized_value = loads(serialized_str) assert "__type__" not in serialized_str assert "__value__" not in serialized_str # Check that there is no extra deserialization happening assert deserialized_value == {'now': now.isoformat()} def test_default(self): with pytest.raises(TypeError): dumps({'o': object()}) class test_dumps_loads: def test_dumps_custom_object(self): x = {'foo': Custom({'a': 'b'})} assert loads(dumps(x)) == {'foo': x['foo'].__json__()} def test_dumps_custom_object_no_json(self): x = {'foo': object()} with pytest.raises(TypeError): dumps(x) def test_loads_memoryview(self): assert loads( memoryview(bytearray(dumps({'x': 'z'}), encoding='utf-8')) ) == {'x': 'z'} def test_loads_bytearray(self): assert loads( bytearray(dumps({'x': 'z'}), encoding='utf-8') ) == {'x': 'z'} def test_loads_bytes(self): assert loads( str_to_bytes(dumps({'x': 'z'})), decode_bytes=True) == {'x': 'z'} kombu-5.5.3/t/unit/utils/test_objects.py000066400000000000000000000040531477772317200203100ustar00rootroot00000000000000from __future__ import annotations from unittest import mock from 
kombu.utils.objects import cached_property class test_cached_property: def test_deleting(self): class X: xx = False @cached_property def foo(self): return 42 @foo.deleter def foo(self, value): self.xx = value x = X() del x.foo assert not x.xx x.__dict__['foo'] = 'here' del x.foo assert x.xx == 'here' def test_when_access_from_class(self): class X: xx = None @cached_property def foo(self): return 42 @foo.setter def foo(self, value): self.xx = 10 desc = X.__dict__['foo'] assert X.foo is desc assert desc.__get__(None) is desc assert desc.__set__(None, 1) is desc assert desc.__delete__(None) is desc assert desc.setter(1) x = X() x.foo = 30 assert x.xx == 10 del x.foo def test_locks_on_access(self): class X: @cached_property def foo(self): return 42 x = X() # Getting the value acquires the lock, and may do so recursively # on Python < 3.12 because the superclass acquires it. with mock.patch.object(X.foo, 'lock') as mock_lock: assert x.foo == 42 mock_lock.__enter__.assert_called() mock_lock.__exit__.assert_called() # Setting a value also acquires the lock. with mock.patch.object(X.foo, 'lock') as mock_lock: x.foo = 314 assert x.foo == 314 mock_lock.__enter__.assert_called_once() mock_lock.__exit__.assert_called_once() # .. as does clearing the cached value to recompute it. with mock.patch.object(X.foo, 'lock') as mock_lock: del x.foo assert x.foo == 42 mock_lock.__enter__.assert_called_once() mock_lock.__exit__.assert_called_once() kombu-5.5.3/t/unit/utils/test_scheduling.py000066400000000000000000000053211477772317200210030ustar00rootroot00000000000000from __future__ import annotations from unittest.mock import Mock import pytest from kombu.utils.scheduling import FairCycle, cycle_by_name class MyEmpty(Exception): pass def consume(fun, n): r = [] for i in range(n): r.append(fun(Mock(name='callback'))) return r class test_FairCycle: def test_cycle(self): resources = ['a', 'b', 'c', 'd', 'e'] callback = Mock(name='callback') def echo(r, timeout=None): return r # cycle should be ['a', 'b', 'c', 'd', 'e', ... 
repeat] cycle = FairCycle(echo, resources, MyEmpty) for i in range(len(resources)): assert cycle.get(callback) == resources[i] for i in range(len(resources)): assert cycle.get(callback) == resources[i] def test_cycle_breaks(self): resources = ['a', 'b', 'c', 'd', 'e'] def echo(r, callback): if r == 'c': raise MyEmpty(r) return r cycle = FairCycle(echo, resources, MyEmpty) assert consume(cycle.get, len(resources)) == [ 'a', 'b', 'd', 'e', 'a', ] assert consume(cycle.get, len(resources)) == [ 'b', 'd', 'e', 'a', 'b', ] cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty) with pytest.raises(MyEmpty): consume(cycle2.get, 3) def test_cycle_no_resources(self): cycle = FairCycle(None, [], MyEmpty) cycle.pos = 10 with pytest.raises(MyEmpty): cycle._next() def test__repr__(self): assert repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty)) def test_round_robin_cycle(): it = cycle_by_name('round_robin')(['A', 'B', 'C']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'C', 'B'] it.rotate('A') assert it.consume(3) == ['C', 'B', 'A'] it.rotate('A') assert it.consume(3) == ['C', 'B', 'A'] it.rotate('C') assert it.consume(3) == ['B', 'A', 'C'] def test_priority_cycle(): it = cycle_by_name('priority')(['A', 'B', 'C']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('C') assert it.consume(3) == ['A', 'B', 'C'] def test_sorted_cycle(): it = cycle_by_name('sorted')(['B', 'C', 'A']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('C') assert it.consume(3) == ['A', 'B', 'C'] kombu-5.5.3/t/unit/utils/test_time.py000066400000000000000000000007131477772317200176140ustar00rootroot00000000000000from __future__ import annotations import pytest from kombu.utils.time import maybe_s_to_ms @pytest.mark.parametrize('input,expected', [ (3, 3000), (3.0, 3000), (303, 303000), (303.33, 303330), (303.333, 303333), (303.3334, 303333), (None, None), (0, 0), ]) def test_maybe_s_to_ms(input, expected): ret = maybe_s_to_ms(input) if expected is None: assert ret is None else: assert ret == expected kombu-5.5.3/t/unit/utils/test_url.py000066400000000000000000000063111477772317200174600ustar00rootroot00000000000000from __future__ import annotations try: from urllib.parse import urlencode except ImportError: from urllib import urlencode import ssl import pytest import kombu.utils.url from kombu.utils.url import (as_url, maybe_sanitize_url, parse_ssl_cert_reqs, parse_url) def test_parse_url(): assert parse_url('amqp://user:pass@localhost:5672/my/vhost') == { 'transport': 'amqp', 'userid': 'user', 'password': 'pass', 'hostname': 'localhost', 'port': 5672, 'virtual_host': 'my/vhost', } @pytest.mark.parametrize('urltuple,expected', [ (('https',), 'https:///'), (('https', 'e.com'), 'https://e.com/'), (('https', 'e.com', 80), 'https://e.com:80/'), (('https', 'e.com', 80, 'u'), 'https://u@e.com:80/'), (('https', 'e.com', 80, 'u', 'p'), 'https://u:p@e.com:80/'), (('https', 'e.com', 80, None, 'p'), 'https://:p@e.com:80/'), (('https', 'e.com', 80, None, 'p', '/foo'), 'https://:p@e.com:80//foo'), ]) def test_as_url(urltuple, expected): assert as_url(*urltuple) == expected @pytest.mark.parametrize('url,expected', [ ('foo', 'foo'), ('http://u:p@e.com//foo', 'http://u:**@e.com//foo'), ]) def 
test_maybe_sanitize_url(url, expected): assert maybe_sanitize_url(url) == expected assert (maybe_sanitize_url('http://u:p@e.com//foo') == 'http://u:**@e.com//foo') def test_ssl_parameters(): url = 'rediss://user:password@host:6379/0?' querystring = urlencode({ "ssl_check_hostname": "on", }) kwargs = parse_url(url + querystring) assert kwargs['transport'] == 'rediss' assert kwargs['ssl']['ssl_check_hostname'] is True querystring = urlencode({ 'ssl_cert_reqs': 'required', 'ssl_ca_certs': '/var/ssl/myca.pem', 'ssl_certfile': '/var/ssl/server-cert.pem', 'ssl_keyfile': '/var/ssl/priv/worker-key.pem', "ssl_check_hostname": "false", }) kwargs = parse_url(url + querystring) assert kwargs['transport'] == 'rediss' assert kwargs['ssl']['ssl_cert_reqs'] == ssl.CERT_REQUIRED assert kwargs['ssl']['ssl_ca_certs'] == '/var/ssl/myca.pem' assert kwargs['ssl']['ssl_certfile'] == '/var/ssl/server-cert.pem' assert kwargs['ssl']['ssl_keyfile'] == '/var/ssl/priv/worker-key.pem' assert kwargs['ssl']['ssl_check_hostname'] is False kombu.utils.url.ssl_available = False kwargs = parse_url(url + querystring) assert kwargs['ssl']['ssl_cert_reqs'] is None kombu.utils.url.ssl_available = True @pytest.mark.parametrize('query_param,ssl_available,expected', [ ('CERT_REQUIRED', True, ssl.CERT_REQUIRED), ('CERT_OPTIONAL', True, ssl.CERT_OPTIONAL), ('CERT_NONE', True, ssl.CERT_NONE), ('required', True, ssl.CERT_REQUIRED), ('optional', True, ssl.CERT_OPTIONAL), ('none', True, ssl.CERT_NONE), ('CERT_REQUIRED', None, None), ]) def test_parse_ssl_cert_reqs(query_param, ssl_available, expected): kombu.utils.url.ssl_available = ssl_available result = parse_ssl_cert_reqs(query_param) kombu.utils.url.ssl_available = True assert result == expected def test_parse_ssl_cert_reqs_bad_value(): with pytest.raises(KeyError): parse_ssl_cert_reqs('badvalue') kombu-5.5.3/t/unit/utils/test_utils.py000066400000000000000000000011411477772317200200120ustar00rootroot00000000000000from __future__ import annotations import pytest from kombu import version_info_t from kombu.utils.text import version_string_as_tuple def test_dir(): import kombu assert dir(kombu) @pytest.mark.parametrize('version,expected', [ ('3', version_info_t(3, 0, 0, '', '')), ('3.3', version_info_t(3, 3, 0, '', '')), ('3.3.1', version_info_t(3, 3, 1, '', '')), ('3.3.1a3', version_info_t(3, 3, 1, 'a3', '')), ('3.3.1.a3.40c32', version_info_t(3, 3, 1, 'a3', '40c32')), ]) def test_version_string_as_tuple(version, expected): assert version_string_as_tuple(version) == expected kombu-5.5.3/t/unit/utils/test_uuid.py000066400000000000000000000004411477772317200176220ustar00rootroot00000000000000from __future__ import annotations from kombu.utils.uuid import uuid class test_UUID: def test_uuid4(self) -> None: assert uuid() != uuid() def test_uuid(self) -> None: i1 = uuid() i2 = uuid() assert isinstance(i1, str) assert i1 != i2 kombu-5.5.3/tox.ini000066400000000000000000000117111477772317200141760ustar00rootroot00000000000000[tox] envlist = {pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13}-unit {pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13}-linux-integration-py-amqp {pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13}-linux-integration-redis {pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13}-linux-integration-mongodb {pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13}-linux-integration-kafka flake8 apicheck pydocstyle mypy requires = tox-docker<=4.1 requests<2.32.0 [gh-actions] python = 3.8: py38 3.9: py39 3.10: py310 3.11: py311 3.12: py312 3.13: py313 pypy3: pypy3 [testenv] sitepackages = False setenv = C_DEBUG_TEST = 1 
PIP_EXTRA_INDEX_URL=https://celery.github.io/celery-wheelhouse/repo/simple/ passenv = DISTUTILS_USE_SDK deps= -r{toxinidir}/requirements/dev.txt apicheck,pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13: -r{toxinidir}/requirements/default.txt apicheck,pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13: -r{toxinidir}/requirements/test.txt apicheck,pypy3.10,3.8,3.9,3.10,3.11,3.12,3.13: -r{toxinidir}/requirements/test-ci.txt apicheck,3.8-linux,3.9-linux,3.10-linux,3.11-linux,3.12-linux,3.13-linux: -r{toxinidir}/requirements/extras/confluentkafka.txt apicheck,linkcheck: -r{toxinidir}/requirements/docs.txt flake8,pydocstyle,mypy: -r{toxinidir}/requirements/pkgutils.txt integration: -r{toxinidir}/requirements/test-integration.txt commands = unit: python -bb -m pytest -rxs -xv --cov=kombu --cov-report=xml --no-cov-on-fail --cov-report term {posargs} integration-py-amqp: pytest -xv -E py-amqp t/integration -n auto --reruns 2 --reruns-delay 1 {posargs} integration-redis: pytest -xv -E redis t/integration -n auto --reruns 2 --reruns-delay 1 {posargs} integration-mongodb: pytest -xv -E mongodb t/integration -n auto --reruns 2 --reruns-delay 1 {posargs} integration-kafka: pytest -xv -E kafka t/integration -n auto --reruns 2 --reruns-delay 1 {posargs} basepython = pypy3: pypy3 3.8: python3.8 3.9: python3.9 3.10: python3.10 3.11: python3.11 3.12: python3.12 3.13: python3.13 apicheck,pydocstyle,flake8,linkcheck,cov,mypy: python3.13 install_command = python -m pip --disable-pip-version-check install {opts} {packages} docker = integration-py-amqp: rabbitmq integration-redis: redis integration-mongodb: mongodb integration-kafka: zookeeper integration-kafka: kafka dockerenv = PYAMQP_INTEGRATION_INSTANCE=1 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbit tcp_listeners [5672] [docker:rabbitmq] image = rabbitmq ports = 5672:5672/tcp healthcheck_cmd = /bin/bash -c 'rabbitmq-diagnostics ping -q' healthcheck_interval = 10 healthcheck_timeout = 10 healthcheck_retries = 30 healthcheck_start_period = 5 [docker:redis] image = redis ports = 6379:6379/tcp healthcheck_cmd = /bin/sh -c 'redis-cli ping' healthcheck_interval = 10 healthcheck_timeout = 10 healthcheck_retries = 30 healthcheck_start_period = 5 [docker:mongodb] image = mongo ports = 27017:27017/tcp healthcheck_cmd = /usr/bin/mongosh --eval 'db.runCommand("ping")' healthcheck_interval = 10 healthcheck_timeout = 10 healthcheck_retries = 30 healthcheck_start_period = 5 [docker:zookeeper] image = bitnami/zookeeper:latest ports = 2181:2181/tcp healthcheck_interval = 10 healthcheck_timeout = 10 healthcheck_retries = 30 healthcheck_start_period = 5 environment = ALLOW_ANONYMOUS_LOGIN=yes [docker:kafka] image = bitnami/kafka:3.8 ports = 9092:9092/tcp healthcheck_cmd = /bin/bash -c 'kafka-topics.sh --list --bootstrap-server 127.0.0.1:9092' healthcheck_interval = 10 healthcheck_timeout = 10 healthcheck_retries = 30 healthcheck_start_period = 5 links = zookeeper:zookeeper environment = KAFKA_BROKER_ID=1 KAFKA_CFG_LISTENERS=PLAINTEXT://:9092 KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 ALLOW_PLAINTEXT_LISTENER=yes [testenv:apicheck] commands = pip install -U -r{toxinidir}/requirements/dev.txt sphinx-build -j2 -b apicheck -d {envtmpdir}/doctrees docs docs/_build/apicheck [testenv:linkcheck] commands = pip install -U -r{toxinidir}/requirements/dev.txt sphinx-build -j2 -W -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck [testenv:flake8] commands = flake8 -j2 {toxinidir}/kombu {toxinidir}/t [testenv:pydocstyle] commands = pydocstyle 
{toxinidir}/kombu [testenv:mypy] commands = python -m mypy --config-file setup.cfg [testenv:lint] allowlist_externals = pre-commit commands = pre-commit {posargs:run --all-files --show-diff-on-failure} [testenv:clean] deps = cleanpy allowlist_externals = make commands = python -m cleanpy . make clean [testenv:parallel] description = Run all unit and integration tests in parallel allowlist_externals = tox setenv = PYTHONUNBUFFERED = 1 PYTHONDONTWRITEBYTECODE = 1 commands = tox -e \ 3.13-unit,\ 3.13-linux-integration-py-amqp,\ 3.13-linux-integration-redis,\ 3.13-linux-integration-mongodb,\ 3.13-linux-integration-kafka \ -p -o -- --exitfirst {posargs}