pax_global_header00006660000000000000000000000064147651616110014522gustar00rootroot0000000000000052 comment=8b111eb5f0298e5b095272027bf3194d2c999aa8 flask-limiter-3.12/000077500000000000000000000000001476516161100142125ustar00rootroot00000000000000flask-limiter-3.12/.coveragerc000066400000000000000000000004661476516161100163410ustar00rootroot00000000000000[run] omit = /**/flask_limiter/_compat.py /**/flask_limiter/_version* /**/tests/*.py /**/flask_limiter/contrib/*.py versioneer.py setup.py [report] exclude_lines = pragma: no cover noqa raise NotImplementedError if typing.TYPE_CHECKING @overload @abstractmethod flask-limiter-3.12/.gitattributes000066400000000000000000000001211476516161100170770ustar00rootroot00000000000000flask_ratelimits/_version.py export-subst flask_limiter/_version.py export-subst flask-limiter-3.12/.github/000077500000000000000000000000001476516161100155525ustar00rootroot00000000000000flask-limiter-3.12/.github/FUNDING.yml000066400000000000000000000000611476516161100173640ustar00rootroot00000000000000github: alisaifee open_collective: flask-limiter flask-limiter-3.12/.github/ISSUE_TEMPLATE/000077500000000000000000000000001476516161100177355ustar00rootroot00000000000000flask-limiter-3.12/.github/ISSUE_TEMPLATE/bug-report.md000066400000000000000000000013221476516161100223430ustar00rootroot00000000000000--- name: Bug Report about: Submit a bug report labels: 'bug' --- ## Expected Behaviour ## Current Behaviour ## Steps to Reproduce 1. 1. 1. 1. ## Your Environment - Flask-limiter version: - Flask version: - Operating system: - Python version: flask-limiter-3.12/.github/ISSUE_TEMPLATE/feature.md000066400000000000000000000005131476516161100217110ustar00rootroot00000000000000--- name: Feature or Enhancement about: Propose a new feature or enhancement labels: 'enhancement' --- ## Expected Behaviour flask-limiter-3.12/.github/dependabot.yml000066400000000000000000000002121476516161100203750ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "pip" directory: "/" # Location of package manifests schedule: interval: "daily" flask-limiter-3.12/.github/stale.yml000066400000000000000000000027721476516161100174150ustar00rootroot00000000000000# Configuration for probot-stale - https://github.com/probot/stale # Number of days of inactivity before an Issue or Pull Request becomes stale daysUntilStale: 90 daysUntilClose: 7 onlyLabels: [] exemptLabels: - pinned - security - enhancement - bug # Set to true to ignore issues in a project (defaults to false) exemptProjects: false # Set to true to ignore issues in a milestone (defaults to false) exemptMilestones: false # Set to true to ignore issues with an assignee (defaults to false) exemptAssignees: false # Label to use when marking as stale staleLabel: wontfix # Comment to post when marking as stale. Set to `false` to disable markComment: > This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. # Comment to post when removing the stale label. # unmarkComment: > # Your comment here. # Comment to post when closing a stale Issue or Pull Request. # closeComment: > # Your comment here. # Limit the number of actions per hour, from 1-30. 
Default is 30 limitPerRun: 30 # Limit to only `issues` or `pulls` only: issues # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': # pulls: # daysUntilStale: 30 # markComment: > # This pull request has been automatically marked as stale because it has not had # recent activity. It will be closed if no further activity occurs. Thank you # for your contributions. # issues: # exemptLabels: # - confirmed flask-limiter-3.12/.github/workflows/000077500000000000000000000000001476516161100176075ustar00rootroot00000000000000flask-limiter-3.12/.github/workflows/main.yml000066400000000000000000000122211476516161100212540ustar00rootroot00000000000000name: CI on: [push, pull_request] jobs: lint: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.10", "3.11", "3.12", "3.13"] steps: - uses: actions/checkout@v3 - name: Cache dependencies uses: actions/cache@v3 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools wheel pip install -r requirements/ci.txt - name: Lint with ruff run: | ruff check --select I flask_limiter tests examples ruff format --check flask_limiter tests examples ruff check flask_limiter tests examples - name: Type checking run: | mypy flask_limiter test: runs-on: ubuntu-latest name: Test (Python ${{ matrix.python-version }}, Flask ${{matrix.flask-version}}) strategy: fail-fast: false matrix: python-version: ["3.10", "3.11", "3.12", "3.13"] flask-version: ["flask>=2.3,<2.4", "flask>=3.0,<3.1", "flask>=3.1,<3.2"] steps: - uses: actions/checkout@v3 - name: Cache dependencies uses: actions/cache@v3 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements/**') }} restore-keys: | ${{ runner.os }}-pip- - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools wheel pip install -r requirements/ci.txt - name: Install Flask ${{ matrix.flask-version }} run: | pip uninstall -y flask werkzeug pip install "${{ matrix.flask-version }}" - name: Test run: | pytest --cov-report=xml - name: Upload coverage to Codecov uses: codecov/codecov-action@v5 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - name: Check Coverage run: | coverage report --fail-under=100 || (echo 'Insufficient coverage' && $(exit 1)) build_wheels: needs: [lint] name: Build wheel runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v3 with: python-version: '3.13' - name: Build wheels run: | python -m pip install -U build python -m build --wheel - uses: actions/upload-artifact@v4 with: name: wheels path: ./dist/*.whl build_sdist: needs: [lint] name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v3 with: python-version: '3.13' - name: Build sdist run: | pipx run build --sdist - uses: actions/upload-artifact@v4 with: name: src_dist path: dist/*.tar.gz upload_pypi: needs: [test, build_wheels, build_sdist] runs-on: ubuntu-latest if: github.ref == 'refs/heads/master' permissions: id-token: write steps: - uses: actions/download-artifact@v4.1.7 with: name: 
wheels path: dist - uses: actions/download-artifact@v4.1.7 with: name: src_dist path: dist - uses: pypa/gh-action-pypi-publish@release/v1 with: repository-url: https://test.pypi.org/legacy/ skip-existing: true upload_pypi_release: needs: [test, build_wheels, build_sdist] runs-on: ubuntu-latest if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') permissions: id-token: write steps: - uses: actions/download-artifact@v4.1.7 with: name: wheels path: dist - uses: actions/download-artifact@v4.1.7 with: name: src_dist path: dist - uses: pypa/gh-action-pypi-publish@release/v1 github_release: needs: [upload_pypi_release] name: Create Release runs-on: ubuntu-latest if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') steps: - name: Checkout code uses: actions/checkout@v3 with: fetch-depth: 0 - name: Download wheels uses: actions/download-artifact@v4.1.7 with: name: wheels path: dist - name: Download src dist uses: actions/download-artifact@v4.1.7 with: name: src_dist path: dist - name: Generate release notes run: | ./scripts/github_release_notes.sh > release_notes.md - name: Create Release uses: ncipollo/release-action@v1 with: artifacts: "dist/*" bodyFile: release_notes.md token: ${{ secrets.GITHUB_TOKEN }} flask-limiter-3.12/.gitignore000066400000000000000000000002151476516161100162000ustar00rootroot00000000000000*.pyc *.log cover/* .coverage* .test_env .tool-versions .idea build/ dist/ doc/_build htmlcov *egg-info* .cache .eggs .python-version .*.swp flask-limiter-3.12/.gitmodules000066400000000000000000000000001476516161100163550ustar00rootroot00000000000000flask-limiter-3.12/.readthedocs.yml000066400000000000000000000005661476516161100173070ustar00rootroot00000000000000version: 2 build: os: ubuntu-20.04 tools: python: "3.13" # You can also specify other tool versions: # nodejs: "16" # rust: "1.55" # golang: "1.17" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: doc/source/conf.py python: install: - requirements: requirements/docs.txt - method: setuptools path: . flask-limiter-3.12/CLASSIFIERS000066400000000000000000000007361476516161100157120ustar00rootroot00000000000000Development Status :: 5 - Production/Stable Environment :: Web Environment Framework :: Flask Intended Audience :: Developers License :: OSI Approved :: MIT License Operating System :: MacOS Operating System :: POSIX :: Linux Operating System :: OS Independent Topic :: Software Development :: Libraries :: Python Modules Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: 3.13 flask-limiter-3.12/CODE_OF_CONDUCT.md000066400000000000000000000121221476516161100170070ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at . All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. 
No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. flask-limiter-3.12/CONTRIBUTIONS.rst000066400000000000000000000005721476516161100170120ustar00rootroot00000000000000Contributions ============= * `Timothee Groleau `_ * `Zehua Liu `_ * `Guilherme Polo `_ * `Mattias Granlund `_ * `Josh Friend `_ * `Sami Hiltunen `_ * `Henning Peters `_ flask-limiter-3.12/HISTORY.rst000066400000000000000000000465061476516161100161200ustar00rootroot00000000000000.. :changelog: Changelog ========= v3.12 ----- Release Date: 2025-03-14 * Compatibility * Drop support for python 3.9 * Add install extra for valkey v3.11.0 ------- Release Date: 2025-03-11 * Documentation * Improve documentation about rate limiting strategies * Typing * Use builtin types (list, set, tuple) v3.10.1 ------- Release Date: 2025-01-16 * Security * Change pypi release to use trusted publishing v3.10.0 ------- Release Date: 2025-01-05 * Chores * Fix typing error with exempt decorator * Update types for window wrapper to use named tuple v3.9.2 ------ Release Date: 2024-11-27 * Compatibility * Ensure wheels are uploaded to pypi as well (for real) v3.9.1 ------ Release Date: 2024-11-27 * Compatibility * Ensure wheels are uploaded to pypi as well v3.9.0 ------ Release Date: 2024-11-27 * Chores * Update development dependencies * Compatibility * Drop support for python 3.8 * Add support for python 3.13 * Add CI matrix entry for flask 3.1 v3.8.0 ------ Release Date: 2024-07-20 * Bug fix * Ensure cost is accounted for when testing conditional limits v3.7.0 ------ Release Date: 2024-05-19 * Bug fix * Fix errors with concurrent access to internal exemption maps during application startup. v3.6.0 ------ Release Date: 2024-04-21 * Bug fix * Ensure `exempt` routes are exempt from meta limits as well v3.5.1 ------ Release Date: 2024-02-11 * Chores * Update development dependencies * Use ruff for all linting * Update CI compatibility matrix v3.5.0 ------ Release Date: 2023-08-30 * Feature * Add `meta_limits` to allow for creating upper limits for requesting clients to breach application rate limits. * Bug fix * Ensure on breach callbacks can be configured using flask config v3.4.1 ------ Release Date: 2023-08-26 * Bug fix - Ensure _version.py has stable content when generated using `git archive` from a tag regardless of when it is run. 
v3.4.0 ------ Release Date: 2023-08-22 * Feature * Add extended configuration for application limits * `application_limits_exempt_when` * `application_limits_deduct_when` * `application_limits_per_method` * Bug fix * Ensure blueprint static routes are exempt v3.3.1 ------ Release Date: 2023-05-03 * Chores * Improve default limits documentation * Update documentation dependencies * Fix typing compatibility errors in headers v3.3.0 ------ Release Date: 2023-02-26 * Bug Fix * Ensure per route limits are preferred (over application limits) when populating rate limiting headers in the case where no rate limit has been breached in the request. v3.2.0 ------ Release Date: 2023-02-15 * Feature * Allow configuring request identity * Chores * Improve linting with ruff * Update development dependencies v3.1.0 ------ Release Date: 2022-12-29 * Feature * Skip logging an error if a decorated limit uses a callable to return the "current" rate limit and returns an empty string. Treat this is a signal that the rate limit should be skipped for this request. v3.0.0 ------ Release Date: 2022-12-28 * Breaking changes * Change order of extension constructor arguments to only require ``key_func`` as the first positional argument and all other arguments as keyword arguments. * Separate positional/keyword arguments in limit/shared_limit decorators * Remove deprecated config variable RATELIMIT_STORAGE_URL * Remove legacy backward compatibility path for flask < 2 * Features * Allow scoping regular limit decorators / context managers v3.0.0b2 -------- Release Date: 2022-12-28 * Breaking changes * Remove deprecated config variable RATELIMIT_STORAGE_URL * Remove legacy backward compatibility path for flask < 2 * Enforce key_func as a required argument * Chores * Simplify registration of decorated function & blueprint limits v3.0.0b1 -------- Release Date: 2022-12-26 * Breaking changes * Change order of extension constructor arguments to only require ``key_func`` as the first positional argument and all other arguments as keyword arguments. * Separate positional/keyword arguments in limit/shared_limit decorators * Features * Allow scoping regular limit decorators / context managers v2.9.2 ------ Release Date: 2022-12-26 * Feature * Extend customization by http method to shared_limit decorator v2.9.1 ------ Release Date: 2022-12-26 * Chores * Update documentation quick start * Refresh documentation for class based views v2.9.0 ------ Release Date: 2022-12-24 * Features * Allow using `limit` & `shared_limit` decorators on pure functions that are not decorated as routes. The functions when called from within a request context will get rate limited. * Allow using `limit` as a context manager to rate limit a code block explicitly within a request * Chores * Updated development dependencies * Fix error running tests depending on docker locally * Update internals to use dataclasses v2.8.1 ------ Release Date: 2022-11-15 * Chores * Add sponsorship banner to rtd * Update documentation dependencies v2.8.0 ------ Release Date: 2022-11-13 * Breaking changes * Any exception raised when calling an ``on_breach`` callback will be re-raised instead of being absorbed unless ``swallow_errors`` is set. 
In the case of ``swallow_errors`` the exception will now be logged at ``ERROR`` level instead of ``WARN`` * Reduce log level of rate limit exceeded log messages to ``INFO`` v2.7.0 ------ Release Date: 2022-10-25 * Bug Fix * Add default value for RateLimitExceeded optional parameter * Fix suppression of errors when using conditional deduction (`Issue 363 `_) v2.6.3 ------ Release Date: 2022-09-22 * Compatibility * Ensure typing_extensions dependency has a minimum version * Chores * Documentation tweaks * Update CI to use 3.11 rc2 v2.6.2 ------ Release Date: 2022-08-24 * Chores * Improve quick start documentation v2.6.1 ------ Release Date: 2022-08-23 * Usability * Emit warning when in memory storage is used as a default when no storage uri is provided v2.6.0 ------ Release Date: 2022-08-11 * Feature * Expand use of ``on_breach`` callback to return a ``Response`` object that will be used as the error response on rate limits being exceeded v2.5.1 ------ Release Date: 2022-08-05 * Compatibility * Migrate use of `flask._request_ctx_stack` to `flask.globals.request_ctx` to support Flask 2.2+ * Chores * Expand CI matrix to test against Flask 2.0,2.1 & 2.2 * Make tests compatible with Flask 2.2.+ v2.5.0 ------ Release Date: 2022-07-07 * Features * Ensure multiple extension instances registered on a single application exercise before/after request hooks * Chores * Improve documentation v2.4.6 ------ Release Date: 2022-06-06 * Chore * Add python 3.11 to CI matrix v2.4.5.1 -------- Release Date: 2022-04-22 * Chore * Automate github releases v2.4.5 ------ Release Date: 2022-04-21 * Chore * Automate github releases v2.4.4 ------ Release Date: 2022-04-21 * Chore * Automate github releases v2.4.3 ------ Release Date: 2022-04-21 * Chore * Second attempt to generate release notes v2.4.2 ------ Release Date: 2022-04-21 * Chore * Test for automating github release notes v2.4.1 ------ Release Date: 2022-04-21 * Chore * Automate github releases v2.4.0 ------ Release Date: 2022-04-20 * Feature * Add CLI for inspecting & clearing rate limits * Bug Fix * Ensure exempt decorator can be used with flags for view functions * Chores * Refactor rate limit resolution to limit manager v2.3.3 ------ Release Date: 2022-04-20 * Bug Fix * Ensure `request.blueprint` is actually registered on the current app before using it for blueprint limits or exemptions. (`Issue 336 `_) v2.3.2 ------ Release Date: 2022-04-17 * Feature * Extend cost parameter to default & application limits * Chore * Improve type strictness / checking * Improve documentation on landing page v2.3.1 ------ Release Date: 2022-04-14 * Bug Fixes * Add missing extras requirements for installation * Add py.typed for PEP 561 compliance v2.3.0 ------ Release Date: 2022-04-11 * Features * Expose option to register a callback for rate limit breaches of default limits via the :paramref:`~flask_limiter.Limiter.on_breach` constructor parameter * Replace use of `flask.g` with request context for keeping track of extension state (:issue:`327`) * Rework implementation of :meth:`~flask_limiter.Limiter.exempt` to accomodate nested blueprints. (:issue:`326`) * Chores * Add python 3.11 to CI * Extract management and filtering of limits to LimitManager * Improve correctness of resolving inherited limits & extensions when working with Blueprints (especially nested ones) v2.2.0 ------ Release Date: 2022-03-05 * Feature * Allow a function to be used for the ``cost`` parameter to limiter decorators. 
v2.1.3 ------ Release Date: 2022-01-30 * Chore * Update documentation theme v2.1 ---- Release Date: 2022-01-15 * Feature * Add ``current_limit`` attribute to extension to allow clients to fetch the relevant current limit that was evaluated. * Update extension constructor parameters to match flask config for header control * Add ``on_breach`` callback for ``limit`` and ``shared_limit`` decorators to be used as hooks for when a limit is breached * Add ``cost`` argument to ``limit`` and ``shared_limit`` to control how much is deducted when a hit occurs. * Chore * Improve documentation around configuration * Deprecation * Remove hacks for managing incorrectly ordered limit/route decorators v2.0.4 ------ Release Date: 2021-12-22 * Chore * Documentation theme upgrades * Integrate pytest-docker plugin * Mass linting * Deprecation * Removed deprecated RATELIMIT_GLOBAL config * Added deprecation doc for RATELIMIT_STORAGE_URL config v2.0.3 ------ Release Date: 2021-12-15 Documentation & test tweaks v2.0.2 ------ Release Date: 2021-11-28 * Features * Pin Flask, limits to >= 2 * Add type hints v2.0.1 ------ Release Date: 2021-11-28 * Deprecations * Remove deprecated get_ipaddr method * Remove use of six * Remove backward compatibility hacks for RateLimit exceptions v2.0.0 ------ Release Date: 2021-11-27 Drop support for python < 3.7 & Flask < 2.0 v1.5 ---- Release Date: 2021-11-27 Final Release for python < 3.7 * Features * Prepend ``key_prefix`` to extension variables attached to ``g`` * Expose ``g.view_limits`` v1.4 ---- Release Date: 2020-08-25 * Bug Fix * Always set headers for conditional limits * Skip init_app sequence when the rate limiter is disabled v1.3.1 ------ Release Date: 2020-05-21 * Bug Fix * Ensure headers provided explictely by setting `_header_mapping` take precedence over configuration values. v1.3 ---- Release Date: 2020-05-20 * Features * Add new ``deduct_when`` argument that accepts a function to decorated limits to conditionally perform depletion of a rate limit (`Pull Request 248 `_) * Add new ``default_limits_deduct_when`` argument to Limiter constructor to conditionally perform depletion of default rate limits * Add ``default_limits_exempt_when`` argument that accepts a function to allow skipping the default limits in the ``before_request`` phase * Bug Fix * Fix handling of storage failures during ``after_request`` phase. * Code Quality * Use github-actions instead of travis for CI * Use pytest instaad of nosetests * Add docker configuration for test dependencies * Increase code coverage to 100% * Ensure pyflake8 compliance v1.2.1 ------ Release Date: 2020-02-26 * Bug fix * Syntax error in version 1.2.0 when application limits are provided through configuration file (`Issue 241 `_) v1.2.0 ------ Release Date: 2020-02-25 * Add `override_defaults` argument to decorated limits to allow combinined defaults with decorated limits. * Add configuration parameter RATELIMIT_DEFAULTS_PER_METHOD to control whether defaults are applied per method. 
* Add support for in memory fallback without override (`Pull Request 236 `_) * Bug fix * Ensure defaults are enforced when decorated limits are skipped (`Issue 238 `_) v1.1.0 ------ Release Date: 2019-10-02 * Provide Rate limit information with Exception (`Pull Request 202 `_) * Respect existing Retry-After header values (`Pull Request 143 `_) * Documentation improvements v1.0.1 ------ Release Date: 2017-12-08 * Bug fix * Duplicate rate limits applied via application limits (`Issue 108 `_) v1.0.0 ------ Release Date: 2017-11-06 * Improved documentation for handling ip addresses for applications behind proxiues (`Issue 41 `_) * Execute rate limits for decorated routes in decorator instead of `before_request` (`Issue 67 `_) * Bug Fix * Python 3.5 Errors (`Issue 82 `_) * RATELIMIT_KEY_PREFIX configuration constant not used (`Issue 88 `_) * Can't use dynamic limit in `default_limits` (`Issue 94 `_) * Retry-After header always zero when using key prefix (`Issue 99 `_) v0.9.5.1 -------- Release Date: 2017-08-18 * Upgrade versioneer v0.9.5 ------ Release Date: 2017-07-26 * Add support for key prefixes v0.9.4 ------ Release Date: 2017-05-01 * Implemented application wide shared limits v0.9.3 ------ Release Date: 2016-03-14 * Allow `reset` of limiter storage if available v0.9.2 ------ Release Date: 2016-03-04 * Deprecation warning for default `key_func` `get_ipaddr` * Support for `Retry-After` header v0.9.1 ------ Release Date: 2015-11-21 * Re-expose `enabled` property on `Limiter` instance. v0.9 ----- Release Date: 2015-11-13 * In-memory fallback option for unresponsive storage * Rate limit exemption option per limit v0.8.5 ------ Release Date: 2015-10-05 * Bug fix for reported issues of missing (limits) dependency upon installation. v0.8.4 ------ Release Date: 2015-10-03 * Documentation tweaks. v0.8.2 ------ Release Date: 2015-09-17 * Remove outdated files from egg v0.8.1 ------ Release Date: 2015-08-06 * Fixed compatibility with latest version of **Flask-Restful** v0.8 ----- Release Date: 2015-06-07 * No functional change v0.7.9 ------ Release Date: 2015-04-02 * Bug fix for case sensitive `methods` whitelist for `limits` decorator v0.7.8 ------ Release Date: 2015-03-20 * Hotfix for dynamic limits with blueprints * Undocumented feature to pass storage options to underlying storage backend. v0.7.6 ------ Release Date: 2015-03-02 * `methods` keyword argument for `limits` decorator to specify specific http methods to apply the rate limit to. v0.7.5 ------ Release Date: 2015-02-16 * `Custom error messages `_. v0.7.4 ------ Release Date: 2015-02-03 * Use Werkzeug TooManyRequests as the exception raised when available. v0.7.3 ------ Release Date: 2015-01-30 * Bug Fix * Fix for version comparison when monkey patching Werkzeug (`Issue 24 `_) v0.7.1 ------ Release Date: 2015-01-09 * Refactor core storage & ratelimiting strategy out into the `limits `_ package. * Remove duplicate hits when stacked rate limits are in use and a rate limit is hit. v0.7 ---- Release Date: 2015-01-09 * Refactoring of RedisStorage for extensibility (`Issue 18 `_) * Bug fix: Correct default setting for enabling rate limit headers. (`Issue 22 `_) v0.6.6 ------ Release Date: 2014-10-21 * Bug fix * Fix for responses slower than rate limiting window. (`Issue 17 `_.) 
v0.6.5 ------ Release Date: 2014-10-01 * Bug fix: in memory storage thread safety v0.6.4 ------ Release Date: 2014-08-31 * Support for manually triggering rate limit check v0.6.3 ------ Release Date: 2014-08-26 * Header name overrides v0.6.2 ------ Release Date: 2014-07-13 * `Rate limiting for blueprints `_ v0.6.1 ------ Release Date: 2014-07-11 * per http method rate limit separation (`Recipe `_) * documentation improvements v0.6 ---- Release Date: 2014-06-24 * `Shared limits between routes `_ v0.5 ---- Release Date: 2014-06-13 * `Request Filters `_ v0.4.4 ------ Release Date: 2014-06-13 * Bug fix * Werkzeug < 0.9 Compatibility (`Issue 6 `_.) v0.4.3 ------ Release Date: 2014-06-12 * Hotfix : use HTTPException instead of abort to play well with other extensions. v0.4.2 ------ Release Date: 2014-06-12 * Allow configuration overrides via extension constructor v0.4.1 ------ Release Date: 2014-06-04 * Improved implementation of moving-window X-RateLimit-Reset value. v0.4 ---- Release Date: 2014-05-28 * `Rate limiting headers `_ v0.3.2 ------ Release Date: 2014-05-26 * Bug fix * Memory leak when using ``Limiter.storage.MemoryStorage`` (`Issue 4 `_.) * Improved test coverage v0.3.1 ------ Release Date: 2014-02-20 * Strict version requirement on six * documentation tweaks v0.3.0 ------ Release Date: 2014-02-19 * improved logging support for multiple handlers * allow callables to be passed to ``Limiter.limit`` decorator to dynamically load rate limit strings. * add a global kill switch in flask config for all rate limits. * Bug fixes * default key function for rate limit domain wasn't accounting for X-Forwarded-For header. v0.2.2 ------ Release Date: 2014-02-18 * add new decorator to exempt routes from limiting. * Bug fixes * versioneer.py wasn't included in manifest. * configuration string for strategy was out of sync with docs. v0.2.1 ------ Release Date: 2014-02-15 * python 2.6 support via counter backport * source docs. v0.2 ---- Release Date: 2014-02-15 * Implemented configurable strategies for rate limiting. * Bug fixes * better locking for in-memory storage * multi threading support for memcached storage v0.1.1 ------ Release Date: 2014-02-14 * Bug fixes * fix initializing the extension without an app * don't rate limit static files v0.1.0 ------ Release Date: 2014-02-13 * first release. flask-limiter-3.12/LICENSE.txt000066400000000000000000000020451476516161100160360ustar00rootroot00000000000000Copyright (c) 2023 Ali-Akber Saifee Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
flask-limiter-3.12/MANIFEST.in000066400000000000000000000004101476516161100157430ustar00rootroot00000000000000include README.rst include LICENSE.txt include HISTORY.rst include CONTRIBUTIONS.rst include CLASSIFIERS include versioneer.py recursive-include requirements *.txt recursive-include doc/source * recursive-include doc *.py Make* include flask_limiter/_version.py flask-limiter-3.12/Makefile000066400000000000000000000005351476516161100156550ustar00rootroot00000000000000lint: ruff check flask_limiter tests examples --select I ruff format --check flask_limiter tests examples ruff check flask_limiter tests examples mypy flask_limiter lint-fix: ruff check flask_limiter tests examples --select I --fix ruff format flask_limiter tests examples ruff check --fix flask_limiter tests examples mypy flask_limiter flask-limiter-3.12/README.rst000066400000000000000000000110521476516161100157000ustar00rootroot00000000000000.. |ci| image:: https://github.com/alisaifee/flask-limiter/actions/workflows/main.yml/badge.svg?branch=master :target: https://github.com/alisaifee/flask-limiter/actions?query=branch%3Amaster+workflow%3ACI .. |codecov| image:: https://codecov.io/gh/alisaifee/flask-limiter/branch/master/graph/badge.svg :target: https://codecov.io/gh/alisaifee/flask-limiter .. |pypi| image:: https://img.shields.io/pypi/v/Flask-Limiter.svg?style=flat-square :target: https://pypi.python.org/pypi/Flask-Limiter .. |license| image:: https://img.shields.io/pypi/l/Flask-Limiter.svg?style=flat-square :target: https://pypi.python.org/pypi/Flask-Limiter .. |docs| image:: https://readthedocs.org/projects/flask-limiter/badge/?version=latest :target: https://flask-limiter.readthedocs.org/en/latest ************* Flask-Limiter ************* |docs| |ci| |codecov| |pypi| |license| **Flask-Limiter** adds rate limiting to `Flask `_ applications. You can configure rate limits at different levels such as: - Application wide global limits per user - Default limits per route - By `Blueprints `_ - By `Class-based views `_ - By `individual routes `_ **Flask-Limiter** can be `configured `_ to fit your application in many ways, including: - Persistance to various commonly used `storage backends `_ (such as Redis, Memcached & MongoDB) via `limits `__ - Any rate limiting strategy supported by `limits `__ Follow the quickstart below to get started or `read the documentation `_ for more details. Quickstart =========== Install ------- .. code-block:: bash pip install Flask-Limiter Add the rate limiter to your flask app --------------------------------------- .. 
code-block:: python

    # app.py

    from flask import Flask
    from flask_limiter import Limiter
    from flask_limiter.util import get_remote_address

    app = Flask(__name__)
    limiter = Limiter(
        get_remote_address,
        app=app,
        default_limits=["2 per minute", "1 per second"],
        storage_uri="memory://",
        # Redis
        # storage_uri="redis://localhost:6379",
        # Redis cluster
        # storage_uri="redis+cluster://localhost:7000,localhost:7001,localhost:7002",
        # Memcached
        # storage_uri="memcached://localhost:11211",
        # Memcached Cluster
        # storage_uri="memcached://localhost:11211,localhost:11212,localhost:11213",
        # MongoDB
        # storage_uri="mongodb://localhost:27017",
        strategy="fixed-window",  # or "moving-window", or "sliding-window-counter"
    )

    @app.route("/slow")
    @limiter.limit("1 per day")
    def slow():
        return "24"

    @app.route("/fast")
    def fast():
        return "42"

    @app.route("/ping")
    @limiter.exempt
    def ping():
        return 'PONG'

Inspect the limits using the command line interface
---------------------------------------------------

.. code-block:: bash

    $ FLASK_APP=app:app flask limiter limits

    app
    ├── fast: /fast
    │   ├── 2 per 1 minute
    │   └── 1 per 1 second
    ├── ping: /ping
    │   └── Exempt
    └── slow: /slow
        └── 1 per 1 day

Run the app
-----------

.. code-block:: bash

    $ FLASK_APP=app:app flask run

Test it out
-----------

The ``fast`` endpoint respects the default rate limit while the ``slow``
endpoint uses the decorated one. ``ping`` has no rate limit associated with it.

.. code-block:: bash

    $ curl localhost:5000/fast
    42
    $ curl localhost:5000/fast
    42
    $ curl localhost:5000/fast
    429 Too Many Requests
    Too Many Requests
    2 per 1 minute

    $ curl localhost:5000/slow
    24
    $ curl localhost:5000/slow
    429 Too Many Requests
    Too Many Requests
    1 per 1 day

$ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG $ curl localhost:5000/ping PONG flask-limiter-3.12/doc/000077500000000000000000000000001476516161100147575ustar00rootroot00000000000000flask-limiter-3.12/doc/Makefile000066400000000000000000000152271476516161100164260ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Flask-Ratelimit.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Flask-Ratelimit.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Flask-Ratelimit" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
flask-limiter-3.12/doc/source/
flask-limiter-3.12/doc/source/_static/
flask-limiter-3.12/doc/source/_static/colors.css
:root {
  --bg0: #fbf1c7;
  --bg1: #ebdbb2;
  --bg2: #d5c4a1;
  --bg3: #bdae93;
  --bg4: #a89984;
  --gry: #928374;
  --fg4: #7c6f64;
  --fg3: #665c54;
  --fg2: #504945;
  --fg1: #3c3836;
  --fg0: #282828;
  --red: #cc241d;
  --red2: #9d0006;
  --orange: #d65d0e;
  --orange2: #af3a03;
  --yellow: #d79921;
  --yellow2: #b57614;
  --green: #98971a;
  --green2: #79740e;
  --aqua: #689d6a;
  --aqua2: #427b58;
  --blue: #458588;
  --blue2: #076678;
  --purple: #b16286;
  --purple2: #8f3f71;
}
flask-limiter-3.12/doc/source/_static/custom.css
#flask-limiter h1 {
  display: none;
}
a.image-reference.logo:hover {
  filter: none;
}
a.logo {
  padding-bottom: 1em;
}
body[data-theme="dark"] img.logo,
body[data-theme="dark"] img.sidebar-logo {
  filter: invert(0.7) sepia(0.4);
}
.badges {
  display: flex;
  padding: 10px;
  flex-direction: row;
  justify-content: center;
}
.header-badge {
  padding: 2px;
}
@media only screen and (max-width: 768px) {
}
flask-limiter-3.12/doc/source/_static/limiter.css
@import url("flasky.css");
div.warning, div.attention {
  background-color: #ffedcc;
}
div.danger {
  background-color: #fdf3f2;
}
div.info, div.note {
  background-color: #e7f2fa;
}
div.tip, div.important {
  background-color: #dbfaf4;
}
div.alert {
  background-color: #ffedcc;
}
div.admonition {
  border: none;
}
div.admonition p.admonition-title {
  font-variant: small-caps;
}
p.admonition-title:after {
  content: "";
}
flask-limiter-3.12/doc/source/_static/logo-og.png
(binary PNG image data)
flask-limiter-3.12/doc/source/_static/logo.png
(binary PNG image data)
yeuѱWr^eg~Rm!KA/VhC^"t̿7J.b ;y,u+EXv 1=|~.W Tס3B*^]&օ,bbm(]^pᆬiPz&z v?;be5 5 mJӕVx1*O@A6dbրO.g,p"ל:9!5`o)qCI .̱?=?=QkV?I˶Q}넱s3tY#C?;qX g}mK2 ~oG*+c{M5z ^rqʜm2u i;cAS oPL>N*xwNl(MsM-Ez䃄ҋ}!B'ĔZĔumEkhW?ELOw7qqtu?%\zĭu,+խӶR3T IF(hw[ߜu{vTڠK[|!kYwg6~匩hߚ9ၩl9Ʃ}6_3q=g̍jY7z༒F{׺_2MEc{wvj,*+7n۲$7d/&v2t支&TWU<֟Uj zvJ;\v]}0|+:붺PUWD)p_} ˦:F6ZPz]0 )w(y3CYyMhbj;=jZQ$J:ƩHC,"tB%Nb)T{ib1qW)ka1?/g2< ^1im:]w_t cF6 P&-<& '&.Qa,Kp`xe=&LjF]48%}zkfQqҐQ,/OV7}oGuzD`joYXV39%0Pty@>5]3Q}uj5l5B"HsZ%?:7g%ݏY5OLȳqHy&ۧs>-6I$ux,Ct)ldhj;z[[la^K X8QC<ԋp>:-Zd'ň"^)6](b'pǤHxCQ'br1麶 PNȷ.=;'BWŚ%\.EՖ)ZjKhk\fy^rBWEY8^N1)&8}4SU5W'vIܐv@pY-M~yJ `fyJN[Q/FRUܝۜwKX|;4 8b!o۞6,,<􏭢uiDB@˪n.5+Y}auϘSW~zGd2!nސis[D``G@S*{3B!qpm_?UrWUE1 n 3CTuӶXڥʅD?r ĹlbmjQw"Z\[7:g"35i%qhv4z|s\8 sxHi-MeQꊉ4\\OutqMq+ݾ*$kBe=cN ~6'DJKN}[ݍQ%o Ο¬30U: &"h"Gâ,'gc/h~_I1KkJY o0}eŦ3 StNֺ5^[]=_Kaamu >fW!8(t*W npj 1TE]UL*ʓkԞR&acnns&︕R,"n%WfXz'&rUs'yn!pKm(e|N&uZv~|y0918r Wzqb;n }!ZX: 3jpsBO^hs|ICkU g^w(4X Nы\I~IE?=j|^D9f'(t6TmUמE-cu<:!X#jo\) sULl6 9 ;.2 z~Q5wq}zޚOë*[f.<*~Wl=qrXkNYwfӕD2/sgm0E,7$C9lW uǺޥ`"ōqs!6G"[RLf>!6*Q'q$k˓V GFL6R!ozߖ*[= s&!FxCK(MǴSS(_seăGRlŪXT8/pK_ri W_RCxE1Luyʲ,i#öʠ%$TM(ڕ0g9kŦ{ci]7+s\% dʿeSMF.^=q̣0iM8( qO2rF k8"*#4M:3_]a 2va ϥ6am$ weN4WiHڴ* (gtƒ_Tn3A297~h()"6W`5u,fKK+:;1&lU>d-!/*8*Yǣi%ǹű H^QğP~Rߊ4U<,|*ZQZpŐI590h_ yP˫l' ,h [T`r9Ϲ[>M8'~ΘM A隹^_eJzfH I+^ANvZcRܭ`ס\ƻ{C=j^TDMR0BKOudKoV0Y"qG[.Zў2r6NlFv@U/ r;f p-QڣRG␅<*v bɧCa9I״%]ms7/*/('2M&W_|rHۖ/8~F}lxgE+žXzI\HTPqo0$Vc26hGF#;/~r!ծ8 <$wZT HE[+EM+;o=bdܪ1`&jYc貓G{*BI(>{JiY|I G*@p5_{=^ Wq2 <}7yp2 D{,zxǁH_𝆵VWJ3N%)dFɥnT_#UDT^~QNa<M/ͥrdS.0Uq\)?Ҍ*WpxM 1|YNt~M/xG%97_#)cF^|m'qwaNJ=]a Wk]j͡Nn=4!EZ36ܹD_9yA XYn B'5Y>0dX0wE q6AűE}q~H;Opwg ?\Y_-ejrrj-9^j39sBTڸ} ׅiqi{U(0"䄃fqtW7칅ѓ۹؝Z>Sg?q¸/Q,o_4 7 JS7#nQb7Ճ;R RUxV9%׸Y!|<h15L6Jmj:K|r^E+-1ה[4wX.-fe܎fN9 ;2.WΩ)K 8LX-dq]ś ΢hlyʃ6\J+RUӪfK dz\XCBJMYs ~&ڬ&`ğ!%kZ6=SG5\ u*'oM9 (y#&mBk8eq[(,6?ezs60ΑZUG& ͚W,Ua&RE@nn͞bţ0X1Gnl*ͼTh~/Q>}Aַ>٫m/'$'ˁ>>>u^J==BW90#\zvENӳ:y?ZbX%DQ[^p( N#%r-^ҺR cnu)zS cu ]kYsu"XZj 6'NFVuy@s*͛kH(]^0[fgr~Jyw5]4m .:ៀڸu͵(dC9_ŐԥP -W09_hʿ)EKؐ@.4EYN  䳓upe|Kk|R# jz2C[Fڙ.%9`IDAT3X KxXҠ܎|^m>u8\ D1ztQ3zh\q+kd /2eN [d)VN~PtꈖrKܬ$9JTlc|)\) hʇ霛pmKTZ`H>㠯^K}D>ae za.h2=|&[0>KbWG3OSP,T4ڜ 0I$w[sgL$ADhukTRE[ΥRk\p FDyYRh>nL8Beg?.0r,DBob]V ND 4z٨ * >@2@ )Y2ȇZ.=}_7dBe,9`8aOzl,#9`OsdU߾lq`њO]UBxEwϞ]e=RΚ)fkvk'dI$ ̭28"lVX Sb0p\|aRUq &~e|i6ϹyE%L&'1=먧RwG}3CO$I}6ZA0">v3u "80(,=q Ǔ6wmj~!D@TQI-[YMٟS9:Ej|O p"n(Kɏ۽՛JVYK)e\S XxnJ#Ə}Dab* 4ruxNG\g.# " A f8C%4?ut09]qeԪ/ck+t_6n.s &;:[;I 6 "zrMmǙ/`}':m~F?Dp18S?ҭb7p4#l[SJ֧8hNsJVb-K.$,6]mYh~완r:S70Yvv.̹ vS1#HǙb)2=Qk M86G=>zCb![J !f_yl9Wdү7o(S60CCt~̦<5.`MuoJÕv\)"b6,1(D96vjyȈ΄Xm#l$c@}{ ̹ڳap2RL3'[rDm܊Snv锍 ܥ 0|ccj?EQd\aͮ`7drHWܛ}[WSz&MNJ̊4 !vk%o-yޗe;z̘v^ǀi O|e`z,6}kI&7l2Kum)ጁb@ _B?D`ӷEMgZ77~q ꄐMmk\ t-qKoWSHl3g,hwAAK  {wUڽJrJϭ+e ܳi +ǹYiwqEjsYvճa6Fd)7Kc2p;Ӕx]g ;{30Mܔ 93,ox G| -5ozߓ";vɆ+Ԧ S؝E)` -[},Ct.D7 />bar١l~e/S7@kʠاgbΤw8gE l.wnWߧ@aS(&8@sPlےz–,rb b3aVTT[[o"o-^y7z&ǝ#C.7rfxZ6|vgh#Cj1T"=(Wgh򉕯5~郍ڃBou#z+?/e? 3[>:/agq>!摩IeL[*놎p0KؙT1XӠʞ!'n1Ӽ/-Xmr;T[JmnHYͬuќ`ԳBfu>ݮ_*@zF|Չ39b5ꟁ1-LOZ_Vy? ?Ӷ$Yc0Fr=!/+غa{p^AVж\=B-GgIOMnK'SbH?7q̔Q٦쩧gbM=>cxaei˳5Mro[wYSL[?Wy6Iմ&۸{Y}ӅV|OoQ7ɼYXDUq%k-^v9pŵ/_cs?i+n'BRv{Ȯ~u}Y6ZM!#5-'w6*Įʮ+-ūojө^bh>W6ޕك,yn`Zl5~[!-Ms{dHa4%k/]xUfPbcOs%oSsFi=dI뎭Ejv3Ӵ*qd3$6>noњ0C0rۿm?!wIR~^z@ȾER;~=nߋƞYHk\^oݯ[)jM6nZUW^m7miωrZu94 f~1k =|" [c޽{Hs f;ze/f~T"#M'XڱaQL!OA!Ęsr5ۯd5swmzqe0;s̕[NBTӃ0L?^s<wg1v{)3eY.3hiˬb>q^ALɦDSuM/Mz5oڼcqk.#Vv&2 Lq10RTRHs~p2]< Xm /lA$L3 2yR+}f/bS.36~1F覇< ,64`t2{W:Eڦ 3/_؜vLӏXb _髎}Oy#f$ Si[UJOn?tbg~F#+[#SkmCL[?sdi&vnzfJ&ѡo8u_dzη4z e~>|έpWLa 㒅~'ZX2wqƚߜQZ_5YgߧS_B s՝'g#>b=C{t;q)FuIwjVLF_ιWPІ6isDž<:cdQl_o|ަn^p5ݸ/~݆G0| :++qpƽkaU,b{02ٯ zfHO2JЀ4Ӆ Fqf:m-cdn^ذx? 
֦M+*H0L&a9[[kj7|2m әq\aκ6PLGf.62 &i6G}yy ;=<sg/ԝf~;Y[z.ًHLAy1Kf3uuIi99iڗlg|IfJ)~EwH6OW>%r)3 R$0 ^":yXfVViaQmK6ïTP#T0Slarfgւ67+;vԛ>kh:I|ﰨ373]qYG6e'236 fnn$bm*#żt4כV` 8}122L9_͹>omi)}%Hrg|TӖvo/4 j""<*oiŇڇ&ۍ#'؊`՘ Tu;3k[ s&ߒl[ ;O0z)$x3}T~e-稶2z 24ez~#MϴEprVLcOZee<0~ygwM6p.ȒqAA[(VLH(/oZ ɣᫌGkdB=45쒞eaU""]N36JIr]l1nϨ=uq@Ʀ6'P=Tip)-! &WՐe~grkإVEF[4ɉTA94P~ovC25OɨQ2J*DqXɭ@^_ݡ!.10o/v d]>CYl/\ϨzIet0#8FqKߨcI2]5G`1ْo. %.a6jny#jm OuZB}%}u8EPI6n,̥+pNV. 2;FMR| @ l^gnh2F=i\6ծIʽ"e壼E2q(~`FƑvЀUܩf+x*]dvA I)8bp~`5Tr9KXflPAa x<L@h.J$)v}<7A@8!AdHk(8bD3}IA DLx(2X {56b GsL;ٳ;N8n}i+FW )XQ >~`o(HIK-Nn; mg|D8aԼe-mӻgwy֒/fTgw(-xl@i e WN;͆NFSqYfڒ:vg׷ڔwD%VmdlKSA>]#UkbuUU2eP#zk9T(Y+eY8Sr&Q˦Lsg< ]DGKw_-Җv{(Ec%fZqj_^!DG{vП!}ΐ.(l<-.GF\:Tc!X wio4ہ N8rQRϤ9$[.wAG?,/}H,&Ȗe\L?0 1' J6Moqw4߁mFzw _iP kDQ>Rֶ=tɄ Vn\۹T֡3B5D ucSFFZYWz u:#͚腖[+KctM!RLRO<9o6 y{ө =A@z-s$XPmf{q#qVL=]1jc>bA]]P%=/s' 5[paO9jhr-S?D,mPRSiFM(&yl֞{onfAHC#Et sgnX1Mfv)x *[M*LzFu}e⍾'=S1 V/*86& x'@)alK{=&ĂJa AQk ĺe:hCBJi0U$P{iXb9%8qr"_9:evFTZ,O ;Ŕz=T:IN&P 9AqzfzWdK8Z-qVT#;jc}᠚dDdq 0 LF06Y<f{sޱ A<-!0}e_.f8՘N s>j5! bIC Y{t624eJ0R,6ty-@u)r9~0eG?u9r'b VԆ\|YX`ϥb*D&Vˉ b=6xp|PvTlQ)^;NXur!aAc{nL XK.p;ZmКjU T6w~|!76 [ͩpg'b0iu"8[eq5 95 UiLכgiq0:ɉѮIQ':BG)2`oUuGYд#;{Q|v"./BH°XW{IM%b:_f1;?;ByJP-ir[@aoѽY" q:+L^IL4{1<S];~N䚾y4CODo`l^FHr0F0#ṬWv8ާ-xWMXԿIu(PAMϰY5AWImŔ#$88POY}"ŚKUrGEZCU"0Eo&aI.W"i5()B5 7UQuo^^H5`g`,O]RQMamSotf b!$@k9{#b}R{Zd kdXU9spvKξhP#%Qr 2ڽ }ڲH>RbL`zPىjW5TqM)Jk?1E~'!wZ?gq_[ rGLu_u,9}dV^/[ER}/4 N8P7[ ɸxlq\W(},/˛TSeV䞋 ogi( Q #]kH:6'^[{ܰX{o[z6 MƼ]Бc{EѭB+|X_()J}:$o6^,"6,?ur}KĨ'LrjJ؂n#b^F]4Y\2LѪZ 93$䶠3 eqٞd)}.3쥧ۈ 6r( l{Nu`47ѓG/Xlw"]_DlbYb2s ^Ysp 4!جdcDqe+IĘ9\F1~'b Y&PxE|Ұ S|Iu1*4OEL77H6Cę,-KW,%ˎMC: T{-(#waەx\)RuiEM>r%݀BjA;є}K,I,EYL u ^Q'ROs`ݲ9@'iF\:k]oiW'% Pr"b-;N tl9LuS9 iצ$_UyY^MS-,I胈Uޯ_h k]EgR.U'Vߕ&>*=!bgoۮ^|.U"fK&#_'b l INd$b u^kUabEނ[R\*VT@iɋ*oS#"yRк0Z <{uY[)Qm_tc`FߞMnd"jqRcU|U, Ӳ9fbYb_Ѫ~ښ.ЦCb max%FFU\N}|HU;(+FS2!{W}N,Bų~"Y\+6ni'T3^S{E= /iɘyr5I88ж r6ݛ~L!KB?@k}fǯw(h Ff6F/ #o™9't뾆88P6ٜ˅|}0/7-8@G}y'߹lMɓ4CYpEQ+ʹN҉t>B؆'# .F222&*IڎhhE>L"J]$_u=%[G༌Em9%g_Բi!`Z(5^Q ̱<~X O1>IP2H$=/͟U6V'*!6_V.ѥb)2 5Tg%ҡ4=5\+G3mʨi RV)< B7I@yG"FF޴Jv_6~XbՑqjq)4Ft_OM1mGfdS(m a*Hq]B&J5L˫s#Mk6(b,V$eC f t1|N>rQ:!48`g8h^n  (.&"%i\x4/nXV`Kʩ dV\1Bj-G%GkLש .3%!']=f^YŒ^|E g3J\42J+͟/ĥzYpZzB-KuC5[r-[KOuFV DsCeuQWq#}4KnPzXfaR~J$ps0qra r8rUpp|!N~x@)ygaB\h8bNʠ̐)+[CS ؒhҟyl&i^CHh1xGWpXi}0Zչh'P., ¶JIkx L ;ÁDGjZ&p'Ћ䵢2৙_{<籥\7I:G?? #S\.׵B@%C+қ0wO'=;ԫT@\+S|[|Q_'{.t{.ȯ[ȅK,kP9$>N'ΔU&H/Ǜ2% L>5Yu*'֪IiBY( rY F(YO[\ʍ^r uz^S1%L)H<ĺ+֝8K: E˜i}ڔry܌܄ƞ#պ$-TtGj,72"RǏ ')eĔ}1'C->Qr)^'._3#D\jph'D32Y)I0n*NDp8,B0#oS>\x>Q-(1OMREϱK;]2 "fɗxA:.yFP¾z_Pa# ƒ#l("xAŅ}It9{R5Xa8!@pkrڂɅ0t#d0P"̪wzqf10SzSri}Q/?jwTz/#;M?A9r'b :Ӳ/IF MmUPmƟ-oKyt y0|z%Д W* hѣ l1~HzVC9HU "@kcd rŇcGwƒ$1U*XYrѐV>"⇦QN޸ 0vԡ"fa)/L;NլERNod5Y/? #&%6HȓjJGpkUM.jK#sFj:ayNŃNP/2"\exWεKS"p/]n\c/.=AV3 G޶>|P?M^}9m#OE\o)+s"s@< u6H7MU3.C e]xu`NDÍp;8G 2&vit;;ڕނ[çNܢOßT+NXx6 PͿbHx;2C- [<\$t!P1HJPL ЋZCE*FYߠhN}sD#g!v_?$9%%ioyݒw-qZƢ UJ 415分I]m;Z Zywa-x)$Qou/9%:roTvW4&G/QdM{k HAYܻ+ӣHbZjv7Thh ;^Zo?&{:#?$AFwL}N? 
CPbK%V,mhF+ZЪmhC+da~=R-vAT1>O"XH BS-b?8A5~'8%bA0ɇӇsp|[ M|~J8 qp| 10o#BtQRǁN88cTAs{]iCp̯_ǁN88ca2ۃ@ 1DW3Fఃۈqpppp'L,KLbrppd%b =?0,e 888l1BRHK@h۱5)ǷvKW^%tEXtdate:create2021-12-20T08:44:33-08:00y[%tEXtdate:modify2021-12-20T08:44:33-08:00tEXtSoftwarewww.inkscape.org<IENDB`flask-limiter-3.12/doc/source/_static/logo.svg000066400000000000000000005464041476516161100214030ustar00rootroot00000000000000 image/svg+xml Flask Limiter flask, limited drops at a time flask-limiter-3.12/doc/source/_static/tap-icon.ico000066400000000000000000000354231476516161100221220ustar00rootroot00000000000000 :PNG  IHDR{`:IDATxw`[W[Nl$d BPچ(@B(e eSZ ) PBg<=)ɖ}?$۲㕁?{s?~Ǐ?~Ǐ?~Ǐ?~Ǐ?~Ǐ?/UXUX%(8.G_jj9`V8"iX8X|WPv$LgH<$#w'zЍӔw@$t !55]z/4#?d3$C<P]mhG ~@Yw;XA:v!@ eemK!l!BA[)^~@+u}kyY\qG~{; B|3ɤ<:&0 вzBƳK |(ٳ9MDdHR XpoJOB>OBro'#S<:Ta a+]2<$Ӕ8rbz@оk %rT@K3F<V4ƹxSުXtA("1 ~ǽ{6@GQ rHmM4 JLj<UxiUبkNK(@ʐ4UǴ9j?YD'將]WmejK _V%`z/Zi^vԣh٘*$Ƃ_e;\OؙX 0J3g'g%jk<{hT+AٳU_ȶ.\vcwU!l9@V2F9?/Ɓ|-w칶DFKǬBVU f~ϝ{o=b!Om$<O$9By#Q)jL4 ^Z7Z+,l,g6-,,2 yu/6pQ\ $+*S ۠+f-[dH66R]seCN^!%L:#6R5@c =ʚx"w;LxocO\h:^3wD"^&_m6ȉ|(V$zXQ^5Q|;] vlqj-z;Mhm:ۯXYV;'q7Zy_&y;y4纴ᏹR8PY$7ō'h@i ]\ ލQ25; NU9J=Sp,xlA1]Ej[(&@5_tްAWVFD,pG1/v5T(13aI^ qTD~WU|Zͫ.FtTuQz<-_Lk]&~MTo$gD3v G.I;8RP3,h~CY[ $^, aNg\4Q%́%̧#]mM@N-a .X sklq~&yHLR/Z:֑1_&hLKy>ߓz5R [OU*ay}:s 后 1ud˜8C;O^^sH%=/6ֵukDjs?S_ћfwu9Jc;wS L$OΛ 8EkZwO9sM BgB"WyXBH@uܺQZN3/'?K.8F8xUIP b嫃Xt>fsC:y5v nU&wQz?6TL>N-Hz7Tĕ)xYNWbi7eb3!'gۼ3f"3i #6sX{ ;}/Xc8`I#DZa=BuyE |R!XTBehaskDTΦ`3OSapp `16yvqyM)n/u^l U-A" `J<~ BZ\3S{P2l2N&yE˯:o YYaLf2/Y2/ c1ni/DYM12Q"Mxj1^E)5vxA!>G%L (aT`X6%t ¹#6#$'V5ՙ284>4g;g 0aRʲu` a8RA:(;i{]ZcPn ݪj旭;;¢pipl'HX,bso_plv5oG=|ݎ|af] 0{BBa=.ť7naduFC pq>B;0u}1]L#oq 0> >|̆ W{@5#\MY_ 9}`k{E Ńnipf0c$^b*`3_b D*f`"r u~/X-xV@w?6 b*y J N2i_~mg_|ׂ4zO:iLU;{BI8M 0bh7WCzSU&зq՟<S@G M3]a-$<33W*s{oh!a3Tfq !q Y>f22؂Q􍘌A?6ڇWgzpG^gWohc'.g݉ڂ>JA 8=, Ñv7jmif\*<>tyH.@?^A3ą&x3Ca0.D*b ; CZh܏b܆m~>f,<᪆zT\tOGޅR4 o T  BY &aHaՆ5yPS,t(rCkhu[,E*ЃQ3>|" p$Q9΢ @r9Eo, c'~^.D62PkpA0a%9 E@!VZ?Q82l=PsU\o s Ў8TBs .5؊xT‰6lXZ,sq8 h#wV܄wWH( 6~ 4p+*6c  Nq{;$1g^?C Bnb'5FwÁvOq.=\bXXxH}NkCorܚ5Ex=Rm!6^oSp " H$o] v@@{[oiZ7Ь`j'~Nh}AADt3{}6He2Џrж.ߡß}D|ɨYcZx*+ 8몞Q*U*kS9@2d  K$@k% w\{@^ mnц"\"v6t!IX0/)(g T'8`IpijB4ʥf}XbՂX/ǝO5UDta_;+nmeb(aȍ'F~Uك?MǤνH - Z]sU4yTND4}xPkɰlRҚa;EiBt аMa_LWb Hl:fݜ) Xq@˜"õOyS5+~Z&t+[cOmcB#8.MQwg_T\2מq 6w$ o)~1LcuZ}OFd_9G  it.z<#xOx}v;凑-K9%RWxo2Կ+)v$|ü#EFb!vҐ K판fVpkߪz]Ks_Qq4D+Qe-iiܕv;8MҔ؇=\l$0uUnzhK#eCZ.kf/ʍ~{Aӫ-\ R{ `e5 _T?ܸ!P?iY6Hη5?Սڀ +f # -2QeNA]AwgUwv]U<s~{+u!|CGf`>,8#m]Tߕ3[inMvQL!Myif"Jv Ԗט"¸e9 DDU\2;܂A7N9vb%[39^Tg+K28%ɺ;{wMP:Lbo%jPpZ}9Sj[&GFk\Kvvk\Q ZuM_N9=Շ@ ?8__S!8]/'"-*OapAmY{jkG28=+Pyoּs[rqzW9Ad% 溎VO)e9 @glD3ڻڃAӏ =!Moß(Tݛeop+ X,в%*' k -?囎P0V@8G!0je2WG_+jn[ !Ay;dU9:ʢĥu8`Um$$`w+^]+Rx_*so˭\MQ;5O:'dQ]8|~;7fݼ#efHz yѲ꿑Md?9ܕſ}#:O'"jz'b]񙼷GVg6sU7i:⺄ZuPj Ğ~d<񎜈(fZ^ʼXv CDQ]yqݽgsno9OU$Q+D&`#m@M%3׿-J^ 鸽թ?iɄsDiMӏ&z!zفrݕX,`:[' [&d~Fgŝ =D Hrr '1beQA./D_/HL8=:/c摳WITZ?]( 帾OdR*-KjJْʸc "Õ9 6`~Af y 7v!0BD+MwG=ϹFުhl XCikwby={hmI#efs>'l3vA3HaO)/RB5&:oظg":7M,״{?Ʋ^P,7 ;y?(t̋eD:It9kj>I E[*B uٹJ$1ۈ Py}&Ki҃`guw:;+2~_a6جyʋ*ޓ-ix]BnDEl6 9+Q]rP*D=s,\st\qkNķF =ID Ugo͵>yh_t:hEdubvVEMO* tT[^;? 
քqܻ6>ގ&( XKS}_F6QZDڲ#[9NU>~uW2ײa}2UDž}1d}.2q]g߯l| 2Kꯒ&ӪCR{snmchȾ$Վ<^:@x9!ȵ'Ԗ\% )U}l-d^T<zO&5>z.#$9^%Jj\_1E_ysnlw-\Guе˥#Z2FCHwİ+}4Q̿dz۾+OXY`Dsq9D5-`Aq]I+Y,D9JY&gG3@7ޘJ*|% tXDvC Kоd6גiKQyo?gʫPƷs#JVZR8=;󷕃b{\5vmߒu@W})J]8Ȯ,WV.x#[S2ץԬۮ*/wg9 tR9 l}x̯Ct*?@~+hUSw7 DkLPnYd%"z=gЈ{Xx2&;3ʭNԬ2vPT8: }#X Ґ.rv-Ud=ړ輴txK'"AOrկ_ҤY'F4HdJmRFDT{'|:ewliʫ7#ay\WO1oUhJWHFO)HFB!'AӍq{^RHۑ9="{h}+J:uZn K嗗\TƼG2r5Ο*>t[ޜh'Q.뫒:,fbS!ПwY@KrȵSa;OMcNZH6(õ*Ee/~&KiKVuڙTjM6ս5e-zQ8َt#Z sȳ' S9 7uZRP@N粑Uя7> p&hW5'2PpbЀ \)&R*M̳Umyj#ǿrEfazkގb^v+qBD:IOw'9Z潢-9(蚢mՙ9\%e55bwrW.VbR𺷣Wxhx k]v̐Q+*<$Vz"}VڿsrM)k{]|c[L.2+HDQ\WA֙[%T4d&?<"<ڷOڨAAeS2ݠIa0 ~k3 .|gq tpG)}{ZC6%Oʑ:cG$(qCGA'\`&hz{|y!?Ly-d9yp;`yl`r)% =]9k|i!YN9vg~*af$[qQ8YUSjޝ=jAQE6_SS%T#v`\([Jv;5gtL*W_4e;;/)ow-ǒ: g&tv`qJ"VZ99].7B[[Yգ_zK[8e-1$Aܢi*kD0DŽX+ط=w:ƿ|!cmOF?H!-,"wL}$\eT P~ւBsa7hTɻ2 bTO_p>*ŕw ;[cݰm1Lg=A TVfWadωTB}.#P%am!d̦ X^ ~W~kn7Xm1Vq]c8k6wx~3$Λ(Ybm{p~c+O䎖W?r`9!nm$nR/a} niFh҂w[H 4F}#1M ׾?,7w}v|ؐ`;!Y2eF w;VW&_cM?`Y ZmIcfsW]*rY0c^*~X-Bj\V=7-ڙW@78̖Pg&s#5$tZNnP5pH\|;r&X]BZl[iKF5JkI`8&#h6a;#|ؤM`m H:ovfU(r`VC7FUL櫅6c^bKa0c)ev;pb]vRR;J79 Põo7q%`~'~5bz|bGh_.+(m?- Mlz$\J{i=Ue\"ߢ >| k: ?IԞύ N% 0\s@ H,uΝ7jxgT Ds9sҷ3}N:ܷĸvQ%xc`E8,) &}vۛp2s#TH{E١e:}+}URM낦V+% ύ J_;BsKo8,0G3 eTl]zHj!g{[3o'c bhYxz93dA87-24P|`:1ZϏl6 t:`0>)5vYD£XdpM#1R/)8 M"^g482yϮBvWbcҐx$"#xиV4h<!EBRTb R]jxUǏ?~Ǐ?~Ǐ?~Ǐ?~Ǐ?~4㥫ؔfIENDB`flask-limiter-3.12/doc/source/_static/tap-icon.png000066400000000000000000000357661476516161100221460ustar00rootroot00000000000000PNG  IHDR{`gAMA a cHRMz&u0`:pQ<bKGD#2 pHYs+tIME :3t{:IDATxw`[W[Nl$d BPچ(@B(e eSZ ) PBg<=)ɖ}?$۲㕁?{s?~Ǐ?~Ǐ?~Ǐ?~Ǐ?~Ǐ?/UXUX%(8.G_jj9`V8"iX8X|WPv$LgH<$#w'zЍӔw@$t !55]z/4#?d3$C<P]mhG ~@Yw;XA:v!@ eemK!l!BA[)^~@+u}kyY\qG~{; B|3ɤ<:&0 вzBƳK |(ٳ9MDdHR XpoJOB>OBro'#S<:Ta a+]2<$Ӕ8rbz@оk %rT@K3F<V4ƹxSުXtA("1 ~ǽ{6@GQ rHmM4 JLj<UxiUبkNK(@ʐ4UǴ9j?YD'將]WmejK _V%`z/Zi^vԣh٘*$Ƃ_e;\OؙX 0J3g'g%jk<{hT+AٳU_ȶ.\vcwU!l9@V2F9?/Ɓ|-w칶DFKǬBVU f~ϝ{o=b!Om$<O$9By#Q)jL4 ^Z7Z+,l,g6-,,2 yu/6pQ\ $+*S ۠+f-[dH66R]seCN^!%L:#6R5@c =ʚx"w;LxocO\h:^3wD"^&_m6ȉ|(V$zXQ^5Q|;] vlqj-z;Mhm:ۯXYV;'q7Zy_&y;y4纴ᏹR8PY$7ō'h@i ]\ ލQ25; NU9J=Sp,xlA1]Ej[(&@5_tްAWVFD,pG1/v5T(13aI^ qTD~WU|Zͫ.FtTuQz<-_Lk]&~MTo$gD3v G.I;8RP3,h~CY[ $^, aNg\4Q%́%̧#]mM@N-a .X sklq~&yHLR/Z:֑1_&hLKy>ߓz5R [OU*ay}:s 后 1ud˜8C;O^^sH%=/6ֵukDjs?S_ћfwu9Jc;wS L$OΛ 8EkZwO9sM BgB"WyXBH@uܺQZN3/'?K.8F8xUIP b嫃Xt>fsC:y5v nU&wQz?6TL>N-Hz7Tĕ)xYNWbi7eb3!'gۼ3f"3i #6sX{ ;}/Xc8`I#DZa=BuyE |R!XTBehaskDTΦ`3OSapp `16yvqyM)n/u^l U-A" `J<~ BZ\3S{P2l2N&yE˯:o YYaLf2/Y2/ c1ni/DYM12Q"Mxj1^E)5vxA!>G%L (aT`X6%t ¹#6#$'V5ՙ284>4g;g 0aRʲu` a8RA:(;i{]ZcPn ݪj旭;;¢pipl'HX,bso_plv5oG=|ݎ|af] 0{BBa=.ť7naduFC pq>B;0u}1]L#oq 0> >|̆ W{@5#\MY_ 9}`k{E Ńnipf0c$^b*`3_b D*f`"r u~/X-xV@w?6 b*y J N2i_~mg_|ׂ4zO:iLU;{BI8M 0bh7WCzSU&зq՟<S@G M3]a-$<33W*s{oh!a3Tfq !q Y>f22؂Q􍘌A?6ڇWgzpG^gWohc'.g݉ڂ>JA 8=, Ñv7jmif\*<>tyH.@?^A3ą&x3Ca0.D*b ; CZh܏b܆m~>f,<᪆zT\tOGޅR4 o T  BY &aHaՆ5yPS,t(rCkhu[,E*ЃQ3>|" p$Q9΢ @r9Eo, c'~^.D62PkpA0a%9 E@!VZ?Q82l=PsU\o s Ў8TBs .5؊xT‰6lXZ,sq8 h#wV܄wWH( 6~ 4p+*6c  Nq{;$1g^?C Bnb'5FwÁvOq.=\bXXxH}NkCorܚ5Ex=Rm!6^oSp " H$o] v@@{[oiZ7Ь`j'~Nh}AADt3{}6He2Џrж.ߡß}D|ɨYcZx*+ 8몞Q*U*kS9@2d  K$@k% w\{@^ mnц"\"v6t!IX0/)(g T'8`IpijB4ʥf}XbՂX/ǝO5UDta_;+nmeb(aȍ'F~Uك?MǤνH - Z]sU4yTND4}xPkɰlRҚa;EiBt аMa_LWb Hl:fݜ) Xq@˜"õOyS5+~Z&t+[cOmcB#8.MQwg_T\2מq 6w$ o)~1LcuZ}OFd_9G  it.z<#xOx}v;凑-K9%RWxo2Կ+)v$|ü#EFb!vҐ K판fVpkߪz]Ks_Qq4D+Qe-iiܕv;8MҔ؇=\l$0uUnzhK#eCZ.kf/ʍ~{Aӫ-\ R{ `e5 _T?ܸ!P?iY6Hη5?Սڀ +f # -2QeNA]AwgUwv]U<s~{+u!|CGf`>,8#m]Tߕ3[inMvQL!Myif"Jv Ԗט"¸e9 DDU\2;܂A7N9vb%[39^Tg+K28%ɺ;{wMP:Lbo%jPpZ}9Sj[&GFk\Kvvk\Q ZuM_N9=Շ@ ?8__S!8]/'"-*OapAmY{jkG28=+Pyoּs[rqzW9Ad% 溎VO)e9 @glD3ڻڃAӏ =!Moß(Tݛeop+ X,в%*' k -?囎P0V@8G!0je2WG_+jn[ 
!Ay;dU9:ʢĥu8`Um$$`w+^]+Rx_*so˭\MQ;5O:'dQ]8|~;7fݼ#efHz yѲ꿑Md?9ܕſ}#:O'"jz'b]񙼷GVg6sU7i:⺄ZuPj Ğ~d<񎜈(fZ^ʼXv CDQ]yqݽgsno9OU$Q+D&`#m@M%3׿-J^ 鸽թ?iɄsDiMӏ&z!zفrݕX,`:[' [&d~Fgŝ =D Hrr '1beQA./D_/HL8=:/c摳WITZ?]( 帾OdR*-KjJْʸc "Õ9 6`~Af y 7v!0BD+MwG=ϹFުhl XCikwby={hmI#efs>'l3vA3HaO)/RB5&:oظg":7M,״{?Ʋ^P,7 ;y?(t̋eD:It9kj>I E[*B uٹJ$1ۈ Py}&Ki҃`guw:;+2~_a6جyʋ*ޓ-ix]BnDEl6 9+Q]rP*D=s,\st\qkNķF =ID Ugo͵>yh_t:hEdubvVEMO* tT[^;? քqܻ6>ގ&( XKS}_F6QZDڲ#[9NU>~uW2ײa}2UDž}1d}.2q]g߯l| 2Kꯒ&ӪCR{snmchȾ$Վ<^:@x9!ȵ'Ԗ\% )U}l-d^T<zO&5>z.#$9^%Jj\_1E_ysnlw-\Guе˥#Z2FCHwİ+}4Q̿dz۾+OXY`Dsq9D5-`Aq]I+Y,D9JY&gG3@7ޘJ*|% tXDvC Kоd6גiKQyo?gʫPƷs#JVZR8=;󷕃b{\5vmߒu@W})J]8Ȯ,WV.x#[S2ץԬۮ*/wg9 tR9 l}x̯Ct*?@~+hUSw7 DkLPnYd%"z=gЈ{Xx2&;3ʭNԬ2vPT8: }#X Ґ.rv-Ud=ړ輴txK'"AOrկ_ҤY'F4HdJmRFDT{'|:ewliʫ7#ay\WO1oUhJWHFO)HFB!'AӍq{^RHۑ9="{h}+J:uZn K嗗\TƼG2r5Ο*>t[ޜh'Q.뫒:,fbS!ПwY@KrȵSa;OMcNZH6(õ*Ee/~&KiKVuڙTjM6ս5e-zQ8َt#Z sȳ' S9 7uZRP@N粑Uя7> p&hW5'2PpbЀ \)&R*M̳Umyj#ǿrEfazkގb^v+qBD:IOw'9Z潢-9(蚢mՙ9\%e55bwrW.VbR𺷣Wxhx k]v̐Q+*<$Vz"}VڿsrM)k{]|c[L.2+HDQ\WA֙[%T4d&?<"<ڷOڨAAeS2ݠIa0 ~k3 .|gq tpG)}{ZC6%Oʑ:cG$(qCGA'\`&hz{|y!?Ly-d9yp;`yl`r)% =]9k|i!YN9vg~*af$[qQ8YUSjޝ=jAQE6_SS%T#v`\([Jv;5gtL*W_4e;;/)ow-ǒ: g&tv`qJ"VZ99].7B[[Yգ_zK[8e-1$Aܢi*kD0DŽX+ط=w:ƿ|!cmOF?H!-,"wL}$\eT P~ւBsa7hTɻ2 bTO_p>*ŕw ;[cݰm1Lg=A TVfWadωTB}.#P%am!d̦ X^ ~W~kn7Xm1Vq]c8k6wx~3$Λ(Ybm{p~c+O䎖W?r`9!nm$nR/a} niFh҂w[H 4F}#1M ׾?,7w}v|ؐ`;!Y2eF w;VW&_cM?`Y ZmIcfsW]*rY0c^*~X-Bj\V=7-ڙW@78̖Pg&s#5$tZNnP5pH\|;r&X]BZl[iKF5JkI`8&#h6a;#|ؤM`m H:ovfU(r`VC7FUL櫅6c^bKa0c)ev;pb]vRR;J79 Põo7q%`~'~5bz|bGh_.+(m?- Mlz$\J{i=Ue\"ߢ >| k: ?IԞύ N% 0\s@ H,uΝ7jxgT Ds9sҷ3}N:ܷĸvQ%xc`E8,) &}vۛp2s#TH{E١e:}+}URM낦V+% ύ J_;BsKo8,0G3 eTl]zHj!g{[3o'c bhYxz93dA87-24P|`:1ZϏl6 t:`0>)5vYD£XdpM#1R/)8 M"^g482yϮBvWbcҐx$"#xиV4h<!EBRTb R]jxUǏ?~Ǐ?~Ǐ?~Ǐ?~Ǐ?~4㥫ؔf%tEXtdate:create2021-12-20T09:04:58-08:008ǎ!%tEXtdate:modify2021-12-20T09:04:58-08:00I6tEXtSoftwarewww.inkscape.org<IENDB`flask-limiter-3.12/doc/source/_static/tap-icon.svg000066400000000000000000004730111476516161100221460ustar00rootroot00000000000000 ]> flask-limiter-3.12/doc/source/_static/tap-logo.png000066400000000000000000000507061476516161100221450ustar00rootroot00000000000000PNG  IHDRj6q AiCCPICC ProfileH wTSϽ7" %z ;HQIP&vDF)VdTG"cE b PQDE݌k 5ޚYg}׺PtX4X\XffGD=HƳ.d,P&s"7C$ E6<~&S2)212 "įl+ɘ&Y4Pޚ%ᣌ\%g|eTI(L0_&l2E9r9hxgIbטifSb1+MxL 0oE%YmhYh~S=zU&ϞAYl/$ZUm@O ޜl^ ' lsk.+7oʿ9V;?#I3eE妧KD d9i,UQ h A1vjpԁzN6p\W p G@ K0ށiABZyCAP8C@&*CP=#t] 4}a ٰ;GDxJ>,_“@FXDBX$!k"EHqaYbVabJ0՘cVL6f3bձX'?v 6-V``[a;p~\2n5׌ &x*sb|! ߏƿ' Zk! 
$l$T4QOt"y\b)AI&NI$R$)TIj"]&=&!:dGrY@^O$ _%?P(&OJEBN9J@y@yCR nXZOD}J}/G3ɭk{%Oחw_.'_!JQ@SVF=IEbbbb5Q%O@%!BӥyҸM:e0G7ӓ e%e[(R0`3R46i^)*n*|"fLUo՝mO0j&jajj.ϧwϝ_4갺zj=U45nɚ4ǴhZ ZZ^0Tf%9->ݫ=cXgN].[7A\SwBOK/X/_Q>QG[ `Aaac#*Z;8cq>[&IIMST`ϴ kh&45ǢYYF֠9<|y+ =X_,,S-,Y)YXmĚk]c}džjcΦ浭-v};]N"&1=xtv(}'{'IߝY) Σ -rqr.d._xpUەZM׍vm=+KGǔ ^WWbj>:>>>v}/avO8 FV> 2 u/_$\BCv< 5 ]s.,4&yUx~xw-bEDCĻHGKwFGEGME{EEKX,YFZ ={$vrK .3\rϮ_Yq*©L_wד+]eD]cIIIOAu_䩔)3ѩiB%a+]3='/40CiU@ёL(sYfLH$%Y jgGeQn~5f5wugv5k֮\۹Nw]m mHFˍenQQ`hBBQ-[lllfjۗ"^bO%ܒY}WwvwXbY^Ю]WVa[q`id2JjGէ{׿m>PkAma꺿g_DHGGu;776ƱqoC{P38!9 ҝˁ^r۽Ug9];}}_~imp㭎}]/}.{^=}^?z8hc' O*?f`ϳgC/Oϩ+FFGGόzˌㅿ)ѫ~wgbk?Jި9mdwi獵ޫ?cǑOO?w| x&mf2:Y~ pHYs  @IDATx^U6'@:"`U76 Eտ b[Ep bA]i !L$93gª/.{{[={='x̣_W&밮XGzz%]t/>\ }sb=_]&G9oM{ ,J#@$T D{Oi⹝mmGxO)Rc7,]vmQG9ǪsTj;g?kG+**.r#)TO$ZC彶M^M0XL}0p/p6]QE(On$DDn v+"v,ssL&3ƶ>tC?>66ֺnݺ /g=Ydv/|j#ykj nDر vR=yB/>gY`W w XώS)K9:`C,co eC8vZh;_UVVv~piO{SԱ;=_87_z䯿6^w]ve=ցJM ٳgbWŶ^TD@faRCCCzKSB}}}7HOnxCPJ$Q'*GiLKQlnn.Κ5XSS;Ni6*qo|TS---S/'>1<__=| lܸq][[[+}wqzf)1_ xkomw*{/_~+^9UU3X9DW@m:.T_S.\N%K-[VZo%9ĭ-~ H,\4o޼hJO&Xhj* .fD*ϧ4(:b1ŋٶm[n}_<蠃?SyI'^`AEggg??pO<1d;|[s~@' c r`)K}HMz=a6yWV/+y{^wagM<u >8e#$ /ϡp)u:o#{ZQb}y^.g{WiUӨ-vX Uyᇧ@GVӟTغu:I5tjz;~`[S1}m6!TG;}9$1?6o\`6$f,ETաFqCa淾s!Z#ަ恑0QJ!._Jp(:C^ß6/"Hx(H/zыFN=N$CPY/3+m[ " P%x&c@$ennd&$u$|=94{饗* |Gy_dNHrrE؂$CՍ\2:lްaCuO^J3}FaN]J}sӀ ;!V`:4 D*|_6Taj Sxdzb]kV_ {7QSre jsgtm7;PC}{7fs}q75^4zuוpHRAyf͚9ݹ)_3#Uy w#y_<۝AVЛl4s6YI6(8mcwC?e""R4/#%@[« ޏbX KRI_| ~X ,A(8饉(. 9%Ef>#JJ/kd.buֈxq$A|zw}՜OIbym^ws¹`K9ֈ}R IZq2֨>³9SL}qbLakW֠&^\..L"$ʕS_KX'5#'˖#Z"I jH]/QӫpuT7?ș'L/W[D)LIb6\jE< wH2V/jjs\flt,qp lXU57{=v+,(x\Lq,!~ H n `&x1&H,oJO~4оHHU He@V,30f VKhGvؾ^a rێ6DYmCF~#Up"^umj~0 bto$]%8ǻ!]N۲c[m8]=&y{衇@c7ȣ$S:5hndd{~Dmwu ƆұxC!{!u=1lo@ i Jpz OÀn$ģ+x߅J-ٍy6%ehb'cO/AvREsWy}!%Iy~v7'GDv|:d1#RCz]w]">HZ@(W4EՏ7PBDfXݮ1nPJXĎn}D@ u=RH8%ꅼmDf<~#^i3,r^Q^tTk,$D蔄*"_B}`J'g絧TNc?"%ǀj2C{O{!Tt|t%/^De#;Q-zl6۫ c&M)y|{:)^;yio@vJob_ɱcQNᘄӽD/U-hQist"6< N$:D~]Yнdop4` bcXKg#_硫[]q% 8zn<:~|x= /!ZRٟcߜEi13t=g7=7Ye["ʶ46d=5 eᕀW0Dv$Ǵ]f`+ x݀ҥFew1s.s#7] <x"0=kG-\!Ljq;XʎˮASӶȫ~MB_VmUvw[ @I& H@`$@Iv/{to%&>~,"}0\?H1q`g ͽ-l3_saOFBμ6% `:Q~?7kÀ])D M>kћ[;շMvJ/Z!@ir SҖ6pH/:od.!X1ꉌ" #f.N\(mJϯsrڦ7;>@p G@d$_'50p% r׺U2{{y?eWr.9l@G)Q5@OvCiwp!` %TmUVʔ:x>7hϒGDux=~x\N6p<,K=Iy.pM؛O;0hD~sٗ@|醻x!R`>`K:b<.:.-YW·Ȝ9F*u(ٍjf{IazR{DbVFnI|n9$Wyf+,V@j!vr}DK.3/^ VԙH{+_v;@37ƽy 6,{Y"=JHE}={.Ǡ 崠)>vn 3g6dq盅 $@p_MT '[bmHH)LɛÑẚgJ&Z`cկ$e)K1LE7HgFO8.$+{[=&()n wӄC|;S9¬yV&QV0U%{{ N/6s83 3F<+d#Qxz_a@xu1Gfy.frXhY&ʢMM]13IfE*R֍ ֢vPE*GF&`Ǚ+I +`(I g7A4=Xeo"D2d_D*;3UM*T45PkNBU&8'R寇ONJU]~7&y^fή1uAd'o7%?~&w#$zAvfժUDaV1gLOU1M𹝞{RTtFI3oƩJ);A eo#g2*c$ bc,yo~(K}GZڸĔsaw&(3Ev ZA1L!%&%ys"]=lL67 GR:#H9"C{QHy智$g%PX:UF Խ54yuA5\_ʕrQsRVĊzi+ Ft]eQUK"]e sHJԐP0Ui.l+^f29΍1'g!\U-_ͺSԧB!9} , }.Iʔ lO&V)NoyGipѭj\b,r $@Q1XyCmK,BUD1˺=I?(Ϧ~ bP,eQ%~fA0[nQE@b _j$KZ0sg-69˗/''x()E`ꐴJH1d5쏊r"͌)  $^ײ[ 9jIWUjX4܉.c4j0IKt XgJ^= <367"-pעFXB-nv[ p8^AU׳9WL`p4=C(|Ѣny+o=$'0tln"IvMAa7S%kjeQӱKC\P"T'|I`~I Gc3$TݓR߬vA3f0VZu b߼w1o z,7F$O稨 Dc)riq0z޶"ILQpHLӺ3<3>:xi HG\ mXC<1u}Y[l $:IvܡՌ#qXZQ>uElhy\KGb ]Wۿ[d%ޚH%1dX$x97,<<:"5yrxUxm)$0l 4/qXBa)&MnB4i Z+HHH* mj|jr_aA|3qa9Ʃ2h?kǴj> 6TΩr %"ڎOAZtuUM& R1@c@ vlQ{;  =HAQ W`31A" CM*p4K7KJwqG ?Erhej[ؠ`6~wI'#3Ϭ&On5v$U`pW^y%lWK$H 90Gd#Od,iVAՓ+9搯$v7_¾Me;J$w=_a??~}kL2.*t=}t5%>n!TSu&LJHcB[;:s3\JHHZҩZMʷk"a !1w54x}S_\o$!V*p$0#x=&JTdB )>?"BGc"BQ_+t*M28sKhbqxG6#Qy$b+At駟ޤE֏Tkw?OG ٨nSUEܮm{DO99ѣzusA/'(N9 ]윯H0SوT ryxoMQ腳x@mdʜO8°sHo"9~5/}+e| e]zV-,̇z-a[ Ma|<q qP%~u>n'݊[wk=v1orÀ]ꪫ}c w}WgcjiqpTbwwz;8P*w?Vv'NaiK.Ę*J}_m4: ;M`~򖷼9NțTPrSO=)+I94$z$AD, j+aŸha3THQ:Ӌ<Ce61>b}_ d(|H{iYSWqW Z81 4q(j(@sa-= P; pJ*b- p. 
1 8CJo$Ikv Ӵ$!FȄ#5@1x|ETq\҉ʮ1aH} wRb>s跿m#b_U@s;{J#y)Xio*HR/jkmv2/XدKAN8BbKrku?^W7%D]18* Qq炰&‚%v ^b}0E~2 xr#ئJxIlB2KT+9޿cfK^Zͻ!~S"=_ii"waP烤mpZ͛~+0cHu`Tv[ʮ^z5vU? ~0͌g%\B-pq :FRH86+&0ÖI*]r$j3Yv4JGGzjEv̢! FnqɣN3z酶iO[uH%6q/ 3R =e@O>8 q8ޣ!f5/[ö;5ը1I)qO2S^{A,\y:EÔd%ZY_"Y4Dqz"o;s8%[!uK0ybbVw)"UzQ-87q /.T1vH;ʡZ#PjTYqU[:w?y-A=@u%HJ- '=4k0M˖^Ǚ/8oAj38"ԓI%ޙJTtwir B20Y " "u)ZÎ8lh+0qjpXF%% 7Chi i*]|F5;LF. YB~rr4KhAH3lA r.T R+)z9a<7o76 Z)|&8*sJrwwI%ž!M 6 sh f*`;p2+_5J1dzu^ 11!ԛV8Htw)ٻ!1Z逤ZWR]bLz^xhF%f܅m8[rmNkrxmx.asn `̰j%N\G>Dt0oQ͗E=zpgOgНցdBmf&uA%z'kxvn ؽ5f0[B7`nǫkEB n 눵L]ٮŽjk)c_ǩNTjT.,eC(xG';py8 h3Y-/z<~/p B;lEiAS"beC Z3EՋ-{ +AQm`z`+B7>պCjwwZ#kwݗqSvet<\nu:opl "0T)Pxܼ_XvTfD[6|FѵĆx !ؠu[M0J7WT`?G7?ExPD-ς qJiدsi{.O:G2H{>HȔXK8 !LC% D~/zszWN!gX/R/i+° A"E5!a53zWϏtw7$uMį|&Gb\A|XMbpjp"8rMF%dzQw u xs^Uzh9qڰSxpN|kʎ[~Ln`bŗv8v#*)r#5u?Q[k߮#+eIm%ARda`V*Lω#{7"uwiCcc}^wـwy.{q8}VAb?AJwkmoč/'5;|goPc/Ǧʘ󌔈cϲVP$63 '˜.n53\Adwz/h0i IzLv2ew,Qz'k鎱TqVyo|v\m$/+:<}%c׻s L 2iQgg=ɨY{:>'ݕd0Ӈh C;"(XMɛl=`&Q@Z)s< ٍǙ$PEg_<4FȐ#m'2xv8+VndÝ/:GlK՞"6@L r73+@,[1gt@  q 98ǝxgB{^GiF.%離w&vWqMgnW$"2 Lt|68 \hh{{Dqz$D"r QbQDڨL qlzƱr,†FT[?lNߥh\c(VgUNG$.IM/K\k#nxZagh㰠&pE} t饻A::J=U:̳p%+Y3N@[›}WGE&f#`׋A!Cb~jsX\!h.'`xyDZ9t?39!/ATdT|hSHeCr}I U5ONʡl_WPċkP9`4AˏƸ[5Fuغ$ 'VeXmT+FNC@sc2,' m_T$IB8?KMs3{ǸYb,M_%sa^H}# Q>53Bc^vgE4*DD~uq582E\pՙ*tGƓAyxpx "tgӨ5YT^5[86RQ\29fdFq5)ScI*T`'/0IDAT7K+"@0MOg `^"j+wҷ4^j9P19BᄺN:*S87U0MB6KǗ5G }d"-< Zbj]Gl&>H2W!ɹi1u C1LDRgmV"5a,9|BSմM7x:UA1abzb 9|Ag)2xSBG،+nv+Y!Tv $}ɑgJNtF2_ <^a H As@f^b*?UW 5 cSpnM8qCr) v l^٢j3-USW6Tu 7%PxCqtQt-gޛu/ّ<׏q ;ci .\qn+Kf\lT|}Cc.Ww .cdb֝Y Ajz{3۸[T`LEىVõÖS5՘FBviMwb!-eA$Rne{D>=15 ~`B!yH6@iBJdq͛8Fr"V=D${f?I>s g;\ ՂZjC:@ZE9sYDxgWU5eܺ N;g 08?G2d> x rz9aufS7֏ 83NrS}!gCM^Cvah?qQm?taK~;m[f=O s!7l˜ B8pH*Y?s0hd^֢D;^".Ĺ1؊=Ըv皡pe=#o(]@]:b {=ĝ=voĭ_\3&u2끲8ebb=`wS!Qf FQ\M@ J&U!Q]8 cHGXaf~A}COg>uPJb,4ٌ/&QPO@RR>7Y RbWB8_ iJǜ 8H\7讯!ZEP  }J|vLr$8p:>ϧ>CQ{!;E#WNnTQblZuyr:꡷O@3|ہ[?J j)ޣ)%`krJ*"' W;H. qZTS;NA/HC_EU-꫎DHN9׹F2r*qpB*+pP ئT6"k.Q[ǾTRYr%TVՃ8L2LlwzAK:R#RĎN#t ~t@Z&ϱQ;'1=.;!PT_!]Yr#barvIF9ʵl_4b뢓b (sdH&t<aY5Ñ+Wu6rf*?w613ׂ,4?OOɲR ZM0vǶ!H E4"*9E][n#I2 3^r`5\ñ^% vM)7BUj?uve2zm8 9py鎔He/#ӐbryT.&$|XMv1t3D,F(5afJ1 8,(?ڜ^ p<:#ڃDb,˦oIENDB`flask-limiter-3.12/doc/source/api.rst000066400000000000000000000005001476516161100175550ustar00rootroot00000000000000API === .. currentmodule:: flask_limiter Extension --------- .. autoclass:: Limiter Utilities --------- .. autoclass:: ExemptionScope .. autoclass:: RequestLimit .. automodule:: flask_limiter.util Exceptions ---------- .. currentmodule:: flask_limiter .. autoexception:: RateLimitExceeded :no-inherited-members: flask-limiter-3.12/doc/source/changelog.rst000066400000000000000000000000371476516161100207400ustar00rootroot00000000000000.. include:: ../../HISTORY.rst flask-limiter-3.12/doc/source/cli.rst000066400000000000000000000042701476516161100175630ustar00rootroot00000000000000Command Line Interface ====================== .. versionadded:: 2.4.0 Flask-Limiter adds a few subcommands to the Flask :doc:`flask:cli` for maintenance & diagnostic purposes. These can be accessed under the **limiter** sub-command as follows .. program-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter --help :shell: Example ------- The examples below use the following example application: .. literalinclude:: ../../examples/kitchensink.py :language: py Extension Config ^^^^^^^^^^^^^^^^ Use the subcommand **config** to display the active configuration .. code-block:: shell $ flask limiter config .. 
command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter config :shell: List limits ^^^^^^^^^^^ .. code-block:: shell $ flask limiter limits Use the subcommand **limits** to display all configured limits .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter limits :shell: ======================= Filter by endpoint name ======================= .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter limits --endpoint=root :shell: ============== Filter by path ============== .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter limits --path=/health/ :shell: ================== Check limit status ================== .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter limits --key=127.0.0.1 :shell: Clear limits ^^^^^^^^^^^^ .. code-block:: shell $ flask limiter clear The CLI exposes a subcommand **clear** that can be used to clear either all limits or limits for specific endpoints or routes by a ``key`` which represents the value returned by the :paramref:`~flask_limiter.Limiter.key_func` (i.e. a specific user) callable configured for your application. .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter clear --help :shell: By default this is an interactive command which requires confirmation, however it can also be used in automations by using the ``-y`` flag to force confirmation. .. command-output:: FLASK_APP=../../examples/kitchensink.py:app flask limiter clear --key=127.0.0.1 -y :shell: flask-limiter-3.12/doc/source/conf.py000066400000000000000000000060061476516161100175600ustar00rootroot00000000000000# import os import re import sys sys.path.insert(0, os.path.abspath("../../")) sys.path.insert(0, os.path.abspath("./")) from theme_config import * import flask_limiter description = "Flask-Limiter adds rate limiting to flask applications." copyright = "2023, Ali-Akber Saifee" project = "Flask-Limiter" ahead = 0 if ".post0.dev" in flask_limiter.__version__: version, ahead = flask_limiter.__version__.split(".post0.dev") else: version = flask_limiter.__version__ release = version html_title = f"{project} {{{release}}}" try: ahead = int(ahead) if ahead > 0: html_theme_options[ "announcement" ] = f""" This is a development version. 
The documentation for the latest stable version can be found here """ html_title = f"{project} {{dev}}" except: pass html_favicon = "_static/tap-icon.ico" html_static_path = ["./_static"] templates_path = ["./_templates"] html_css_files = [ "custom.css", "colors.css", "https://fonts.googleapis.com/css2?family=Fira+Code:wght@300;400;700&family=Fira+Sans:ital,wght@0,100;0,200;0,300;0,400;0,500;0,600;0,700;0,800;0,900;1,100;1,200;1,300;1,400;1,500;1,600;1,700;1,800;1,900&family=Be+Vietnam+Pro:wght@500&display=swap", ] html_theme_options.update({"light_logo": "tap-icon.png", "dark_logo": "tap-icon.png"}) extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosectionlabel", "sphinx.ext.autosummary", "sphinx.ext.extlinks", "sphinx.ext.intersphinx", "sphinxext.opengraph", "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinxcontrib.programoutput", "sphinx_issues", "sphinx_inline_tabs", "sphinx_paramlinks", ] autodoc_default_options = { "members": True, "inherited-members": True, "inherit-docstrings": True, "member-order": "bysource", } add_module_names = False autoclass_content = "both" autodoc_typehints_format = "short" autodoc_preserve_defaults = True autosectionlabel_maxdepth = 3 autosectionlabel_prefix_document = True issues_github_path = "alisaifee/flask-limiter" ogp_image = "_static/logo-og.png" extlinks = { "pypi": ("https://pypi.org/project/%s", "%s"), "githubsrc": ("https://github.com/alisaifee/flask-limiter/blob/master/%s", "%s"), } intersphinx_mapping = { "python": ("http://docs.python.org/", None), "limits": ("https://limits.readthedocs.io/en/stable/", None), "redis-py-cluster": ("https://redis-py-cluster.readthedocs.io/en/latest/", None), "redis-py": ("https://redis-py.readthedocs.io/en/latest/", None), "pymemcache": ("https://pymemcache.readthedocs.io/en/latest/", None), "pymongo": ("https://pymongo.readthedocs.io/en/stable/", None), "flask": ("https://flask.palletsprojects.com/en/latest/", None), "werkzeug": ("https://werkzeug.palletsprojects.com/en/latest/", None), "flaskrestful": ("http://flask-restful.readthedocs.org/en/latest/", None), } flask-limiter-3.12/doc/source/configuration.rst000066400000000000000000000310251476516161100216610ustar00rootroot00000000000000.. _RFC2616: https://tools.ietf.org/html/rfc2616#section-14.37 .. _ratelimit-conf: Configuration ============= Using Flask Config ------------------ The following :doc:`Flask Configuration ` values are honored by :class:`~flask_limiter.Limiter`. If the corresponding configuration value is also present as an argument to the :class:`~flask_limiter.Limiter` constructor, the constructor argument will take priority. .. list-table:: * - .. data:: RATELIMIT_ENABLED Constructor argument: :paramref:`~flask_limiter.Limiter.enabled` - Overall kill switch for rate limits. Defaults to ``True`` * - .. data:: RATELIMIT_KEY_FUNC Constructor argument: :paramref:`~flask_limiter.Limiter.key_func` - A callable that returns the domain to rate limit (e.g. username, ip address etc) * - .. data:: RATELIMIT_KEY_PREFIX Constructor argument: :paramref:`~flask_limiter.Limiter.key_prefix` - Prefix that is prepended to each stored rate limit key and app context global name. This can be useful when using a shared storage for multiple applications or rate limit domains. For multi-instance use cases, explicitly pass ``key_prefix`` keyword argument to :class:`~flask_limiter.Limiter` constructor instead. * - .. 
data:: RATELIMIT_APPLICATION Constructor argument: :paramref:`~flask_limiter.Limiter.application_limits` - A comma (or some other delimiter) separated string that will be used to apply limits to the application as a whole (i.e. shared by all routes). * - .. data:: RATELIMIT_APPLICATION_PER_METHOD Constructor argument: :paramref:`~flask_limiter.Limiter.application_limits_per_method` - Whether application limits are applied per method, per route or as a combination of all method per route. * - .. data:: RATELIMIT_APPLICATION_EXEMPT_WHEN Constructor argument: :paramref:`~flask_limiter.Limiter.application_limits_exempt_when` - A function that should return a truthy value if the application rate limit(s) should be skipped for the current request. This callback is called from the :doc:`flask request context ` :meth:`~flask.Flask.before_request` hook. * - .. data:: RATELIMIT_APPLICATION_DEDUCT_WHEN Constructor argument: :paramref:`~flask_limiter.Limiter.application_limits_deduct_when` - A function that should return a truthy value if a deduction should be made from the application rate limit(s) for the current request. This callback is called from the :doc:`flask request context ` :meth:`~flask.Flask.after_request` hook. * - .. data:: RATELIMIT_APPLICATION_COST Constructor argument: :paramref:`~flask_limiter.Limiter.application_limits_cost` - The cost of a hit to the application wide shared limit as an integer or a function that takes no parameters and returns the cost as an integer (Default: 1) * - .. data:: RATELIMIT_DEFAULT Constructor argument: :paramref:`~flask_limiter.Limiter.default_limits` - A comma (or some other delimiter) separated string that will be used to apply a default limit on all routes that are otherwise not decorated with an explicit rate limit. If not provided, the default limits can be passed to the :class:`~flask_limiter.Limiter` constructor as well (the values passed to the constructor take precedence over those in the config). :ref:`ratelimit-string` for details. * - .. data:: RATELIMIT_DEFAULTS_PER_METHOD Constructor argument: :paramref:`~flask_limiter.Limiter.default_limits_per_method` - Whether default limits are applied per method, per route or as a combination of all method per route. * - .. data:: RATELIMIT_DEFAULTS_COST Constructor argument: :paramref:`~flask_limiter.Limiter.default_limits_cost` - The cost of a hit to the default limits as an integer or a function that takes no parameters and returns the cost as an integer (Default: 1) * - .. data:: RATELIMIT_DEFAULTS_EXEMPT_WHEN Constructor argument: :paramref:`~flask_limiter.Limiter.default_limits_exempt_when` - A function that should return a truthy value if the default rate limit(s) should be skipped for the current request. This callback is called from the :doc:`flask request context ` :meth:`~flask.Flask.before_request` hook. * - .. data:: RATELIMIT_DEFAULTS_DEDUCT_WHEN Constructor argument: :paramref:`~flask_limiter.Limiter.default_limits_deduct_when` - A function that should return a truthy value if a deduction should be made from the default rate limit(s) for the current request. This callback is called from the :doc:`flask request context ` :meth:`~flask.Flask.after_request` hook. * - .. data:: RATELIMIT_STORAGE_URI Constructor argument: :paramref:`~flask_limiter.Limiter.storage_uri` - A storage location conforming to the scheme in :ref:`limits:storage:storage scheme`. A basic in-memory storage can be used by specifying ``memory://`` but it should be used with caution in any production setup since: #. 
Each application process will have it's own storage #. The state of the rate limits will not persist beyond the process' life-time. Other supported backends include: - Memcached: ``memcached://host:port`` - MongoDB: ``mongodb://host:port`` - Redis: ``redis://host:port`` For specific examples and requirements of supported backends please refer to :ref:`limits:storage:storage scheme` and the :doc:`limits ` library. * - .. data:: RATELIMIT_STORAGE_OPTIONS Constructor argument: :paramref:`~flask_limiter.Limiter.storage_options` - A dictionary to set extra options to be passed to the storage implementation upon initialization. * - .. data:: RATELIMIT_REQUEST_IDENTIFIER Constructor argument: :paramref:`~flask_limiter.Limiter.request_identifier` - A callable that returns the unique identity of the current request. Defaults to :attr:`flask.Request.endpoint` * - .. data:: RATELIMIT_STRATEGY Constructor argument: :paramref:`~flask_limiter.Limiter.strategy` - The rate limiting strategy to use. :ref:`ratelimit-strategy` for details. * - .. data:: RATELIMIT_HEADERS_ENABLED Constructor argument: :paramref:`~flask_limiter.Limiter.headers_enabled` - Enables returning :ref:`ratelimit-headers`. Defaults to ``False`` * - .. data:: RATELIMIT_HEADER_LIMIT Constructor argument: :paramref:`~flask_limiter.Limiter.header_name_mapping` - Header for the current rate limit. Defaults to ``X-RateLimit-Limit`` * - .. data:: RATELIMIT_HEADER_RESET Constructor argument: :paramref:`~flask_limiter.Limiter.header_name_mapping` - Header for the reset time of the current rate limit. Defaults to ``X-RateLimit-Reset`` * - .. data:: RATELIMIT_HEADER_REMAINING Constructor argument: :paramref:`~flask_limiter.Limiter.header_name_mapping` - Header for the number of requests remaining in the current rate limit. Defaults to ``X-RateLimit-Remaining`` * - .. data:: RATELIMIT_HEADER_RETRY_AFTER Constructor argument: :paramref:`~flask_limiter.Limiter.header_name_mapping` - Header for when the client should retry the request. Defaults to ``Retry-After`` * - .. data:: RATELIMIT_HEADER_RETRY_AFTER_VALUE Constructor argument: :paramref:`~flask_limiter.Limiter.retry_after` - Allows configuration of how the value of the ``Retry-After`` header is rendered. One of ``http-date`` or ``delta-seconds``. (`RFC2616`_). * - .. data:: RATELIMIT_SWALLOW_ERRORS Constructor argument: :paramref:`~flask_limiter.Limiter.swallow_errors` - Whether to allow failures while attempting to perform a rate limit such as errors with downstream storage. Setting this value to ``True`` will effectively disable rate limiting for requests where an error has occurred. * - .. data:: RATELIMIT_IN_MEMORY_FALLBACK_ENABLED Constructor argument: :paramref:`~flask_limiter.Limiter.in_memory_fallback_enabled` - ``True``/``False``. If enabled an in memory rate limiter will be used as a fallback when the configured storage is down. Note that, when used in combination with ``RATELIMIT_IN_MEMORY_FALLBACK`` the original rate limits will not be inherited and the values provided in * - .. data:: RATELIMIT_IN_MEMORY_FALLBACK Constructor argument: :paramref:`~flask_limiter.Limiter.in_memory_fallback` - A comma (or some other delimiter) separated string that will be used when the configured storage is down. * - .. data:: RATELIMIT_FAIL_ON_FIRST_BREACH Constructor argument: :paramref:`~flask_limiter.Limiter.fail_on_first_breach` - Whether to stop processing remaining limits after the first breach. Default to ``True`` * - .. 
data:: RATELIMIT_ON_BREACH_CALLBACK Constructor argument: :paramref:`~flask_limiter.Limiter.on_breach` - A function that will be called when any limit in this extension is breached. * - .. data:: RATELIMIT_META Constructor argument: :paramref:`~flask_limiter.Limiter.meta_limits` - A comma (or some other delimiter) separated string that will be used to control the upper limit of a requesting client hitting any configured rate limit. Once a meta limit is exceeded all subsequent requests will raise a :class:`~flask_limiter.RateLimitExceeded` for the duration of the meta limit window. * - .. data:: RATELIMIT_ON_META_BREACH_CALLBACK Constructor argument: :paramref:`~flask_limiter.Limiter.on_meta_breach` - A function that will be called when a meta limit in this extension is breached. .. _ratelimit-string: Rate limit string notation -------------------------- Rate limits are specified as strings following the format:: [count] [per|/] [n (optional)] [second|minute|hour|day|month|year][s] You can combine multiple rate limits by separating them with a delimiter of your choice. Examples ^^^^^^^^ * ``10 per hour`` * ``10 per 2 hours`` * ``10/hour`` * ``5/2 seconds;10/hour;100/day;2000 per year`` * ``100/day, 500/7 days`` .. warning:: If rate limit strings that are provided to the :meth:`~flask_limiter.Limiter.limit` decorator are malformed and can't be parsed the decorated route will fall back to the default rate limit(s) and an ``ERROR`` log message will be emitted. Refer to :ref:`logging` for more details on capturing this information. Malformed default rate limit strings will however raise an exception as they are evaluated early enough to not cause disruption to a running application. .. _ratelimit-headers: Rate-limiting Headers --------------------- If the configuration is enabled, information about the rate limit with respect to the route being requested will be added to the response headers. Since multiple rate limits can be active for a given route - the rate limit with the lowest time granularity will be used in the scenario when the request does not breach any rate limits. .. tabularcolumns:: |p{8cm}|p{8.5cm}| ============================== ================================================ ``X-RateLimit-Limit`` The total number of requests allowed for the active window ``X-RateLimit-Remaining`` The number of requests remaining in the active window. ``X-RateLimit-Reset`` UTC seconds since epoch when the window will be reset. ``Retry-After`` Seconds to retry after or the http date when the Rate Limit will be reset. The way the value is presented depends on the configuration value set in :data:`RATELIMIT_HEADER_RETRY_AFTER_VALUE` and defaults to `delta-seconds`. ============================== ================================================ The header names can be customised if required by either using the flask configuration ( :attr:`RATELIMIT_HEADER_LIMIT`, :attr:`RATELIMIT_HEADER_RESET`, :attr:`RATELIMIT_HEADER_RETRY_AFTER`, :attr:`RATELIMIT_HEADER_REMAINING` ) values or by providing the :paramref:`~flask_limiter.Limiter.header_name_mapping` argument to the extension constructor as follows:: from flask_limiter import Limiter, HEADERS limiter = Limiter(header_name_mapping={ HEADERS.LIMIT : "X-My-Limit", HEADERS.RESET : "X-My-Reset", HEADERS.REMAINING: "X-My-Remaining" } ) flask-limiter-3.12/doc/source/development.rst000066400000000000000000000011721476516161100213340ustar00rootroot00000000000000Development =========== The source is available on `Github `_ To get started .. 
code:: console $ git clone git://github.com/alisaifee/flask-limiter.git $ cd flask-limiter $ pip install -r requirements/dev.txt Tests ----- Since some of the tests rely on having a redis & memcached instance available, you will need a working docker installation to run all the tests. .. code:: console $ pytest Running the tests will automatically invoke :program:`docker-compose` with the following config (:githubsrc:`docker-compose.yml`) .. literalinclude:: ../../docker-compose.yml flask-limiter-3.12/doc/source/index.rst000066400000000000000000000335011476516161100201220ustar00rootroot00000000000000.. _pymemcache: https://pypi.python.org/pypi/pymemcache .. _redis: https://pypi.python.org/pypi/redis .. _github issue #41: https://github.com/alisaifee/flask-limiter/issues/41 .. _flask apps and ip spoofing: http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html .. image:: _static/logo.png :target: / :width: 600px :align: center :class: logo ============= Flask-Limiter ============= .. currentmodule:: flask_limiter .. toctree:: :maxdepth: 2 :hidden: strategies configuration recipes cli api development changelog misc .. container:: badges .. image:: https://img.shields.io/github/last-commit/alisaifee/flask-limiter?logo=github&style=for-the-badge&labelColor=#282828 :target: https://github.com/alisaifee/flask-limiter :class: header-badge .. image:: https://img.shields.io/github/actions/workflow/status/alisaifee/flask-limiter/main.yml?logo=github&style=for-the-badge&labelColor=#282828 :target: https://github.com/alisaifee/flask-limiter/actions/workflows/main.yml :class: header-badge .. image:: https://img.shields.io/codecov/c/github/alisaifee/flask-limiter?logo=codecov&style=for-the-badge&labelColor=#282828 :target: https://app.codecov.io/gh/alisaifee/flask-limiter :class: header-badge .. image:: https://img.shields.io/pypi/pyversions/flask-limiter?style=for-the-badge&logo=pypi :target: https://pypi.org/project/flask-limiter :class: header-badge **Flask-Limiter** adds rate limiting to :class:`~flask.Flask` applications. By adding the extension to your flask application, you can configure various rate limits at different levels (e.g. application wide, per :class:`~flask.Blueprint`, routes, resource etc). **Flask-Limiter** can be configured to persist the rate limit state to many commonly used storage backends via the :doc:`limits:index` library. Let's get started! Installation ============ **Flask-Limiter** can be installed via :program:`pip`. .. code:: console $ pip install Flask-Limiter To include extra dependencies for a specific storage backend you can add the specific backend name via the ``extras`` notation. For example: .. tab:: Redis .. code:: console $ pip install Flask-Limiter[redis] .. tab:: Memcached .. code:: console $ pip install Flask-Limiter[memcached] .. tab:: MongoDB .. code:: console $ pip install Flask-Limiter[mongodb] .. tab:: Valkey .. code:: console $ pip install Flask-Limiter[valkey] Quick start =========== A very basic setup can be achieved as follows: .. literalinclude:: ../../examples/sample.py :language: py The above Flask app will have the following rate limiting characteristics: * Use an in-memory storage provided by :class:`limits.storage.MemoryStorage`. .. note:: This is only meant for testing/development and should be replaced with an appropriate storage of your choice before moving to production. * Rate limiting by the ``remote_address`` of the request * A default rate limit of 200 per day, and 50 per hour applied to all routes. 
* The ``slow`` route having an explicit rate limit decorator will bypass the default rate limit and only allow 1 request per day. * The ``medium`` route inherits the default limits and adds on a decorated limit of 1 request per second. * The ``ping`` route will be exempt from any default rate limits. .. tip:: The built in flask static files routes are also exempt from rate limits. Every time a request exceeds the rate limit, the view function will not get called and instead a `429 `_ http error will be raised. The extension adds a ``limiter`` subcommand to the :doc:`Flask CLI ` which can be used to inspect the effective configuration and applied rate limits (See :ref:`cli:Command Line Interface` for more details). Given the quick start example above: .. code-block:: shell $ flask limiter config .. program-output:: FLASK_APP=../../examples/sample.py:app flask limiter config :shell: .. code-block:: shell $ flask limiter limits .. program-output:: FLASK_APP=../../examples/sample.py:app flask limiter limits :shell: The Flask-Limiter extension --------------------------- The extension can be initialized with the :class:`flask.Flask` application in the usual ways. Using the constructor .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter(get_remote_address, app=app) Deferred app initialization using :meth:`~flask_limiter.Limiter.init_app` .. code-block:: python limiter = Limiter(get_remote_address) limiter.init_app(app) At this point it might be a good idea to look at the configuration options available in the extension in the :ref:`configuration:using flask config` section and the :class:`flask_limiter.Limiter` class documentation. ----------------------------- Configuring a storage backend ----------------------------- The extension can be configured to use any storage supported by :pypi:`limits`. Here are a few common examples: .. tab:: Memcached Any additional parameters provided in :paramref:`~Limiter.storage_options` will be passed to the constructor of the memcached client (either :class:`~pymemcache.client.base.PooledClient` or :class:`~pymemcache.client.hash.HashClient`). For more details see :class:`~limits.storage.MemcachedStorage`. .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter( get_remote_address, app=app, storage_uri="memcached://localhost:11211", storage_options={} ) .. tab:: Redis Any additional parameters provided in :paramref:`~Limiter.storage_options` will be passed to :meth:`redis.Redis.from_url` as keyword arguments. For more details see :class:`~limits.storage.RedisStorage` .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter( get_remote_address, app=app, storage_uri="redis://localhost:6379", storage_options={"socket_connect_timeout": 30}, strategy="fixed-window", # or "moving-window" or "sliding-window-counter" ) .. tab:: Redis (reused connection pool) If you wish to reuse a :class:`redis.connection.ConnectionPool` instance you can pass that in :paramref:`~Limiter.storage_option` .. code-block:: python import redis from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... 
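# Reusing an already-created connection pool means the limiter shares the
# application's Redis connections instead of opening a separate set of its own.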
pool = redis.connection.BlockingConnectionPool.from_url("redis://.....") limiter = Limiter( get_remote_address, app=app, storage_uri="redis://", storage_options={"connection_pool": pool}, strategy="fixed-window", # or "moving-window" or "sliding-window-counter" ) .. tab:: Redis Cluster Any additional parameters provided in :paramref:`~Limiter.storage_options` will be passed to :class:`~redis.cluster.RedisCluster` as keyword arguments. For more details see :class:`~limits.storage.RedisClusterStorage` .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter( get_remote_address, app=app, storage_uri="redis+cluster://localhost:7000,localhost:7001,localhost:7002", storage_options={"socket_connect_timeout": 30}, strategy="fixed-window", # or "moving-window" or "sliding-window-counter" ) .. tab:: MongoDB .. code-block:: python from flask_limiter import Limiter from flask_limiter.util import get_remote_address .... limiter = Limiter( get_remote_address, app=app, storage_uri="mongodb://localhost:27017", strategy="fixed-window", # or "moving-window" or "sliding-window-counter" ) The :paramref:`~Limiter.storage_uri` and :paramref:`~Limiter.storage_options` parameters can also be provided by :ref:`configuration:using flask config` variables. The different configuration options for each storage can be found in the :doc:`storage backend documentation for limits ` as that is delegated to the :pypi:`limits` library. .. _ratelimit-domain: Rate Limit Domain ----------------- Each :class:`~flask_limiter.Limiter` instance must be initialized with a :paramref:`~Limiter.key_func` that returns the bucket in which each request is put into when evaluating whether it is within the rate limit or not. For simple setups a utility function is provided: :func:`~flask_limiter.util.get_remote_address` which uses the :attr:`~flask.Request.remote_addr` from :class:`flask.Request`. Please refer to :ref:`deploy-behind-proxy` for an example. Decorators to declare rate limits ================================= Decorators made available as instance methods of the :class:`~flask_limiter.Limiter` instance to be used with the :class:`flask.Flask` application. .. _ratelimit-decorator-limit: Route specific limits --------------------- .. automethod:: Limiter.limit :noindex: There are a few ways of using the :meth:`~flask_limiter.Limiter.limit` decorator depending on your preference and use-case. ---------------- Single decorator ---------------- The limit string can be a single limit or a delimiter separated string .. code-block:: python @app.route("....") @limiter.limit("100/day;10/hour;1/minute") def my_route() ... ------------------- Multiple decorators ------------------- The limit string can be a single limit or a delimiter separated string or a combination of both. .. code-block:: python @app.route("....") @limiter.limit("100/day") @limiter.limit("10/hour") @limiter.limit("1/minute") def my_route(): ... ---------------------- Custom keying function ---------------------- By default rate limits are applied based on the key function that the :class:`~flask_limiter.Limiter` instance was initialized with. You can implement your own function to retrieve the key to rate limit by when decorating individual routes. Take a look at :ref:`keyfunc-customization` for some examples.. .. code-block:: python def my_key_func(): ... @app.route("...") @limiter.limit("100/day", my_key_func) def my_route(): ... .. 
note:: The key function is called from within a :doc:`flask request context `. ---------------------------------- Dynamically loaded limit string(s) ---------------------------------- There may be situations where the rate limits need to be retrieved from sources external to the code (database, remote api, etc...). This can be achieved by providing a callable to the decorator. .. code-block:: python def rate_limit_from_config(): return current_app.config.get("CUSTOM_LIMIT", "10/s") @app.route("...") @limiter.limit(rate_limit_from_config) def my_route(): ... .. warning:: The provided callable will be called for every request on the decorated route. For expensive retrievals, consider caching the response. .. note:: The callable is called from within a :doc:`flask request context ` during the `before_request` phase. -------------------- Exemption conditions -------------------- Each limit can be exempted when given conditions are fulfilled. These conditions can be specified by supplying a callable as an :attr:`exempt_when` argument when defining the limit. .. code-block:: python @app.route("/expensive") @limiter.limit("100/day", exempt_when=lambda: current_user.is_admin) def expensive_route(): ... .. _ratelimit-decorator-shared-limit: Reusable limits --------------- For scenarios where a rate limit should be shared by multiple routes (For example when you want to protect routes using the same resource with an umbrella rate limit). .. automethod:: Limiter.shared_limit :noindex: ------------------ Named shared limit ------------------ .. code-block:: python mysql_limit = limiter.shared_limit("100/hour", scope="mysql") @app.route("..") @mysql_limit def r1(): ... @app.route("..") @mysql_limit def r2(): ... -------------------- Dynamic shared limit -------------------- When a callable is passed as scope, the return value of the function will be used as the scope. Note that the callable takes one argument: a string representing the request endpoint. .. code-block:: python def host_scope(endpoint_name): return request.host host_limit = limiter.shared_limit("100/hour", scope=host_scope) @app.route("..") @host_limit def r1(): ... @app.route("..") @host_limit def r2(): ... .. _ratelimit-decorator-exempt: Decorators for skipping rate limits ----------------------------------- Registering exemptions from rate limits ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automethod:: Limiter.exempt :noindex: .. _ratelimit-decorator-request-filter: Skipping a rate limit based on a request ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This decorator marks a function as a filter for requests that are going to be tested for rate limits. If any of the request filters return ``True`` no rate limiting will be performed for that request. This mechanism can be used to create custom white lists. .. automethod:: Limiter.request_filter :noindex: .. code-block:: python @limiter.request_filter def header_whitelist(): return request.headers.get("X-Internal", "") == "true" @limiter.request_filter def ip_whitelist(): return request.remote_addr == "127.0.0.1" In the above example, any request that contains the header ``X-Internal: true`` or originates from localhost will not be rate limited. For more complex use cases, refer to the :ref:`recipes:recipes` section. 
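A request filter can be exercised quickly with the Flask test client. The sketch below is illustrative only; it assumes the ``app`` object and the ``X-Internal`` request filter from the examples above, plus a rate limited ``/`` route:

.. code-block:: python

    with app.test_client() as client:
        # Requests carrying the internal header skip rate limiting entirely,
        # so none of them should be rejected with a 429 status.
        for _ in range(100):
            assert client.get("/", headers={"X-Internal": "true"}).status_code != 429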
flask-limiter-3.12/doc/source/misc.rst000066400000000000000000000004711476516161100177460ustar00rootroot00000000000000======== Appendix ======== References ========== * `Redis rate limiting pattern #2 `_ * `DomainTools redis rate limiter `_ * `limits: python rate limiting utilities `_ .. include:: ../../CONTRIBUTIONS.rst flask-limiter-3.12/doc/source/recipes.rst000066400000000000000000000461341476516161100204530ustar00rootroot00000000000000Recipes ======= .. currentmodule:: flask_limiter .. _keyfunc-customization: Rate Limit Key Functions ------------------------- You can easily customize your rate limits to be based on any characteristic of the incoming request. Both the :class:`~Limiter` constructor and the :meth:`~Limiter.limit` decorator accept a keyword argument ``key_func`` that should return a string (or an object that has a string representation). Rate limiting a route by current user (using Flask-Login):: @route("/test") @login_required @limiter.limit("1 per day", key_func = lambda : current_user.username) def test_route(): return "42" Rate limiting all requests by country:: from flask import request, Flask import GeoIP gi = GeoIP.open("GeoLiteCity.dat", GeoIP.GEOIP_INDEX_CACHE | GeoIP.GEOIP_CHECK_CACHE) def get_request_country(): return gi.record_by_name(request.remote_addr)['region_name'] app = Flask(__name__) limiter = Limiter(get_request_country, app=app, default_limits=["10/hour"]) Custom Rate limit exceeded responses ------------------------------------ The default configuration results in a :exc:`RateLimitExceeded` exception being thrown (**which effectively halts any further processing and a response with status `429`**). The exceeded limit is added to the response and results in an response body that looks something like: .. code:: html 429 Too Many Requests

Too Many Requests

1 per 1 day

For all routes that are rate limited ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you want to configure the response you can register an error handler for the ``429`` error code in a manner similar to the following example, which returns a json response instead:: @app.errorhandler(429) def ratelimit_handler(e): return make_response( jsonify(error=f"ratelimit exceeded {e.description}"), 429 ) .. versionadded:: 2.6.0 The same effect can be achieved by using the :paramref:`~Limiter.on_breach` parameter when initializing the :class:`Limiter`. If the callback passed to this parameter returns an instance of :class:`~flask.Response` that response will be the one embedded into the :exc:`RateLimitExceeded` exception that is raised. For example:: from flask import make_response, render_template from flask_limiter import Limiter, RequestLimit def default_error_responder(request_limit: RequestLimit): return make_response( render_template("my_ratelimit_template.tmpl", request_limit=request_limit), 429 ) limiter = Limiter( key_func=..., default_limits=["100/minute"], on_breach=default_error_responder ) .. tip:: If you have specified both an :paramref:`~Limiter.on_breach` callback and registered a callback using the :meth:`~flask.Flask.errorhandler` decorator, the one registered for ``429`` errors will still be called and could end up ignoring the response returned by the :paramref:`~Limiter.on_breach` callback. There may be legitimate reasons to do this (for example if your application raises ``429`` errors by itself or through another middleware). This can be managed in the callback registered with :meth:`~flask.Flask.errorhandler` by checking if the incoming error has a canned response and using that instead of building a new one:: @app.errorhandler(429) def careful_ratelimit_handler(error): return error.get_response() or make_response( jsonify( error=f"ratelimit exceeded {error.description}" ), 429 ) .. note:: .. versionchanged:: 2.8.0 Any errors encountered when calling an :paramref:`~Limiter.on_breach` callback will be re-raised unless :paramref:`~Limiter.swallow_errors` is set to ``True``. For specific rate limit decorated routes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. versionadded:: 2.6.0 If the objective is to only customize rate limited error responses for certain rate limited routes, this can be achieved in a similar manner as above, through the :paramref:`~Limiter.limit.on_breach` parameter of the rate limit decorator. Following the example from above where the extension was initialized with an :paramref:`~Limiter.on_breach` callback, the ``index`` route below declares its own :paramref:`~Limiter.limit.on_breach` callback which instead of rendering a template returns a json response (with a ``200`` status code):: limiter = Limiter( key_func=..., default_limits=["100/minute"], on_breach=default_error_responder ) def index_ratelimit_error_responder(request_limit: RequestLimit): return jsonify({"error": "rate_limit_exceeded"}) @app.route("/") @limiter.limit("10/minute", on_breach=index_ratelimit_error_responder) def index(): ... The above example also demonstrates the subtle implementation detail that the response from the :paramref:`Limiter.limit.on_breach` callback (if provided) will take priority over the response from the :paramref:`Limiter.on_breach` callback if there is one. Meta limits ----------- ..
versionadded:: 3.5.0 Meta limits can be used for an additional layer of protection (for example against denial of service attacks) by limiting the number of times a requesting client can hit any rate limit in the application within configured time slices. These can be configured by using the :paramref:`~flask_limiter.Limiter.meta_limits` constructor argument (or the associated :data:`RATELIMIT_META` flask config attribute). Consider the following application & limiter configuration:: limiter = Limiter( key_func=get_remote_address, meta_limits=["2/hour", "4/day"], default_limits=["10/minute"], ) @app.route("/fast") def fast(): return "fast" @app.route("/slow") @limiter.limit("1/minute") def slow(): return "slow" The ``2/hour, 4/day`` value of :paramref:`~flask_limiter.Limiter.meta_limits` ensures that if any of the ``default_limits`` or the per-route limit of ``1/minute`` is exceeded more than **twice an hour** or **four times a day**, a :class:`~flask_limiter.RateLimitExceeded` exception will be raised (i.e. a ``429`` response will be returned) for any subsequent request until the ``meta_limit`` is reset. For example: .. code-block:: shell $ curl localhost:5000/fast fast $ curl localhost:5000/slow slow $ curl localhost:5000/slow 429 Too Many Requests

Too Many Requests

1 per 1 minute

After a minute, the ``slow`` endpoint can be accessed again once per minute. .. code-block:: shell $ sleep 60 $ curl localhost:5000/slow slow $ curl localhost:5000/slow 429 Too Many Requests

Too Many Requests

1 per 1 minute

Now, even after waiting a minute both the ``slow`` and ``fast`` endpoints are rejected due to the ``2/hour`` meta limit. .. code-block:: shell $ sleep 60 $ curl localhost:5000/slow 429 Too Many Requests

Too Many Requests

2 per 1 hour

$ curl localhost:5000/fast 429 Too Many Requests

Too Many Requests

2 per 1 hour

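As mentioned above, meta limits can also be supplied through flask configuration instead of the constructor argument. A minimal sketch, assuming the same semicolon delimited limit string format used elsewhere in these docs for multiple limits (the surrounding ``app`` and ``get_remote_address`` setup mirrors the earlier example):

.. code-block:: python

    # equivalent to meta_limits=["2/hour", "4/day"] in the constructor,
    # assuming the semicolon delimited multi-limit string format
    app.config["RATELIMIT_META"] = "2/hour;4/day"

    limiter = Limiter(
        get_remote_address,
        app=app,
        default_limits=["10/minute"],
    )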
Customizing the cost of a request --------------------------------- By default whenever a request is served a **cost** of ``1`` is charged for each rate limit that applies within the context of that request. There may be situations where a different value should be used. The :meth:`~flask_limiter.Limiter.limit` and :meth:`~flask_limiter.Limiter.shared_limit` decorators both accept a ``cost`` parameter which accepts either a static :class:`int` or a callable that returns an :class:`int`. As an example, the following configuration will result in a double penalty whenever ``Some reason`` is true :: from flask import request, current_app def my_cost_function() -> int: if .....: # Some reason return 2 return 1 @app.route("/") @limiter.limit("100/second", cost=my_cost_function) def root(): ... A similar approach can be used for both default and application level limits by providing either a cost function to the :class:`~flask_limiter.Limiter` constructor via the :paramref:`~flask_limiter.Limiter.default_limits_cost` or :paramref:`~flask_limiter.Limiter.application_limits_cost` parameters. Customizing rate limits based on response ----------------------------------------- For scenarios where the decision to count the current request towards a rate limit can only be made after the request has completed, a callable that accepts the current :class:`flask.Response` object as its argument can be provided to the :meth:`~Limiter.limit` or :meth:`~Limiter.shared_limit` decorators through the ``deduct_when`` keyword argument. A truthy response from the callable will result in a deduction from the rate limit. As an example, to only count non `200` responses towards the rate limit .. code-block:: python @app.route("..") @limiter.limit( "1/second", deduct_when=lambda response: response.status_code != 200 ) def route(): ... `deduct_when` can also be provided for default limits by providing the :paramref:`~flask_limiter.Limiter.default_limits_deduct_when` parameter to the :class:`~flask_limiter.Limiter` constructor. .. note:: All requests will be tested for the rate limit and rejected accordingly if the rate limit is already hit. The provision of the `deduct_when` argument only changes whether the request will count towards depleting the rate limit. .. _using-flask-pluggable-views: Rate limiting Class-based Views ------------------------------- If you are taking a class based approach for defining views, the recommended method (:doc:`flask:views`) of adding decorators is to add the :meth:`~Limiter.limit` decorator to :attr:`~flask.views.View.decorators` in your view subclass as shown in the example below .. code-block:: python app = Flask(__name__) limiter = Limiter(get_remote_address, app=app) class MyView(flask.views.MethodView): decorators = [limiter.limit("10/second")] def get(self): return "get" def put(self): return "put" .. note:: This approach is limited to either sharing the same rate limit for all http methods of a given :class:`flask.views.View` or applying the declared rate limit independently for each http method (to accomplish this, pass in ``True`` to the ``per_method`` keyword argument to :meth:`~Limiter.limit`). Alternatively, the limit can be restricted to only certain http methods by passing them as a list to the `methods` keyword argument. Rate limiting all routes in a :class:`~flask.Blueprint` ------------------------------------------------------- .. 
warning:: :class:`~flask.Blueprint` instances that are registered on another blueprint instead of on the main :class:`~flask.Flask` instance had not been considered upto :ref:`changelog:v2.3.0`. Effectively **they neither inherited** the rate limits explicitly registered on the parent :class:`~flask.Blueprint` **nor were they exempt** from rate limits if the parent had been marked exempt. (See :issue:`326`, and the :ref:`recipes:nested blueprints` section below). :meth:`~Limiter.limit`, :meth:`~Limiter.shared_limit` & :meth:`~Limiter.exempt` can all be applied to :class:`flask.Blueprint` instances as well. In the following example the ``login`` Blueprint has a special rate limit applied to all its routes, while the ``doc`` Blueprint is exempt from all rate limits. The ``regular`` Blueprint follows the default rate limits. .. code-block:: python app = Flask(__name__) login = Blueprint("login", __name__, url_prefix = "/login") regular = Blueprint("regular", __name__, url_prefix = "/regular") doc = Blueprint("doc", __name__, url_prefix = "/doc") @doc.route("/") def doc_index(): return "doc" @regular.route("/") def regular_index(): return "regular" @login.route("/") def login_index(): return "login" limiter = Limiter(get_remote_address, app=app, default_limits = ["1/second"]) limiter.limit("60/hour")(login) limiter.exempt(doc) app.register_blueprint(doc) app.register_blueprint(login) app.register_blueprint(regular) Nested Blueprints ^^^^^^^^^^^^^^^^^ .. versionadded:: 2.3.0 `Nested Blueprints `__ require some special considerations. ===================================== Exempting routes in nested Blueprints ===================================== Expanding the example from the Flask documentation:: parent = Blueprint('parent', __name__, url_prefix='/parent') child = Blueprint('child', __name__, url_prefix='/child') parent.register_blueprint(child) limiter.exempt(parent) app.register_blueprint(parent) Routes under the ``child`` blueprint **do not** automatically get exempted by default and have to be marked exempt explicitly. This behavior is to maintain backward compatibility and can be opted out of by adding :attr:`~flask_limiter.ExemptionScope.DESCENDENTS` to :paramref:`~Limiter.exempt.flags` when calling :meth:`Limiter.exempt`:: limiter.exempt( parent, flags=ExemptionScope.DEFAULT | ExemptionScope.APPLICATION | ExemptionScope.DESCENDENTS ) =========================================================== Explicitly setting limits / exemptions on nested Blueprints =========================================================== Using combinations of :paramref:`~Limiter.limit.override_defaults` parameter when explicitly declaring limits on Blueprints and the :paramref:`~Limiter.exempt.flags` parameter when exempting Blueprints with :meth:`~Limiter.exempt` the resolution of inherited and descendent limits within the scope of a Blueprint can be controlled. 
Here's a slightly involved example:: limiter = Limiter( ..., default_limits = ["100/hour"], application_limits = ["100/minute"] ) parent = Blueprint('parent', __name__, url_prefix='/parent') child = Blueprint('child', __name__, url_prefix='/child') grandchild = Blueprint('grandchild', __name__, url_prefix='/grandchild') health = Blueprint('health', __name__, url_prefix='/health') parent.register_blueprint(child) parent.register_blueprint(health) child.register_blueprint(grandchild) child.register_blueprint(health) grandchild.register_blueprint(health) app.register_blueprint(parent) limiter.limit("2/minute")(parent) limiter.limit("1/second", override_defaults=False)(child) limiter.limit("10/minute")(grandchild) limiter.exempt( health, flags=ExemptionScope.DEFAULT|ExemptionScope.APPLICATION|ExemptionScope.ANCESTORS ) Effectively this means: #. Routes under ``parent`` will override the application defaults and will be limited to ``2 per minute`` #. Routes under ``child`` will respect both the parent and the application defaults and effectively be limited to ``At most 1 per second, 2 per minute and 100 per hour`` #. Routes under ``grandchild`` will not inherit either the limits from `child` or `parent` or the application defaults and allow ``10 per minute`` #. All calls to ``/health/`` will be exempt from all limits (including any limits that would otherwise be inherited from the Blueprints it is nested under due to the addition of the :class:`~ExemptionScope.ANCESTORS` flag). .. note:: Only calls to `/health` will be exempt from the application wide global limit of `100/minute`. .. _logging: Logging ------- Each :class:`~Limiter` instance has a registered :class:`~logging.Logger` named ``flask-limiter`` that is by default **not** configured with a handler. This can be configured according to your needs:: import logging limiter_logger = logging.getLogger("flask-limiter") # force DEBUG logging limiter_logger.setLevel(logging.DEBUG) # restrict to only error level limiter_logger.setLevel(logging.ERROR) # Add a filter limiter_logger.addFilter(SomeFilter) # etc .. Custom error messages --------------------- :meth:`~Limiter.limit` & :meth:`~Limiter.shared_limit` can be provided with an `error_message` argument to over ride the default `n per x` error message that is returned to the calling client. The `error_message` argument can either be a simple string or a callable that returns one. .. code-block:: python app = Flask(__name__) limiter = Limiter(get_remote_address, app=app) def error_handler(): return app.config.get("DEFAULT_ERROR_MESSAGE") @app.route("/") @limiter.limit("1/second", error_message='chill!') def index(): .... @app.route("/ping") @limiter.limit("10/second", error_message=error_handler) def ping(): .... Custom rate limit headers ------------------------- Though you can get pretty far with configuring the standard headers associated with rate limiting using configuration parameters available as described under :ref:`configuration:rate-limiting headers` - this may not be sufficient for your use case. For such cases you can access the :attr:`~Limiter.current_limit` property from the :class:`~Limiter` instance from anywhere within a :doc:`request context `. As an example you could leave the built in header population disabled and add your own with an :meth:`~flask.Flask.after_request` hook:: app = Flask(__name__) limiter = Limiter(get_remote_address, app=app) @app.route("/") @limiter.limit("1/second") def index(): .... 
@app.after_request def add_headers(response): if limiter.current_limit: response.headers["RemainingLimit"] = limiter.current_limit.remaining response.headers["ResetAt"] = limiter.current_limit.reset_at response.headers["MaxRequests"] = limiter.current_limit.limit.amount response.headers["WindowSize"] = limiter.current_limit.limit.get_expiry() response.headers["Breached"] = limiter.current_limit.breached return response This will result in headers along the lines of:: < RemainingLimit: 0 < ResetAt: 1641691205 < MaxRequests: 1 < WindowSize: 1 < Breached: True .. _deploy-behind-proxy: Deploying an application behind a proxy --------------------------------------- If your application is behind a proxy and you are using werkzeug > 0.9+ you can use the :class:`werkzeug.middleware.proxy_fix.ProxyFix` fixer to reliably get the remote address of the user, while protecting your application against ip spoofing via headers. .. code-block:: python from flask import Flask from flask_limiter import Limiter from flask_limiter.util import get_remote_address from werkzeug.middleware.proxy_fix import ProxyFix app = Flask(__name__) # for example if the request goes through one proxy # before hitting your application server app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1) limiter = Limiter(get_remote_address, app=app) flask-limiter-3.12/doc/source/strategies.rst000066400000000000000000000045631476516161100211730ustar00rootroot00000000000000.. _ratelimit-strategy: Rate limiting strategies ======================== Flask-Limiter delegates the implementation of rate limiting strategies to the :doc:`limits:index` library. The strategy can be selected by setting the :paramref:`flask_limiter.Limiter.strategy` constructor argument or the :data:`RATELIMIT_STRATEGY` config. .. note:: For more details about the implementation of each strategy refer to the :pypi:`limits` documentation for :doc:`limits:strategies`. Fixed Window ------------ This strategy is the most memory‑efficient because it uses a single counter per resource and rate limit. When the first request arrives, a window is started for a fixed duration (e.g., for a rate limit of 10 requests per minute the window expires in 60 seconds from the first request). All requests in that window increment the counter and when the window expires, the counter resets See the :ref:`limits:strategies:fixed window` documentation in the :doc:`limits:index` library for more details. To select this strategy, set :paramref:`flask_limiter.Limiter.strategy` or :data:`RATELIMIT_STRATEGY` to ``fixed-window`` Moving Window ------------- This strategy adds each request’s timestamp to a log if the ``nth`` oldest entry (where ``n`` is the limit) is either not present or is older than the duration of the window (for example with a rate limit of ``10 requests per minute`` if there are either less than 10 entries or the 10th oldest entry is atleast 60 seconds old). Upon adding a new entry to the log "expired" entries are truncated. See the :ref:`limits:strategies:moving window` documentation in the :doc:`limits:index` library for more details. To select this strategy, set :paramref:`flask_limiter.Limiter.strategy` or :data:`RATELIMIT_STRATEGY` to ``moving-window`` Sliding Window -------------- This strategy approximates the moving window while using less memory by maintaining two counters: - **Current bucket:** counts requests in the ongoing period. - **Previous bucket:** counts requests in the immediately preceding period. 
A weighted sum of these counters is computed based on the elapsed time in the current bucket. See the :ref:`limits:strategies:sliding window counter` documentation in the :doc:`limits:index` library for more details. To select this strategy, set :paramref:`flask_limiter.Limiter.strategy` or :data:`RATELIMIT_STRATEGY` to ``sliding-window-counter`` flask-limiter-3.12/doc/source/theme_config.py000066400000000000000000000026601476516161100212640ustar00rootroot00000000000000colors = { "bg0": " #fbf1c7", "bg1": " #ebdbb2", "bg2": " #d5c4a1", "bg3": " #bdae93", "bg4": " #a89984", "gry": " #928374", "fg4": " #7c6f64", "fg3": " #665c54", "fg2": " #504945", "fg1": " #3c3836", "fg0": " #282828", "red": " #cc241d", "red2": " #9d0006", "orange": " #d65d0e", "orange2": " #af3a03", "yellow": " #d79921", "yellow2": " #b57614", "green": " #98971a", "green2": " #79740e", "aqua": " #689d6a", "aqua2": " #427b58", "blue": " #458588", "blue2": " #076678", "purple": " #b16286", "purple2": " #8f3f71", } html_theme = "furo" html_theme_options = { "light_css_variables": { "font-stack": "Fira Sans, sans-serif", "font-stack--monospace": "Fira Code, monospace", "color-brand-primary": colors["purple2"], "color-brand-content": colors["blue2"], }, "dark_css_variables": { "color-brand-primary": colors["purple"], "color-brand-content": colors["blue"], "color-background-primary": colors["fg1"], "color-background-secondary": colors["fg0"], "color-foreground-primary": colors["bg0"], "color-foreground-secondary": colors["bg1"], "color-highlighted-background": colors["yellow"], "color-highlight-on-target": colors["fg2"], }, } highlight_language = "python3" pygments_style = "gruvbox-light" pygments_dark_style = "gruvbox-dark" flask-limiter-3.12/docker-compose.yml000066400000000000000000000003001476516161100176400ustar00rootroot00000000000000services: memcached: image: memcached ports: - 31211:11211 redis: image: redis ports: - 46379:6379 mongodb: image: mongo ports: - '47017:27017' flask-limiter-3.12/examples/000077500000000000000000000000001476516161100160305ustar00rootroot00000000000000flask-limiter-3.12/examples/kitchensink.py000066400000000000000000000064551476516161100207260ustar00rootroot00000000000000from __future__ import annotations import os import jinja2 from flask import Blueprint, Flask, jsonify, make_response, render_template, request from flask.views import View import flask_limiter from flask_limiter import ExemptionScope, Limiter from flask_limiter.util import get_remote_address def index_error_responder(request_limit): error_template = jinja2.Environment().from_string( """

Breached rate limit of: {{request_limit.limit}}

Path: {{request.path}}

""" ) return make_response(render_template(error_template, request_limit=request_limit)) def app(): def default_limit_extra(): if request.headers.get("X-Evil"): return "100/minute" return "200/minute" def default_cost(): if request.headers.get("X-Evil"): return 2 return 1 limiter = Limiter( get_remote_address, default_limits=["20/hour", "1000/hour", default_limit_extra], default_limits_exempt_when=lambda: request.headers.get("X-Internal"), default_limits_deduct_when=lambda response: response.status_code == 200, default_limits_cost=default_cost, application_limits=["5000/hour"], meta_limits=["2/day"], headers_enabled=True, storage_uri=os.environ.get("FLASK_RATELIMIT_STORAGE_URI", "memory://"), ) app = Flask(__name__) app.config.from_prefixed_env() @app.errorhandler(429) def handle_error(e): return e.get_response() or make_response( jsonify(error="ratelimit exceeded %s" % e.description) ) @app.route("/") @limiter.limit("10/minute", on_breach=index_error_responder) def root(): """ Custom rate limit of 10/minute which overrides the default limits. The error page displayed on rate limit breached is also customized by using an `on_breach` callback to render a template """ return "42" @app.route("/version") @limiter.exempt def version(): """ Exempt from all rate limits """ return flask_limiter.__version__ health_blueprint = Blueprint("health", __name__, url_prefix="/health") @health_blueprint.route("/") def health(): return "ok" app.register_blueprint(health_blueprint) #: Exempt from default, application and ancestor rate limits (effectively all) limiter.exempt( health_blueprint, flags=ExemptionScope.DEFAULT | ExemptionScope.APPLICATION | ExemptionScope.ANCESTORS, ) class ResourceView(View): methods = ["GET", "POST"] @staticmethod def json_error_responder(request_limit): return jsonify({"limit": str(request_limit.limit)}) #: Custom rate limit of 5/second by http method type for all routes under this #: resource view. The error response is also customized by using the `on_breach` #: callback to return a json error response decorators = [ limiter.limit("5/second", per_method=True, on_breach=json_error_responder) ] def dispatch_request(self): return request.method.lower() app.add_url_rule("/resource", view_func=ResourceView.as_view("resource")) limiter.init_app(app) return app if __name__ == "__main__": app().run() flask-limiter-3.12/examples/sample.py000066400000000000000000000011401476516161100176570ustar00rootroot00000000000000from __future__ import annotations from flask import Flask from flask_limiter import Limiter from flask_limiter.util import get_remote_address app = Flask(__name__) limiter = Limiter( get_remote_address, app=app, default_limits=["200 per day", "50 per hour"], storage_uri="memory://", ) @app.route("/slow") @limiter.limit("1 per day") def slow(): return ":(" @app.route("/medium") @limiter.limit("1/second", override_defaults=False) def medium(): return ":|" @app.route("/fast") def fast(): return ":)" @app.route("/ping") @limiter.exempt def ping(): return "PONG" flask-limiter-3.12/flask_limiter/000077500000000000000000000000001476516161100170375ustar00rootroot00000000000000flask-limiter-3.12/flask_limiter/__init__.py000066400000000000000000000007531476516161100211550ustar00rootroot00000000000000"""Flask-Limiter extension for rate limiting.""" from __future__ import annotations from . 
import _version from .constants import ExemptionScope, HeaderNames from .errors import RateLimitExceeded from .extension import Limiter from .wrappers import RequestLimit __all__ = [ "ExemptionScope", "HeaderNames", "Limiter", "RateLimitExceeded", "RequestLimit", ] #: Aliased for backward compatibility HEADERS = HeaderNames __version__ = _version.get_versions()["version"] flask-limiter-3.12/flask_limiter/_compat.py000066400000000000000000000005731476516161100210400ustar00rootroot00000000000000from __future__ import annotations import flask from flask.ctx import RequestContext # flask.globals.request_ctx is only available in Flask >= 2.2.0 try: from flask.globals import request_ctx except ImportError: request_ctx = None def request_context() -> RequestContext: if request_ctx is None: return flask._request_ctx_stack.top return request_ctx flask-limiter-3.12/flask_limiter/_version.py000066400000000000000000000601431476516161100212410ustar00rootroot00000000000000# This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. # Generated by versioneer-0.29 # https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" import errno import functools import os import re import subprocess import sys from typing import Any, Callable, Dict, List, Optional, Tuple def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = " (grafted, HEAD, tag: 3.12)" git_full = "8b111eb5f0298e5b095272027bf3194d2c999aa8" git_date = "2025-03-14 19:18:49 -0700" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str parentdir_prefix: str versionfile_source: str verbose: bool def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440-pre" cfg.tag_prefix = "" cfg.parentdir_prefix = "flask-limiter-" cfg.versionfile_source = "flask_limiter/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen( [command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs, ) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return { "version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None, "date": None, } rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print( "Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix) ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix) :] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r"\d", r): continue if verbose: print("picking %s" % r) return { "version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date, } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return { "version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None, } @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner( GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*", ], cwd=root, ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces: Dict[str, Any]) -> str: """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). 
""" vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces: Dict[str, Any]) -> str: """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return { "version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None, } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return { "version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date"), } def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None, } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return { "version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None, } flask-limiter-3.12/flask_limiter/commands.py000066400000000000000000000556701476516161100212270ustar00rootroot00000000000000from __future__ import annotations import itertools import time from functools import partial from typing import Any from urllib.parse import urlparse import click from flask import Flask, current_app from flask.cli import with_appcontext from limits.strategies import RateLimiter from rich.console import Console, group from rich.live import Live from rich.pretty import Pretty from rich.prompt import Confirm from rich.table import Table from rich.theme import Theme from rich.tree import Tree from typing_extensions import TypedDict from werkzeug.exceptions import MethodNotAllowed, NotFound from werkzeug.routing import Rule from flask_limiter import Limiter from flask_limiter.constants import ConfigVars, ExemptionScope, HeaderNames from flask_limiter.typing import Callable, Generator, cast from flask_limiter.util import get_qualified_name from flask_limiter.wrappers import Limit limiter_theme = Theme( { "success": "bold green", "danger": "bold red", "error": "bold red", "blueprint": "bold red", "default": "magenta", "callable": "cyan", "entity": "magenta", "exempt": "bold red", "route": "yellow", "http": "bold green", "option": "bold yellow", } ) def render_func(func: Any) -> str | Pretty: if callable(func): if func.__name__ == "": return f"[callable]({func.__module__})[/callable]" return f"[callable]{func.__module__}.{func.__name__}()[/callable]" return Pretty(func) def render_storage(ext: Limiter) -> Tree: render = Tree(ext._storage_uri or "N/A") if ext.storage: render.add(f"[entity]{ext.storage.__class__.__name__}[/entity]") render.add(f"[entity]{ext.storage.storage}[/entity]") # type: ignore render.add(Pretty(ext._storage_options or {})) health = ext.storage.check() if health: render.add("[success]OK[/success]") else: render.add("[error]Error[/error]") return render def render_strategy(strategy: RateLimiter) -> str: return f"[entity]{strategy.__class__.__name__}[/entity]" def render_limit_state( limiter: Limiter, endpoint: str, limit: Limit, key: str, method: str ) -> str: args = [key, limit.scope_for(endpoint, method)] if not limiter.storage or (limiter.storage and not limiter.storage.check()): return ": [error]Storage not available[/error]" test = limiter.limiter.test(limit.limit, *args) stats = limiter.limiter.get_window_stats(limit.limit, *args) if not test: return ( f": [error]Fail[/error] ({stats[1]} out of {limit.limit.amount} remaining)" ) else: return f": [success]Pass[/success] ({stats[1]} out of {limit.limit.amount} remaining)" def render_limit(limit: Limit, simple: bool = True) -> str: render = str(limit.limit) if simple: return render options = [] if limit.deduct_when: options.append(f"deduct_when: {render_func(limit.deduct_when)}") if limit.exempt_when: options.append(f"exempt_when: {render_func(limit.exempt_when)}") if options: render = f"{render} 
[option]{{{', '.join(options)}}}[/option]" return render def render_limits( app: Flask, limiter: Limiter, limits: tuple[list[Limit], ...], endpoint: str | None = None, blueprint: str | None = None, rule: Rule | None = None, exemption_scope: ExemptionScope = ExemptionScope.NONE, test: str | None = None, method: str = "GET", label: str | None = "", ) -> Tree: _label = None if rule and endpoint: _label = f"{endpoint}: {rule}" label = _label or label or "" renderable = Tree(label) entries = [] for limit in limits[0] + limits[1]: if endpoint: view_func = app.view_functions.get(endpoint, None) source = ( "blueprint" if blueprint and limit in limiter.limit_manager.blueprint_limits(app, blueprint) else ( "route" if limit in limiter.limit_manager.decorated_limits( get_qualified_name(view_func) if view_func else "" ) else "default" ) ) else: source = "default" if limit.per_method and rule and rule.methods: for method in rule.methods: rendered = render_limit(limit, False) entry = f"[{source}]{rendered} [http]({method})[/http][/{source}]" if test: entry += render_limit_state( limiter, endpoint or "", limit, test, method ) entries.append(entry) else: rendered = render_limit(limit, False) entry = f"[{source}]{rendered}[/{source}]" if test: entry += render_limit_state( limiter, endpoint or "", limit, test, method ) entries.append(entry) if not entries and exemption_scope: renderable.add("[exempt]Exempt[/exempt]") else: [renderable.add(entry) for entry in entries] return renderable def get_filtered_endpoint( app: Flask, console: Console, endpoint: str | None, path: str | None, method: str | None = None, ) -> str | None: if not (endpoint or path): return None if endpoint: if endpoint in current_app.view_functions: return endpoint else: console.print(f"[red]Error: {endpoint} not found") elif path: adapter = app.url_map.bind("dev.null") parsed = urlparse(path) try: filter_endpoint, _ = adapter.match( parsed.path, method=method, query_args=parsed.query ) return cast(str, filter_endpoint) except NotFound: console.print( f"[error]Error: {path} could not be matched to an endpoint[/error]" ) except MethodNotAllowed: assert method console.print( f"[error]Error: {method.upper()}: {path}" " could not be matched to an endpoint[/error]" ) raise SystemExit @click.group(help="Flask-Limiter maintenance & utility commmands") def cli() -> None: pass @cli.command(help="View the extension configuration") @with_appcontext def config() -> None: with current_app.test_request_context(): console = Console(theme=limiter_theme) limiters = list(current_app.extensions.get("limiter", set())) limiter = limiters and list(limiters)[0] if limiter: extension_details = Table(title="Flask-Limiter Config") extension_details.add_column("Notes") extension_details.add_column("Configuration") extension_details.add_column("Value") extension_details.add_row( "Enabled", ConfigVars.ENABLED, Pretty(limiter.enabled) ) extension_details.add_row( "Key Function", ConfigVars.KEY_FUNC, render_func(limiter._key_func) ) extension_details.add_row( "Key Prefix", ConfigVars.KEY_PREFIX, Pretty(limiter._key_prefix) ) limiter_config = Tree(ConfigVars.STRATEGY) limiter_config_values = Tree(render_strategy(limiter.limiter)) node = limiter_config.add(ConfigVars.STORAGE_URI) node.add("Instance") node.add("Backend") limiter_config.add(ConfigVars.STORAGE_OPTIONS) limiter_config.add("Status") limiter_config_values.add(render_storage(limiter)) extension_details.add_row( "Rate Limiting Config", limiter_config, limiter_config_values ) if 
limiter.limit_manager.application_limits: extension_details.add_row( "Application Limits", ConfigVars.APPLICATION_LIMITS, Pretty( [ render_limit(limit) for limit in limiter.limit_manager.application_limits ] ), ) extension_details.add_row( None, ConfigVars.APPLICATION_LIMITS_PER_METHOD, Pretty(limiter._application_limits_per_method), ) extension_details.add_row( None, ConfigVars.APPLICATION_LIMITS_EXEMPT_WHEN, render_func(limiter._application_limits_exempt_when), ) extension_details.add_row( None, ConfigVars.APPLICATION_LIMITS_DEDUCT_WHEN, render_func(limiter._application_limits_deduct_when), ) extension_details.add_row( None, ConfigVars.APPLICATION_LIMITS_COST, Pretty(limiter._application_limits_cost), ) else: extension_details.add_row( "ApplicationLimits Limits", ConfigVars.APPLICATION_LIMITS, Pretty([]), ) if limiter.limit_manager.default_limits: extension_details.add_row( "Default Limits", ConfigVars.DEFAULT_LIMITS, Pretty( [ render_limit(limit) for limit in limiter.limit_manager.default_limits ] ), ) extension_details.add_row( None, ConfigVars.DEFAULT_LIMITS_PER_METHOD, Pretty(limiter._default_limits_per_method), ) extension_details.add_row( None, ConfigVars.DEFAULT_LIMITS_EXEMPT_WHEN, render_func(limiter._default_limits_exempt_when), ) extension_details.add_row( None, ConfigVars.DEFAULT_LIMITS_DEDUCT_WHEN, render_func(limiter._default_limits_deduct_when), ) extension_details.add_row( None, ConfigVars.DEFAULT_LIMITS_COST, render_func(limiter._default_limits_cost), ) else: extension_details.add_row( "Default Limits", ConfigVars.DEFAULT_LIMITS, Pretty([]) ) if limiter._meta_limits: extension_details.add_row( "Meta Limits", ConfigVars.META_LIMITS, Pretty( [ render_limit(limit) for limit in itertools.chain(*limiter._meta_limits) ] ), ) if limiter._headers_enabled: header_configs = Tree(ConfigVars.HEADERS_ENABLED) header_configs.add(ConfigVars.HEADER_RESET) header_configs.add(ConfigVars.HEADER_REMAINING) header_configs.add(ConfigVars.HEADER_RETRY_AFTER) header_configs.add(ConfigVars.HEADER_RETRY_AFTER_VALUE) header_values = Tree(Pretty(limiter._headers_enabled)) header_values.add(Pretty(limiter._header_mapping[HeaderNames.RESET])) header_values.add( Pretty(limiter._header_mapping[HeaderNames.REMAINING]) ) header_values.add( Pretty(limiter._header_mapping[HeaderNames.RETRY_AFTER]) ) header_values.add(Pretty(limiter._retry_after)) extension_details.add_row( "Header configuration", header_configs, header_values, ) else: extension_details.add_row( "Header configuration", ConfigVars.HEADERS_ENABLED, Pretty(False) ) extension_details.add_row( "Fail on first breach", ConfigVars.FAIL_ON_FIRST_BREACH, Pretty(limiter._fail_on_first_breach), ) extension_details.add_row( "On breach callback", ConfigVars.ON_BREACH, render_func(limiter._on_breach), ) console.print(extension_details) else: console.print( f"No Flask-Limiter extension installed on {current_app}", style="bold red", ) @cli.command(help="Enumerate details about all routes with rate limits") @click.option("--endpoint", default=None, help="Endpoint to filter by") @click.option("--path", default=None, help="Path to filter by") @click.option("--method", default=None, help="HTTP Method to filter by") @click.option("--key", default=None, help="Test the limit") @click.option("--watch/--no-watch", default=False, help="Create a live dashboard") @with_appcontext def limits( endpoint: str | None = None, path: str | None = None, method: str = "GET", key: str | None = None, watch: bool = False, ) -> None: with current_app.test_request_context(): limiters: 
set[Limiter] = current_app.extensions.get("limiter", set()) limiter: Limiter | None = list(limiters)[0] if limiters else None console = Console(theme=limiter_theme) if limiter: manager = limiter.limit_manager groups: dict[str, list[Callable[..., Tree]]] = {} filter_endpoint = get_filtered_endpoint( current_app, console, endpoint, path, method ) for rule in sorted( current_app.url_map.iter_rules(filter_endpoint), key=lambda r: str(r) ): rule_endpoint = rule.endpoint if rule_endpoint == "static": continue if len(rule_endpoint.split(".")) > 1: bp_fullname = ".".join(rule_endpoint.split(".")[:-1]) groups.setdefault(bp_fullname, []).append( partial( render_limits, current_app, limiter, manager.resolve_limits( current_app, rule_endpoint, bp_fullname ), rule_endpoint, bp_fullname, rule, exemption_scope=manager.exemption_scope( current_app, rule_endpoint, bp_fullname ), method=method, test=key, ) ) else: groups.setdefault("root", []).append( partial( render_limits, current_app, limiter, manager.resolve_limits(current_app, rule_endpoint, ""), rule_endpoint, None, rule, exemption_scope=manager.exemption_scope( current_app, rule_endpoint, None ), method=method, test=key, ) ) @group() def console_renderable() -> Generator: # type: ignore if ( limiter and limiter.limit_manager.application_limits and not (endpoint or path) ): yield render_limits( current_app, limiter, (list(itertools.chain(*limiter._meta_limits)), []), test=key, method=method, label="[gold3]Meta Limits[/gold3]", ) yield render_limits( current_app, limiter, (limiter.limit_manager.application_limits, []), test=key, method=method, label="[gold3]Application Limits[/gold3]", ) for name in groups: if name == "root": group_tree = Tree(f"[gold3]{current_app.name}[/gold3]") else: group_tree = Tree(f"[blue]{name}[/blue]") [group_tree.add(renderable()) for renderable in groups[name]] yield group_tree if not watch: console.print(console_renderable()) else: # noqa with Live( console_renderable(), console=console, refresh_per_second=0.4, screen=True, ) as live: while True: try: live.update(console_renderable()) time.sleep(0.4) except KeyboardInterrupt: break else: console.print( f"No Flask-Limiter extension installed on {current_app}", style="bold red", ) @cli.command(help="Clear limits for a specific key") @click.option("--endpoint", default=None, help="Endpoint to filter by") @click.option("--path", default=None, help="Path to filter by") @click.option("--method", default=None, help="HTTP Method to filter by") @click.option("--key", default=None, required=True, help="Key to reset the limits for") @click.option("-y", is_flag=True, help="Skip prompt for confirmation") @with_appcontext def clear( key: str, endpoint: str | None = None, path: str | None = None, method: str = "GET", y: bool = False, ) -> None: with current_app.test_request_context(): limiters = list(current_app.extensions.get("limiter", set())) limiter: Limiter | None = limiters[0] if limiters else None console = Console(theme=limiter_theme) if limiter: manager = limiter.limit_manager filter_endpoint = get_filtered_endpoint( current_app, console, endpoint, path, method ) class Details(TypedDict): rule: Rule limits: tuple[list[Limit], ...] 
rule_limits: dict[str, Details] = {} for rule in sorted( current_app.url_map.iter_rules(filter_endpoint), key=lambda r: str(r) ): rule_endpoint = rule.endpoint if rule_endpoint == "static": continue if len(rule_endpoint.split(".")) > 1: bp_fullname = ".".join(rule_endpoint.split(".")[:-1]) rule_limits[rule_endpoint] = Details( rule=rule, limits=manager.resolve_limits( current_app, rule_endpoint, bp_fullname ), ) else: rule_limits[rule_endpoint] = Details( rule=rule, limits=manager.resolve_limits(current_app, rule_endpoint, ""), ) application_limits = None if not filter_endpoint: application_limits = limiter.limit_manager.application_limits if not y: # noqa if application_limits: console.print( render_limits( current_app, limiter, (application_limits, []), label="Application Limits", test=key, ) ) for endpoint, details in rule_limits.items(): if details["limits"]: console.print( render_limits( current_app, limiter, details["limits"], endpoint, rule=details["rule"], test=key, ) ) if y or Confirm.ask( f"Proceed with resetting limits for key: [danger]{key}[/danger]?" ): if application_limits: node = Tree("Application Limits") for limit in application_limits: limiter.limiter.clear( limit.limit, key, limit.scope_for("", method), ) node.add(f"{render_limit(limit)}: [success]Cleared[/success]") console.print(node) for endpoint, details in rule_limits.items(): if details["limits"]: node = Tree(endpoint) default, decorated = details["limits"] for limit in default + decorated: if ( limit.per_method and details["rule"] and details["rule"].methods and not method ): for rule_method in details["rule"].methods: limiter.limiter.clear( limit.limit, key, limit.scope_for(endpoint, rule_method), ) else: limiter.limiter.clear( limit.limit, key, limit.scope_for(endpoint, method), ) node.add( f"{render_limit(limit)}: [success]Cleared[/success]" ) console.print(node) else: console.print( f"No Flask-Limiter extension installed on {current_app}", style="bold red", ) if __name__ == "__main__": # noqa cli() flask-limiter-3.12/flask_limiter/constants.py000066400000000000000000000055261476516161100214350ustar00rootroot00000000000000from __future__ import annotations import enum class ConfigVars: ENABLED = "RATELIMIT_ENABLED" KEY_FUNC = "RATELIMIT_KEY_FUNC" KEY_PREFIX = "RATELIMIT_KEY_PREFIX" FAIL_ON_FIRST_BREACH = "RATELIMIT_FAIL_ON_FIRST_BREACH" ON_BREACH = "RATELIMIT_ON_BREACH_CALLBACK" SWALLOW_ERRORS = "RATELIMIT_SWALLOW_ERRORS" APPLICATION_LIMITS = "RATELIMIT_APPLICATION" APPLICATION_LIMITS_PER_METHOD = "RATELIMIT_APPLICATION_PER_METHOD" APPLICATION_LIMITS_EXEMPT_WHEN = "RATELIMIT_APPLICATION_EXEMPT_WHEN" APPLICATION_LIMITS_DEDUCT_WHEN = "RATELIMIT_APPLICATION_DEDUCT_WHEN" APPLICATION_LIMITS_COST = "RATELIMIT_APPLICATION_COST" DEFAULT_LIMITS = "RATELIMIT_DEFAULT" DEFAULT_LIMITS_PER_METHOD = "RATELIMIT_DEFAULTS_PER_METHOD" DEFAULT_LIMITS_EXEMPT_WHEN = "RATELIMIT_DEFAULTS_EXEMPT_WHEN" DEFAULT_LIMITS_DEDUCT_WHEN = "RATELIMIT_DEFAULTS_DEDUCT_WHEN" DEFAULT_LIMITS_COST = "RATELIMIT_DEFAULTS_COST" REQUEST_IDENTIFIER = "RATELIMIT_REQUEST_IDENTIFIER" STRATEGY = "RATELIMIT_STRATEGY" STORAGE_URI = "RATELIMIT_STORAGE_URI" STORAGE_OPTIONS = "RATELIMIT_STORAGE_OPTIONS" HEADERS_ENABLED = "RATELIMIT_HEADERS_ENABLED" HEADER_LIMIT = "RATELIMIT_HEADER_LIMIT" HEADER_REMAINING = "RATELIMIT_HEADER_REMAINING" HEADER_RESET = "RATELIMIT_HEADER_RESET" HEADER_RETRY_AFTER = "RATELIMIT_HEADER_RETRY_AFTER" HEADER_RETRY_AFTER_VALUE = "RATELIMIT_HEADER_RETRY_AFTER_VALUE" IN_MEMORY_FALLBACK = "RATELIMIT_IN_MEMORY_FALLBACK" 
    IN_MEMORY_FALLBACK_ENABLED = "RATELIMIT_IN_MEMORY_FALLBACK_ENABLED"
    META_LIMITS = "RATELIMIT_META"
    ON_META_BREACH = "RATELIMIT_ON_META_BREACH_CALLBACK"


class HeaderNames(enum.Enum):
    """
    Enumeration of supported rate limit related headers to
    be used when configuring via
    :paramref:`~flask_limiter.Limiter.header_name_mapping`
    """

    #: Timestamp at which this rate limit will be reset
    RESET = "X-RateLimit-Reset"
    #: Remaining number of requests within the current window
    REMAINING = "X-RateLimit-Remaining"
    #: Total number of allowed requests within a window
    LIMIT = "X-RateLimit-Limit"
    #: Number of seconds to retry after
    RETRY_AFTER = "Retry-After"


class ExemptionScope(enum.Flag):
    """
    Flags used to configure the scope of exemption when used
    in conjunction with :meth:`~flask_limiter.Limiter.exempt`.
    """

    NONE = 0
    #: Exempt from application wide "global" limits
    APPLICATION = enum.auto()
    #: Exempt from meta limits
    META = enum.auto()
    #: Exempt from default limits configured on the extension
    DEFAULT = enum.auto()
    #: Exempts any nested blueprints. See :ref:`recipes:nested blueprints`
    DESCENDENTS = enum.auto()
    #: Exempt from any rate limits inherited from ancestor blueprints.
    #: See :ref:`recipes:nested blueprints`
    ANCESTORS = enum.auto()


MAX_BACKEND_CHECKS = 5
flask-limiter-3.12/flask_limiter/contrib/
flask-limiter-3.12/flask_limiter/contrib/__init__.py
"""Contributed 'recipes'"""
flask-limiter-3.12/flask_limiter/contrib/util.py
from __future__ import annotations

from flask import request


def get_remote_address_cloudflare() -> str:
    """
    :return: the IP address for the current request from the
     CF-Connecting-IP header (or 127.0.0.1 if none found)
    """
    # use .get() so that a missing header falls back to 127.0.0.1 as
    # documented, instead of raising a KeyError
    return request.headers.get("CF-Connecting-IP") or "127.0.0.1"
flask-limiter-3.12/flask_limiter/errors.py
"""errors and exceptions."""

from __future__ import annotations

from flask.wrappers import Response
from werkzeug import exceptions

from .wrappers import Limit


class RateLimitExceeded(exceptions.TooManyRequests):
    """Exception raised when a rate limit is hit."""

    def __init__(self, limit: Limit, response: Response | None = None) -> None:
        """
        :param limit: The actual rate limit that was hit.
         Used to construct the default response message
        :param response: Optional pre-constructed response.
         If provided it will be rendered by flask instead of the default
         error response of :class:`~werkzeug.exceptions.HTTPException`
        """
        self.limit = limit
        self.response = response

        if limit.error_message:
            description = (
                limit.error_message
                if not callable(limit.error_message)
                else limit.error_message()
            )
        else:
            description = str(limit.limit)
        super().__init__(description=description, response=response)
flask-limiter-3.12/flask_limiter/extension.py
"""
Flask-Limiter Extension
"""

from __future__ import annotations

import dataclasses
import datetime
import functools
import itertools
import logging
import time
import traceback
import warnings
import weakref
from collections import defaultdict
from functools import partial, wraps
from types import TracebackType
from typing import overload

import flask
import flask.wrappers
from limits.errors import ConfigurationError
from limits.storage import MemoryStorage, Storage, storage_from_string
from limits.strategies import STRATEGIES, RateLimiter
from ordered_set import OrderedSet
from werkzeug.http import http_date, parse_date

from ._compat import request_context
from .constants import MAX_BACKEND_CHECKS, ConfigVars, ExemptionScope, HeaderNames
from .errors import RateLimitExceeded
from .manager import LimitManager
from .typing import (
    Callable,
    P,
    R,
    Sequence,
    cast,
)
from .util import get_qualified_name
from .wrappers import Limit, LimitGroup, RequestLimit


@dataclasses.dataclass
class LimiterContext:
    view_rate_limit: RequestLimit | None = None
    view_rate_limits: list[RequestLimit] = dataclasses.field(default_factory=list)
    conditional_deductions: dict[Limit, list[str]] = dataclasses.field(
        default_factory=dict
    )
    seen_limits: OrderedSet[Limit] = dataclasses.field(default_factory=OrderedSet)

    def reset(self) -> None:
        self.view_rate_limit = None
        self.view_rate_limits.clear()
        self.conditional_deductions.clear()
        self.seen_limits.clear()


class Limiter:
    """
    The :class:`Limiter` class initializes the Flask-Limiter extension.

    :param key_func: a callable that returns the domain to rate limit by.
    :param app: :class:`flask.Flask` instance to initialize the extension with.
    :param default_limits: a variable list of strings or callables
     returning strings denoting default limits to apply to all routes that are
     not explicitly decorated with a limit. :ref:`ratelimit-string` for more details.
    :param default_limits_per_method: whether default limits are applied
     per method, per route or as a combination of all methods per route.
    :param default_limits_exempt_when: a function that should return
     True/False to decide if the default limits should be skipped
    :param default_limits_deduct_when: a function that receives the
     current :class:`flask.Response` object and returns True/False to decide
     if a deduction should be made from the default rate limit(s)
    :param default_limits_cost: The cost of a hit to the default limits as an
     integer or a function that takes no parameters and returns an integer
     (Default: ``1``).
    :param application_limits: a variable list of strings or callables
     returning strings for limits that are applied to the entire application
     (i.e. a shared limit for all routes)
    :param application_limits_per_method: whether application limits are applied
     per method, per route or as a combination of all methods per route.
    :param application_limits_exempt_when: a function that should return
     True/False to decide if the application limits should be skipped
    :param application_limits_deduct_when: a function that receives the
     current :class:`flask.Response` object and returns True/False to decide
     if a deduction should be made from the application rate limit(s)
    :param application_limits_cost: The cost of a hit to the global application
     limits as an integer or a function that takes no parameters and returns
     an integer (Default: ``1``).
    :param headers_enabled: whether ``X-RateLimit`` response headers are
     written.
    :param header_name_mapping: Mapping of header names to use if
     :paramref:`Limiter.headers_enabled` is ``True``. If no mapping is
     provided the default values will be used.
    :param strategy: the strategy to use. Refer to :ref:`ratelimit-strategy`
    :param storage_uri: the storage location.
     Refer to :data:`RATELIMIT_STORAGE_URI`
    :param storage_options: kwargs to pass to the storage implementation
     upon instantiation.
    :param auto_check: whether to automatically check the rate limit in
     the before_request chain of the application. default ``True``
    :param swallow_errors: whether to swallow any errors when hitting a rate
     limit. An exception will still be logged. default ``False``
    :param fail_on_first_breach: whether to stop processing remaining limits
     after the first breach. default ``True``
    :param on_breach: a function that will be called when any limit in this
     extension is breached. If the function returns an instance of
     :class:`flask.Response` that will be the response embedded into the
     :exc:`RateLimitExceeded` exception raised.
    :param meta_limits: a variable list of strings or callables
     returning strings for limits that are used to control the upper limit of
     a requesting client hitting any configured rate limit. Once a meta limit
     is exceeded all subsequent requests will raise a
     :class:`~flask_limiter.RateLimitExceeded` for the duration of the meta
     limit window.
    :param on_meta_breach: a function that will be called when a meta limit
     in this extension is breached. If the function returns an instance of
     :class:`flask.Response` that will be the response embedded into the
     :exc:`RateLimitExceeded` exception raised.
    :param in_memory_fallback: a variable list of strings or callables
     returning strings denoting fallback limits to apply when the storage is
     down.
    :param in_memory_fallback_enabled: fall back to in memory storage when the
     main storage is down, inheriting the original limits. default ``False``
    :param retry_after: Allows configuration of how the value of the
     `Retry-After` header is rendered. One of `http-date` or `delta-seconds`.
    :param key_prefix: prefix prepended to rate limiter keys and app context
     global names.
    :param request_identifier: a callable that returns the unique identity of
     the current request.
Defaults to :attr:`flask.Request.endpoint` :param enabled: Whether the extension is enabled or not """ def __init__( self, key_func: Callable[[], str], *, app: flask.Flask | None = None, default_limits: list[str | Callable[[], str]] | None = None, default_limits_per_method: bool | None = None, default_limits_exempt_when: Callable[[], bool] | None = None, default_limits_deduct_when: None | (Callable[[flask.wrappers.Response], bool]) = None, default_limits_cost: int | Callable[[], int] | None = None, application_limits: list[str | Callable[[], str]] | None = None, application_limits_per_method: bool | None = None, application_limits_exempt_when: Callable[[], bool] | None = None, application_limits_deduct_when: None | (Callable[[flask.wrappers.Response], bool]) = None, application_limits_cost: int | Callable[[], int] | None = None, headers_enabled: bool | None = None, header_name_mapping: dict[HeaderNames, str] | None = None, strategy: str | None = None, storage_uri: str | None = None, storage_options: dict[str, str | int] | None = None, auto_check: bool = True, swallow_errors: bool | None = None, fail_on_first_breach: bool | None = None, on_breach: None | (Callable[[RequestLimit], flask.wrappers.Response | None]) = None, meta_limits: list[str | Callable[[], str]] | None = None, on_meta_breach: None | (Callable[[RequestLimit], flask.wrappers.Response | None]) = None, in_memory_fallback: list[str] | None = None, in_memory_fallback_enabled: bool | None = None, retry_after: str | None = None, key_prefix: str = "", request_identifier: Callable[..., str] | None = None, enabled: bool = True, ) -> None: self.app = app self.logger = logging.getLogger("flask-limiter") self.enabled = enabled self.initialized = False self._default_limits_per_method = default_limits_per_method self._default_limits_exempt_when = default_limits_exempt_when self._default_limits_deduct_when = default_limits_deduct_when self._default_limits_cost = default_limits_cost self._application_limits_per_method = application_limits_per_method self._application_limits_exempt_when = application_limits_exempt_when self._application_limits_deduct_when = application_limits_deduct_when self._application_limits_cost = application_limits_cost self._in_memory_fallback = [] self._in_memory_fallback_enabled = in_memory_fallback_enabled or ( in_memory_fallback and len(in_memory_fallback) > 0 ) self._route_exemptions: dict[str, ExemptionScope] = {} self._blueprint_exemptions: dict[str, ExemptionScope] = {} self._request_filters: list[Callable[[], bool]] = [] self._headers_enabled = headers_enabled self._header_mapping = header_name_mapping or {} self._retry_after = retry_after self._strategy = strategy self._storage_uri = storage_uri self._storage_options = storage_options or {} self._auto_check = auto_check self._swallow_errors = swallow_errors self._fail_on_first_breach = fail_on_first_breach self._on_breach = on_breach self._on_meta_breach = on_meta_breach self._key_func = key_func self._key_prefix = key_prefix self._request_identifier = request_identifier _default_limits = ( [ LimitGroup( limit_provider=limit, key_function=self._key_func, ) for limit in default_limits ] if default_limits else [] ) _application_limits = ( [ LimitGroup( limit_provider=limit, key_function=self._key_func, scope="global", shared=True, ) for limit in application_limits ] if application_limits else [] ) self._meta_limits = ( [ LimitGroup( limit_provider=limit, key_function=self._key_func, scope="meta", shared=True, ) for limit in meta_limits ] if meta_limits else [] 
) if in_memory_fallback: for limit in in_memory_fallback: self._in_memory_fallback.append( LimitGroup( limit_provider=limit, key_function=self._key_func, ) ) self._storage: Storage | None = None self._limiter: RateLimiter | None = None self._storage_dead = False self._fallback_limiter: RateLimiter | None = None self.__check_backend_count = 0 self.__last_check_backend = time.time() self._marked_for_limiting: set[str] = set() self.logger.addHandler(logging.NullHandler()) self.limit_manager = LimitManager( application_limits=_application_limits, default_limits=_default_limits, decorated_limits={}, blueprint_limits={}, route_exemptions=self._route_exemptions, blueprint_exemptions=self._blueprint_exemptions, ) if app: self.init_app(app) def init_app(self, app: flask.Flask) -> None: """ :param app: :class:`flask.Flask` instance to rate limit. """ config = app.config self.enabled = config.setdefault(ConfigVars.ENABLED, self.enabled) if not self.enabled: return if self._default_limits_per_method is None: self._default_limits_per_method = bool( config.get(ConfigVars.DEFAULT_LIMITS_PER_METHOD, False) ) self._default_limits_exempt_when = ( self._default_limits_exempt_when or config.get(ConfigVars.DEFAULT_LIMITS_EXEMPT_WHEN) ) self._default_limits_deduct_when = ( self._default_limits_deduct_when or config.get(ConfigVars.DEFAULT_LIMITS_DEDUCT_WHEN) ) self._default_limits_cost = self._default_limits_cost or config.get( ConfigVars.DEFAULT_LIMITS_COST, 1 ) if self._swallow_errors is None: self._swallow_errors = bool(config.get(ConfigVars.SWALLOW_ERRORS, False)) if self._fail_on_first_breach is None: self._fail_on_first_breach = bool( config.get(ConfigVars.FAIL_ON_FIRST_BREACH, True) ) if self._headers_enabled is None: self._headers_enabled = bool(config.get(ConfigVars.HEADERS_ENABLED, False)) self._storage_options.update(config.get(ConfigVars.STORAGE_OPTIONS, {})) storage_uri_from_config = config.get(ConfigVars.STORAGE_URI, None) if not storage_uri_from_config: if not self._storage_uri: warnings.warn( "Using the in-memory storage for tracking rate limits as no storage " "was explicitly specified. This is not recommended for production use. " "See: https://flask-limiter.readthedocs.io#configuring-a-storage-backend " "for documentation about configuring the storage backend." 
) storage_uri_from_config = "memory://" self._storage = cast( Storage, storage_from_string( self._storage_uri or storage_uri_from_config, **self._storage_options ), ) self._strategy = self._strategy or config.setdefault( ConfigVars.STRATEGY, "fixed-window" ) if self._strategy not in STRATEGIES: raise ConfigurationError( "Invalid rate limiting strategy %s" % self._strategy ) self._limiter = STRATEGIES[self._strategy](self._storage) self._header_mapping = { HeaderNames.RESET: self._header_mapping.get( HeaderNames.RESET, config.get(ConfigVars.HEADER_RESET, HeaderNames.RESET.value), ), HeaderNames.REMAINING: self._header_mapping.get( HeaderNames.REMAINING, config.get(ConfigVars.HEADER_REMAINING, HeaderNames.REMAINING.value), ), HeaderNames.LIMIT: self._header_mapping.get( HeaderNames.LIMIT, config.get(ConfigVars.HEADER_LIMIT, HeaderNames.LIMIT.value), ), HeaderNames.RETRY_AFTER: self._header_mapping.get( HeaderNames.RETRY_AFTER, config.get( ConfigVars.HEADER_RETRY_AFTER, HeaderNames.RETRY_AFTER.value ), ), } self._retry_after = self._retry_after or config.get( ConfigVars.HEADER_RETRY_AFTER_VALUE ) self._key_prefix = self._key_prefix or config.get(ConfigVars.KEY_PREFIX, "") self._request_identifier = self._request_identifier or config.get( ConfigVars.REQUEST_IDENTIFIER, lambda: flask.request.endpoint or "" ) app_limits = config.get(ConfigVars.APPLICATION_LIMITS, None) self._application_limits_cost = self._application_limits_cost or config.get( ConfigVars.APPLICATION_LIMITS_COST, 1 ) if self._application_limits_per_method is None: self._application_limits_per_method = bool( config.get(ConfigVars.APPLICATION_LIMITS_PER_METHOD, False) ) self._application_limits_exempt_when = ( self._application_limits_exempt_when or config.get(ConfigVars.APPLICATION_LIMITS_EXEMPT_WHEN) ) self._application_limits_deduct_when = ( self._application_limits_deduct_when or config.get(ConfigVars.APPLICATION_LIMITS_DEDUCT_WHEN) ) if not self.limit_manager._application_limits and app_limits: self.limit_manager.set_application_limits( [ LimitGroup( limit_provider=app_limits, key_function=self._key_func, scope="global", shared=True, per_method=self._application_limits_per_method, exempt_when=self._application_limits_exempt_when, deduct_when=self._application_limits_deduct_when, cost=self._application_limits_cost, ) ] ) else: app_limits = self.limit_manager._application_limits for group in app_limits: group.cost = self._application_limits_cost group.per_method = self._application_limits_per_method group.exempt_when = self._application_limits_exempt_when group.deduct_when = self._application_limits_deduct_when self.limit_manager.set_application_limits(app_limits) conf_limits = config.get(ConfigVars.DEFAULT_LIMITS, None) if not self.limit_manager._default_limits and conf_limits: self.limit_manager.set_default_limits( [ LimitGroup( limit_provider=conf_limits, key_function=self._key_func, per_method=self._default_limits_per_method, exempt_when=self._default_limits_exempt_when, deduct_when=self._default_limits_deduct_when, cost=self._default_limits_cost, ) ] ) else: default_limit_groups = self.limit_manager._default_limits for group in default_limit_groups: group.per_method = self._default_limits_per_method group.exempt_when = self._default_limits_exempt_when group.deduct_when = self._default_limits_deduct_when group.cost = self._default_limits_cost self.limit_manager.set_default_limits(default_limit_groups) meta_limits = config.get(ConfigVars.META_LIMITS, None) if not self._meta_limits and meta_limits: self._meta_limits = [ 
LimitGroup( limit_provider=meta_limits, key_function=self._key_func, scope="meta", shared=True, ) ] self._on_breach = self._on_breach or config.get(ConfigVars.ON_BREACH, None) self._on_meta_breach = self._on_meta_breach or config.get( ConfigVars.ON_META_BREACH, None ) self.__configure_fallbacks(app, self._strategy) if self not in app.extensions.setdefault("limiter", set()): if self._auto_check: app.before_request(self._check_request_limit) app.after_request(partial(Limiter.__inject_headers, self)) app.teardown_request(self.__release_context) app.extensions["limiter"].add(self) self.initialized = True @property def context(self) -> LimiterContext: """ The context is meant to exist for the lifetime of a request/response cycle per instance of the extension so as to keep track of any state used at different steps in the lifecycle (for example to pass information from the before request hook to the after_request hook) :meta private: """ ctx = request_context() if not hasattr(ctx, "_limiter_request_context"): ctx._limiter_request_context = defaultdict(LimiterContext) # type: ignore return cast( dict[Limiter, LimiterContext], ctx._limiter_request_context, # type: ignore )[self] def limit( self, limit_value: str | Callable[[], str], *, key_func: Callable[[], str] | None = None, per_method: bool = False, methods: list[str] | None = None, error_message: str | None = None, exempt_when: Callable[[], bool] | None = None, override_defaults: bool = True, deduct_when: Callable[[flask.wrappers.Response], bool] | None = None, on_breach: None | (Callable[[RequestLimit], flask.wrappers.Response | None]) = None, cost: int | Callable[[], int] = 1, scope: str | Callable[[str], str] | None = None, ) -> LimitDecorator: """ Decorator to be used for rate limiting individual routes or blueprints. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param per_method: whether the limit is sub categorized into the http method of the request. :param methods: if specified, only the methods in this list will be rate limited (default: ``None``). :param error_message: string (or callable that returns one) to override the error message used in the response. :param exempt_when: function/lambda used to decide if the rate limit should skipped. :param override_defaults: whether the decorated limit overrides the default limits (Default: ``True``). .. note:: When used with a :class:`~flask.Blueprint` the meaning of the parameter extends to any parents the blueprint instance is registered under. For more details see :ref:`recipes:nested blueprints` :param deduct_when: a function that receives the current :class:`flask.Response` object and returns True/False to decide if a deduction should be done from the rate limit :param on_breach: a function that will be called when this limit is breached. If the function returns an instance of :class:`flask.Response` that will be the response embedded into the :exc:`RateLimitExceeded` exception raised. :param cost: The cost of a hit or a function that takes no parameters and returns the cost as an integer (Default: ``1``). :param scope: a string or callable that returns a string for further categorizing the rate limiting scope. This scope is combined with the current endpoint of the request. Changes - .. 
versionadded:: 2.9.0 The returned object can also be used as a context manager for rate limiting a code block inside a view. For example:: @app.route("/") def route(): try: with limiter.limit("10/second"): # something expensive except RateLimitExceeded: pass """ return LimitDecorator( self, limit_value, key_func, False, scope, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when, override_defaults=override_defaults, deduct_when=deduct_when, on_breach=on_breach, cost=cost, ) def shared_limit( self, limit_value: str | Callable[[], str], scope: str | Callable[[str], str], *, key_func: Callable[[], str] | None = None, per_method: bool = False, methods: list[str] | None = None, error_message: str | None = None, exempt_when: Callable[[], bool] | None = None, override_defaults: bool = True, deduct_when: Callable[[flask.wrappers.Response], bool] | None = None, on_breach: None | (Callable[[RequestLimit], flask.wrappers.Response | None]) = None, cost: int | Callable[[], int] = 1, ) -> LimitDecorator: """ decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param per_method: whether the limit is sub categorized into the http method of the request. :param methods: if specified, only the methods in this list will be rate limited (default: ``None``). :param error_message: string (or callable that returns one) to override the error message used in the response. :param function exempt_when: function/lambda used to decide if the rate limit should skipped. :param override_defaults: whether the decorated limit overrides the default limits. (default: ``True``) .. note:: When used with a :class:`~flask.Blueprint` the meaning of the parameter extends to any parents the blueprint instance is registered under. For more details see :ref:`recipes:nested blueprints` :param deduct_when: a function that receives the current :class:`flask.Response` object and returns True/False to decide if a deduction should be done from the rate limit :param on_breach: a function that will be called when this limit is breached. If the function returns an instance of :class:`flask.Response` that will be the response embedded into the :exc:`RateLimitExceeded` exception raised. :param cost: The cost of a hit or a function that takes no parameters and returns the cost as an integer (default: ``1``). """ return LimitDecorator( self, limit_value, key_func, True, scope, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when, override_defaults=override_defaults, deduct_when=deduct_when, on_breach=on_breach, cost=cost, ) @overload def exempt( self, obj: flask.Blueprint, *, flags: ExemptionScope = ExemptionScope.APPLICATION | ExemptionScope.DEFAULT | ExemptionScope.META, ) -> flask.Blueprint: ... @overload def exempt( self, obj: Callable[..., R], *, flags: ExemptionScope = ExemptionScope.APPLICATION | ExemptionScope.DEFAULT | ExemptionScope.META, ) -> Callable[..., R]: ... @overload def exempt( self, *, flags: ExemptionScope = ExemptionScope.APPLICATION | ExemptionScope.DEFAULT | ExemptionScope.META, ) -> ( Callable[[Callable[P, R]], Callable[P, R]] | Callable[[flask.Blueprint], flask.Blueprint] ): ... 
def exempt( self, obj: Callable[..., R] | flask.Blueprint | None = None, *, flags: ExemptionScope = ExemptionScope.APPLICATION | ExemptionScope.DEFAULT | ExemptionScope.META, ) -> ( Callable[..., R] | flask.Blueprint | Callable[[Callable[P, R]], Callable[P, R]] | Callable[[flask.Blueprint], flask.Blueprint] ): """ Mark a view function or all views in a blueprint as exempt from rate limits. :param obj: view function or blueprint to mark as exempt. :param flags: Controls the scope of the exemption. By default application wide limits, defaults configured on the extension and meta limits are opted out of. Additional flags can be used to control the behavior when :paramref:`obj` is a Blueprint that is nested under another Blueprint or has other Blueprints nested under it (See :ref:`recipes:nested blueprints`) The method can be used either as a decorator without any arguments (the default flags will apply and the route will be exempt from default and application limits:: @app.route("...") @limiter.exempt def route(...): ... Specific exemption flags can be provided at decoration time:: @app.route("...") @limiter.exempt(flags=ExemptionScope.APPLICATION) def route(...): ... If an entire blueprint (i.e. all routes under it) are to be exempted the method can be called with the blueprint as the first parameter and any additional flags:: bp = Blueprint(...) limiter.exempt(bp) limiter.exempt( bp, flags=ExemptionScope.DEFAULT|ExemptionScope.APPLICATION|ExemptionScope.ANCESTORS ) """ if isinstance(obj, flask.Blueprint): self.limit_manager.add_blueprint_exemption(obj.name, flags) elif obj: self.limit_manager.add_route_exemption(get_qualified_name(obj), flags) else: return functools.partial(self.exempt, flags=flags) return obj def request_filter(self, fn: Callable[[], bool]) -> Callable[[], bool]: """ decorator to mark a function as a filter to be executed to check if the request is exempt from rate limiting. :param fn: The function will be called before evaluating any rate limits to decide whether to perform rate limit or skip it. """ self._request_filters.append(fn) return fn def __configure_fallbacks(self, app: flask.Flask, strategy: str) -> None: config = app.config fallback_enabled = config.get(ConfigVars.IN_MEMORY_FALLBACK_ENABLED, False) fallback_limits = config.get(ConfigVars.IN_MEMORY_FALLBACK, None) if not self._in_memory_fallback and fallback_limits: self._in_memory_fallback = [ LimitGroup( limit_provider=fallback_limits, key_function=self._key_func, scope=None, per_method=False, cost=1, ) ] if not self._in_memory_fallback_enabled: self._in_memory_fallback_enabled = ( fallback_enabled or len(self._in_memory_fallback) > 0 ) if self._in_memory_fallback_enabled: self._fallback_storage = MemoryStorage() self._fallback_limiter = STRATEGIES[strategy](self._fallback_storage) def __should_check_backend(self) -> bool: if self.__check_backend_count > MAX_BACKEND_CHECKS: self.__check_backend_count = 0 if time.time() - self.__last_check_backend > pow(2, self.__check_backend_count): self.__last_check_backend = time.time() self.__check_backend_count += 1 return True return False def check(self) -> None: """ Explicitly check the limits for the current request. 
This is only relevant if the extension was initialized with :paramref:`~flask_limiter.Limiter.auto_check` set to ``False`` :raises: RateLimitExceeded """ self._check_request_limit(in_middleware=False) def reset(self) -> None: """ resets the storage if it supports being reset """ try: self.storage.reset() self.logger.info("Storage has been reset and all limits cleared") except NotImplementedError: self.logger.warning("This storage type does not support being reset") @property def storage(self) -> Storage: """ The backend storage configured for the rate limiter """ assert self._storage return self._storage @property def limiter(self) -> RateLimiter: """ Instance of the rate limiting strategy used for performing rate limiting. """ if self._storage_dead and self._in_memory_fallback_enabled: limiter = self._fallback_limiter else: limiter = self._limiter assert limiter return limiter @property def current_limit(self) -> RequestLimit | None: """ Get details for the most relevant rate limit used in this request. In a scenario where multiple rate limits are active for a single request and none are breached, the rate limit which applies to the smallest time window will be returned. .. important:: The value of ``remaining`` in :class:`RequestLimit` is after deduction for the current request. For example:: @limit("1/second") @limit("60/minute") @limit("2/day") def route(...): ... - Request 1 at ``t=0`` (no breach): this will return the details for for ``1/second`` - Request 2 at ``t=1`` (no breach): it will still return the details for ``1/second`` - Request 3 at ``t=2`` (breach): it will return the details for ``2/day`` """ return self.context.view_rate_limit @property def current_limits(self) -> list[RequestLimit]: """ Get a list of all rate limits that were applicable and evaluated within the context of this request. The limits are returned in a sorted order by smallest window size first. """ return self.context.view_rate_limits def identify_request(self) -> str: """ Returns the identity of the request (by default this is the :attr:`flask.Request.endpoint` associated by the view function that is handling the request). The behavior can be customized by initializing the extension with a callable argument for :paramref:`~flask_limiter.Limiter.request_identifier`. """ if self.initialized and self.enabled: assert self._request_identifier return self._request_identifier() return "" def __check_conditional_deductions(self, response: flask.wrappers.Response) -> None: for lim, args in self.context.conditional_deductions.items(): if lim.deduct_when and lim.deduct_when(response): try: self.limiter.hit(lim.limit, *args, cost=lim.cost) except Exception as err: if self._swallow_errors: self.logger.exception( "Failed to deduct rate limit. 
Swallowing error" ) else: raise err def __inject_headers( self, response: flask.wrappers.Response ) -> flask.wrappers.Response: self.__check_conditional_deductions(response) header_limit = self.current_limit if ( self.enabled and self._headers_enabled and header_limit and self._header_mapping ): try: reset_at = header_limit.reset_at response.headers.add( self._header_mapping[HeaderNames.LIMIT], str(header_limit.limit.amount), ) response.headers.add( self._header_mapping[HeaderNames.REMAINING], str(header_limit.remaining), ) response.headers.add( self._header_mapping[HeaderNames.RESET], str(reset_at) ) # response may have an existing retry after existing_retry_after_header = response.headers.get("Retry-After") if existing_retry_after_header is not None: # might be in http-date format retry_after: float | datetime.datetime | None = parse_date( existing_retry_after_header ) # parse_date failure returns None if retry_after is None: retry_after = time.time() + int(existing_retry_after_header) if isinstance(retry_after, datetime.datetime): retry_after = time.mktime(retry_after.timetuple()) reset_at = max(int(retry_after), reset_at) # set the header instead of using add response.headers.set( self._header_mapping[HeaderNames.RETRY_AFTER], str( http_date(reset_at) if self._retry_after == "http-date" else int(reset_at - time.time()) ), ) except Exception as e: # noqa: E722 if self._in_memory_fallback_enabled and not self._storage_dead: self.logger.warning( "Rate limit storage unreachable - falling back to" " in-memory storage" ) self._storage_dead = True response = self.__inject_headers(response) else: if self._swallow_errors: self.logger.exception( "Failed to update rate limit headers. Swallowing error" ) else: raise e return response def __check_all_limits_exempt( self, endpoint: str | None, ) -> bool: return bool( not endpoint or not (self.enabled and self.initialized) or endpoint.split(".")[-1] == "static" or any(fn() for fn in self._request_filters) ) def __filter_limits( self, endpoint: str | None, blueprint: str | None, callable_name: str | None, in_middleware: bool = False, ) -> list[Limit]: if callable_name: name = callable_name else: view_func = flask.current_app.view_functions.get(endpoint or "", None) name = get_qualified_name(view_func) if view_func else "" if self.__check_all_limits_exempt(endpoint): return [] marked_for_limiting = ( name in self._marked_for_limiting or self.limit_manager.has_hints(endpoint or "") ) fallback_limits = [] if self._storage_dead and self._fallback_limiter: if in_middleware and name in self._marked_for_limiting: pass else: if ( self.__should_check_backend() and self._storage and self._storage.check() ): self.logger.info("Rate limit storage recovered") self._storage_dead = False self.__check_backend_count = 0 else: fallback_limits = list(itertools.chain(*self._in_memory_fallback)) if fallback_limits: return fallback_limits defaults, decorated = self.limit_manager.resolve_limits( flask.current_app, endpoint, blueprint, name, in_middleware, marked_for_limiting, ) limits = OrderedSet(defaults) - self.context.seen_limits self.context.seen_limits.update(defaults) return list(limits) + list(decorated) def __evaluate_limits(self, endpoint: str, limits: list[Limit]) -> None: failed_limits: list[tuple[Limit, list[str]]] = [] limit_for_header: RequestLimit | None = None view_limits: list[RequestLimit] = [] meta_limits = list(itertools.chain(*self._meta_limits)) if not ( ExemptionScope.META & self.limit_manager.exemption_scope( flask.current_app, endpoint, 
flask.request.blueprint ) ): for lim in meta_limits: limit_key, scope = lim.key_func(), lim.scope_for(endpoint, None) args = [limit_key, scope] if not self.limiter.test(lim.limit, *args, cost=lim.cost): breached_meta_limit = RequestLimit( self, lim.limit, args, True, lim.shared ) self.context.view_rate_limit = breached_meta_limit self.context.view_rate_limits = [breached_meta_limit] meta_breach_response = None if self._on_meta_breach: try: cb_response = self._on_meta_breach(breached_meta_limit) if isinstance(cb_response, flask.wrappers.Response): meta_breach_response = cb_response except Exception as err: # noqa if self._swallow_errors: self.logger.exception( "on_meta_breach callback failed with error %s", err ) else: raise err raise RateLimitExceeded(lim, response=meta_breach_response) for lim in sorted(limits, key=lambda x: x.limit): if lim.is_exempt or lim.method_exempt: continue limit_scope = lim.scope_for(endpoint, flask.request.method) limit_key = lim.key_func() args = [limit_key, limit_scope] kwargs = {} if not all(args): self.logger.error( f"Skipping limit: {lim.limit}. Empty value found in parameters." ) continue if self._key_prefix: args = [self._key_prefix, *args] if lim.deduct_when: self.context.conditional_deductions[lim] = args method = self.limiter.test else: method = self.limiter.hit kwargs["cost"] = lim.cost request_limit = RequestLimit(self, lim.limit, args, False, lim.shared) view_limits.append(request_limit) if not method(lim.limit, *args, **kwargs): self.logger.info( "ratelimit %s (%s) exceeded at endpoint: %s", lim.limit, limit_key, limit_scope, ) failed_limits.append((lim, args)) view_limits[-1].breached = True limit_for_header = view_limits[-1] if self._fail_on_first_breach: break if not limit_for_header and view_limits: # Pick a non shared limit over a shared one if possible # when no rate limit has been hit. This should be the best hint # for the client. 
explicit = [limit for limit in view_limits if not limit.shared] limit_for_header = explicit[0] if explicit else view_limits[0] self.context.view_rate_limit = limit_for_header or None self.context.view_rate_limits = view_limits on_breach_response = None for limit in failed_limits: request_limit = RequestLimit( self, limit[0].limit, limit[1], True, limit[0].shared ) for cb in dict.fromkeys([self._on_breach, limit[0].on_breach]): if cb: try: cb_response = cb(request_limit) if isinstance(cb_response, flask.wrappers.Response): on_breach_response = cb_response except Exception as err: # noqa if self._swallow_errors: self.logger.exception( "on_breach callback failed with error %s", err ) else: raise err if failed_limits: for lim in meta_limits: limit_scope = lim.scope_for(endpoint, flask.request.method) limit_key = lim.key_func() args = [limit_key, limit_scope] self.limiter.hit(lim.limit, *args) raise RateLimitExceeded( sorted(failed_limits, key=lambda x: x[0].limit)[0][0], response=on_breach_response, ) def _check_request_limit( self, callable_name: str | None = None, in_middleware: bool = True ) -> None: endpoint = self.identify_request() try: all_limits = self.__filter_limits( endpoint, flask.request.blueprint, callable_name, in_middleware, ) self.__evaluate_limits(endpoint, all_limits) except Exception as e: if isinstance(e, RateLimitExceeded): raise e if self._in_memory_fallback_enabled and not self._storage_dead: self.logger.warning( "Rate limit storage unreachable - falling back to in-memory storage" ) self._storage_dead = True self.context.seen_limits.clear() self._check_request_limit( callable_name=callable_name, in_middleware=in_middleware ) else: if self._swallow_errors: self.logger.exception("Failed to rate limit. Swallowing error") else: raise e def __release_context(self, _: BaseException | None = None) -> None: self.context.reset() class LimitDecorator: """ Wrapper used by :meth:`~flask_limiter.Limiter.limit` and :meth:`~flask_limiter.Limiter.shared_limit` when wrapping view functions or blueprints. 
""" def __init__( self, limiter: Limiter, limit_value: Callable[[], str] | str, key_func: Callable[[], str] | None = None, shared: bool = False, scope: Callable[[str], str] | str | None = None, per_method: bool = False, methods: Sequence[str] | None = None, error_message: str | None = None, exempt_when: Callable[[], bool] | None = None, override_defaults: bool = True, deduct_when: Callable[[flask.wrappers.Response], bool] | None = None, on_breach: None | (Callable[[RequestLimit], flask.wrappers.Response | None]) = None, cost: Callable[[], int] | int = 1, ): self.limiter: weakref.ProxyType[Limiter] = weakref.proxy(limiter) self.limit_value = limit_value self.key_func = key_func or self.limiter._key_func self.scope = scope self.per_method = per_method self.methods = tuple(methods) if methods else None self.error_message = error_message self.exempt_when = exempt_when self.override_defaults = override_defaults self.deduct_when = deduct_when self.on_breach = on_breach self.cost = cost self.is_static = not callable(self.limit_value) self.shared = shared @property def limit_group(self) -> LimitGroup: return LimitGroup( limit_provider=self.limit_value, key_function=self.key_func, scope=self.scope, per_method=self.per_method, methods=self.methods, error_message=self.error_message, exempt_when=self.exempt_when, override_defaults=self.override_defaults, deduct_when=self.deduct_when, on_breach=self.on_breach, cost=self.cost, shared=self.shared, ) def __enter__(self) -> None: tb = traceback.extract_stack(limit=2) qualified_location = f"{tb[0].filename}:{tb[0].name}:{tb[0].lineno}" # TODO: if use as a context manager becomes interesting/valuable # a less hacky approach than using the traceback and piggy backing # on the limit manager's knowledge of decorated limits might be worth it. self.limiter.limit_manager.add_decorated_limit( qualified_location, self.limit_group, override=True ) self.limiter.limit_manager.add_endpoint_hint( self.limiter.identify_request(), qualified_location ) self.limiter._check_request_limit( in_middleware=False, callable_name=qualified_location ) def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: ... @overload def __call__(self, obj: Callable[P, R]) -> Callable[P, R]: ... @overload def __call__(self, obj: flask.Blueprint) -> None: ... def __call__(self, obj: Callable[P, R] | flask.Blueprint) -> Callable[P, R] | None: if isinstance(obj, flask.Blueprint): name = obj.name else: name = get_qualified_name(obj) if isinstance(obj, flask.Blueprint): self.limiter.limit_manager.add_blueprint_limit(name, self.limit_group) return None else: self.limiter._marked_for_limiting.add(name) self.limiter.limit_manager.add_decorated_limit(name, self.limit_group) @wraps(obj) def __inner(*a: P.args, **k: P.kwargs) -> R: if ( self.limiter._auto_check and not getattr(obj, "__wrapper-limiter-instance", None) == self.limiter ): identity = self.limiter.identify_request() if identity: view_func = flask.current_app.view_functions.get(identity, None) if view_func and not get_qualified_name(view_func) == name: self.limiter.limit_manager.add_endpoint_hint(identity, name) self.limiter._check_request_limit( in_middleware=False, callable_name=name ) return cast(R, flask.current_app.ensure_sync(obj)(*a, **k)) # mark this wrapper as wrapped by a decorator from the limiter # from which the decorator was created. 
This ensures that stacked # decorations only trigger rate limiting from the inner most # decorator from each limiter instance (the weird need for # keeping track of the instance is to handle cases where multiple # limiter extensions are registered on the same application). setattr(__inner, "__wrapper-limiter-instance", self.limiter) return __inner flask-limiter-3.12/flask_limiter/manager.py000066400000000000000000000246671476516161100210420ustar00rootroot00000000000000from __future__ import annotations import itertools import logging from collections.abc import Iterable import flask from ordered_set import OrderedSet from .constants import ExemptionScope from .util import get_qualified_name from .wrappers import Limit, LimitGroup class LimitManager: def __init__( self, application_limits: list[LimitGroup], default_limits: list[LimitGroup], decorated_limits: dict[str, OrderedSet[LimitGroup]], blueprint_limits: dict[str, OrderedSet[LimitGroup]], route_exemptions: dict[str, ExemptionScope], blueprint_exemptions: dict[str, ExemptionScope], ) -> None: self._application_limits = application_limits self._default_limits = default_limits self._decorated_limits = decorated_limits self._blueprint_limits = blueprint_limits self._route_exemptions = route_exemptions self._blueprint_exemptions = blueprint_exemptions self._endpoint_hints: dict[str, OrderedSet[str]] = {} self._logger = logging.getLogger("flask-limiter") @property def application_limits(self) -> list[Limit]: return list(itertools.chain(*self._application_limits)) @property def default_limits(self) -> list[Limit]: return list(itertools.chain(*self._default_limits)) def set_application_limits(self, limits: list[LimitGroup]) -> None: self._application_limits = limits def set_default_limits(self, limits: list[LimitGroup]) -> None: self._default_limits = limits def add_decorated_limit( self, route: str, limit: LimitGroup | None, override: bool = False ) -> None: if limit: if not override: self._decorated_limits.setdefault(route, OrderedSet()).add(limit) else: self._decorated_limits[route] = OrderedSet([limit]) def add_blueprint_limit(self, blueprint: str, limit: LimitGroup | None) -> None: if limit: self._blueprint_limits.setdefault(blueprint, OrderedSet()).add(limit) def add_route_exemption(self, route: str, scope: ExemptionScope) -> None: self._route_exemptions[route] = scope def add_blueprint_exemption(self, blueprint: str, scope: ExemptionScope) -> None: self._blueprint_exemptions[blueprint] = scope def add_endpoint_hint(self, endpoint: str, callable: str) -> None: self._endpoint_hints.setdefault(endpoint, OrderedSet()).add(callable) def has_hints(self, endpoint: str) -> bool: return bool(self._endpoint_hints.get(endpoint)) def resolve_limits( self, app: flask.Flask, endpoint: str | None = None, blueprint: str | None = None, callable_name: str | None = None, in_middleware: bool = False, marked_for_limiting: bool = False, ) -> tuple[list[Limit], ...]: before_request_context = in_middleware and marked_for_limiting decorated_limits = [] hinted_limits = [] if endpoint: if not in_middleware: if not callable_name: view_func = app.view_functions.get(endpoint, None) name = get_qualified_name(view_func) if view_func else "" else: name = callable_name decorated_limits.extend(self.decorated_limits(name)) for hint in self._endpoint_hints.get(endpoint, OrderedSet()): hinted_limits.extend(self.decorated_limits(hint)) if blueprint: if not before_request_context and ( not decorated_limits or all(not limit.override_defaults for limit in decorated_limits) ): 
decorated_limits.extend(self.blueprint_limits(app, blueprint)) exemption_scope = self.exemption_scope(app, endpoint, blueprint) all_limits = ( self.application_limits if in_middleware and not (exemption_scope & ExemptionScope.APPLICATION) else [] ) # all_limits += decorated_limits explicit_limits_exempt = all(limit.method_exempt for limit in decorated_limits) # all the decorated limits explicitly declared # that they don't override the defaults - so, they should # be included. combined_defaults = all( not limit.override_defaults for limit in decorated_limits ) # previous requests to this endpoint have exercised decorated # rate limits on callables that are not view functions. check # if all of them declared that they don't override defaults # and if so include the default limits. hinted_limits_request_defaults = ( all(not limit.override_defaults for limit in hinted_limits) if hinted_limits else False ) if ( (explicit_limits_exempt or combined_defaults) and ( not (before_request_context or exemption_scope & ExemptionScope.DEFAULT) ) ) or hinted_limits_request_defaults: all_limits += self.default_limits return all_limits, decorated_limits def exemption_scope( self, app: flask.Flask, endpoint: str | None, blueprint: str | None ) -> ExemptionScope: view_func = app.view_functions.get(endpoint or "", None) name = get_qualified_name(view_func) if view_func else "" route_exemption_scope = self._route_exemptions.get(name, ExemptionScope.NONE) blueprint_instance = app.blueprints.get(blueprint) if blueprint else None if not blueprint_instance: return route_exemption_scope else: assert blueprint ( blueprint_exemption_scope, ancestor_exemption_scopes, ) = self._blueprint_exemption_scope(app, blueprint) if ( blueprint_exemption_scope & ~(ExemptionScope.DEFAULT | ExemptionScope.APPLICATION) or ancestor_exemption_scopes ): for exemption in ancestor_exemption_scopes.values(): blueprint_exemption_scope |= exemption return route_exemption_scope | blueprint_exemption_scope def decorated_limits(self, callable_name: str) -> list[Limit]: limits = [] if not self._route_exemptions.get(callable_name, ExemptionScope.NONE): if callable_name in self._decorated_limits: for group in self._decorated_limits[callable_name]: try: for limit in group: limits.append(limit) except ValueError as e: self._logger.error( f"failed to load ratelimit for function {callable_name}: {e}", ) return limits def blueprint_limits(self, app: flask.Flask, blueprint: str) -> list[Limit]: limits: list[Limit] = [] blueprint_instance = app.blueprints.get(blueprint) if blueprint else None if blueprint_instance: blueprint_name = blueprint_instance.name blueprint_ancestory = set(blueprint.split(".") if blueprint else []) self_exemption, ancestor_exemptions = self._blueprint_exemption_scope( app, blueprint ) if not ( self_exemption & ~(ExemptionScope.DEFAULT | ExemptionScope.APPLICATION) ): blueprint_self_limits = self._blueprint_limits.get( blueprint_name, OrderedSet() ) blueprint_limits: Iterable[LimitGroup] = ( itertools.chain( *( self._blueprint_limits.get(member, []) for member in blueprint_ancestory.intersection( self._blueprint_limits ).difference(ancestor_exemptions) ) ) if not ( blueprint_self_limits and all( limit.override_defaults for limit in blueprint_self_limits ) ) and not self._blueprint_exemptions.get( blueprint_name, ExemptionScope.NONE ) & ExemptionScope.ANCESTORS else blueprint_self_limits ) if blueprint_limits: for limit_group in blueprint_limits: try: limits.extend( [ Limit( limit.limit, limit.key_func, limit.scope, 
limit.per_method, limit.methods, limit.error_message, limit.exempt_when, limit.override_defaults, limit.deduct_when, limit.on_breach, limit.cost, limit.shared, ) for limit in limit_group ] ) except ValueError as e: self._logger.error( f"failed to load ratelimit for blueprint {blueprint_name}: {e}", ) return limits def _blueprint_exemption_scope( self, app: flask.Flask, blueprint_name: str ) -> tuple[ExemptionScope, dict[str, ExemptionScope]]: name = app.blueprints[blueprint_name].name exemption = self._blueprint_exemptions.get(name, ExemptionScope.NONE) & ~( ExemptionScope.ANCESTORS ) ancestory = set(blueprint_name.split(".")) ancestor_exemption = { k for k, f in self._blueprint_exemptions.items() if f & ExemptionScope.DESCENDENTS }.intersection(ancestory) return exemption, { k: self._blueprint_exemptions.get(k, ExemptionScope.NONE) for k in ancestor_exemption } flask-limiter-3.12/flask_limiter/py.typed000066400000000000000000000000001476516161100205240ustar00rootroot00000000000000flask-limiter-3.12/flask_limiter/typing.py000066400000000000000000000004551476516161100207270ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Generator, Sequence from typing import ( ParamSpec, TypeVar, cast, ) R = TypeVar("R") P = ParamSpec("P") __all__ = [ "Callable", "Generator", "P", "R", "Sequence", "TypeVar", "cast", ] flask-limiter-3.12/flask_limiter/util.py000066400000000000000000000017331476516161100203720ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable from typing import Any from flask import request def get_remote_address() -> str: """ :return: the ip address for the current request (or 127.0.0.1 if none found) """ return request.remote_addr or "127.0.0.1" def get_qualified_name(callable: Callable[..., Any]) -> str: """ Generate the fully qualified name of a callable for use in storing mappings of decorated functions to rate limits The __qualname__ of the callable is appended in case there is a name clash in a module due to locally scoped functions that are decorated. TODO: Ideally __qualname__ should be enough, however view functions generated by class based views do not update that and therefore would not be uniquely identifiable unless __module__ & __name__ are inspected. 
:meta private: """ return f"{callable.__module__}.{callable.__name__}.{callable.__qualname__}" flask-limiter-3.12/flask_limiter/version.py000066400000000000000000000000571476516161100211000ustar00rootroot00000000000000""" empty file to be updated by versioneer """ flask-limiter-3.12/flask_limiter/wrappers.py000066400000000000000000000125311476516161100212560ustar00rootroot00000000000000from __future__ import annotations import dataclasses import typing import weakref from collections.abc import Iterator from flask import request from flask.wrappers import Response from limits import RateLimitItem, parse_many from limits.strategies import RateLimiter from limits.util import WindowStats from .typing import Callable if typing.TYPE_CHECKING: from .extension import Limiter class RequestLimit: """ Provides details of a rate limit within the context of a request """ #: The instance of the rate limit limit: RateLimitItem #: The full key for the request against which the rate limit is tested key: str #: Whether the limit was breached within the context of this request breached: bool #: Whether the limit is a shared limit shared: bool def __init__( self, extension: Limiter, limit: RateLimitItem, request_args: list[str], breached: bool, shared: bool, ) -> None: self.extension: weakref.ProxyType[Limiter] = weakref.proxy(extension) self.limit = limit self.request_args = request_args self.key = limit.key_for(*request_args) self.breached = breached self.shared = shared self._window: WindowStats | None = None @property def limiter(self) -> RateLimiter: return typing.cast(RateLimiter, self.extension.limiter) @property def window(self) -> WindowStats: if not self._window: self._window = self.limiter.get_window_stats(self.limit, *self.request_args) return self._window @property def reset_at(self) -> int: """Timestamp at which the rate limit will be reset""" return int(self.window[0] + 1) @property def remaining(self) -> int: """Quantity remaining for this rate limit""" return self.window[1] @dataclasses.dataclass(eq=True, unsafe_hash=True) class Limit: """ simple wrapper to encapsulate limits and their context """ limit: RateLimitItem key_func: Callable[[], str] _scope: str | Callable[[str], str] | None per_method: bool = False methods: tuple[str, ...] | None = None error_message: str | None = None exempt_when: Callable[[], bool] | None = None override_defaults: bool | None = False deduct_when: Callable[[Response], bool] | None = None on_breach: Callable[[RequestLimit], Response | None] | None = None _cost: Callable[[], int] | int = 1 shared: bool = False def __post_init__(self) -> None: if self.methods: self.methods = tuple([k.lower() for k in self.methods]) @property def is_exempt(self) -> bool: """Check if the limit is exempt.""" if self.exempt_when: return self.exempt_when() return False @property def scope(self) -> str | None: return ( self._scope(request.endpoint or "") if callable(self._scope) else self._scope ) @property def cost(self) -> int: if isinstance(self._cost, int): return self._cost return self._cost() @property def method_exempt(self) -> bool: """Check if the limit is not applicable for this method""" return self.methods is not None and request.method.lower() not in self.methods def scope_for(self, endpoint: str, method: str | None) -> str: """ Derive final bucket (scope) for this limit given the endpoint and request method. If the limit is shared between multiple routes, the scope does not include the endpoint. 
""" limit_scope = self.scope if limit_scope: if self.shared: scope = limit_scope else: scope = f"{endpoint}:{limit_scope}" else: scope = endpoint if self.per_method: assert method scope += f":{method.upper()}" return scope @dataclasses.dataclass(eq=True, unsafe_hash=True) class LimitGroup: """ represents a group of related limits either from a string or a callable that returns one """ limit_provider: Callable[[], str] | str key_function: Callable[[], str] scope: str | Callable[[str], str] | None = None methods: tuple[str, ...] | None = None error_message: str | None = None exempt_when: Callable[[], bool] | None = None override_defaults: bool | None = False deduct_when: Callable[[Response], bool] | None = None on_breach: Callable[[RequestLimit], Response | None] | None = None per_method: bool = False cost: Callable[[], int] | int | None = None shared: bool = False def __iter__(self) -> Iterator[Limit]: limit_str = ( self.limit_provider() if callable(self.limit_provider) else self.limit_provider ) limit_items = parse_many(limit_str) if limit_str else [] for limit in limit_items: yield Limit( limit, self.key_function, self.scope, self.per_method, self.methods, self.error_message, self.exempt_when, self.override_defaults, self.deduct_when, self.on_breach, self.cost or 1, self.shared, ) flask-limiter-3.12/push-release.sh000077500000000000000000000002631476516161100171470ustar00rootroot00000000000000#!/bin/bash cur=$(git rev-parse --abbrev-ref HEAD) git checkout master git push origin master --tags git checkout stable git merge master git push origin stable git checkout $cur flask-limiter-3.12/pyproject.toml000066400000000000000000000007321476516161100171300ustar00rootroot00000000000000[tool.versioneer] VCS = "git" style = "pep440-pre" versionfile_source = "flask_limiter/_version.py" versionfile_build = "flask_limiter/_version.py" parentdir_prefix = "flask-limiter-" tag_prefix = "" [tool.ruff] line-length=88 indent-width = 4 exclude = ["_version.py"] [tool.ruff.format] quote-style = "double" indent-style = "space" skip-magic-trailing-comma = false line-ending = "auto" [tool.ruff.lint.isort] required-imports = ["from __future__ import annotations"] flask-limiter-3.12/pytest.ini000066400000000000000000000002611476516161100162420ustar00rootroot00000000000000[pytest] norecursedirs = build *.egg markers = unit: mark a test as a unit test. 
addopts = --verbose --tb=short --capture=no -rfEsxX --cov=flask_limiter flask-limiter-3.12/requirements/000077500000000000000000000000001476516161100167355ustar00rootroot00000000000000flask-limiter-3.12/requirements/ci.txt000066400000000000000000000000131476516161100200630ustar00rootroot00000000000000-r dev.txt flask-limiter-3.12/requirements/dev.txt000066400000000000000000000000521476516161100202510ustar00rootroot00000000000000-r test.txt -r docs.txt ruff keyring mypy flask-limiter-3.12/requirements/docs.txt000066400000000000000000000003471476516161100204320ustar00rootroot00000000000000-r main.txt furo==2024.8.6 Sphinx>4,<9 sphinx-autobuild==2024.10.3 sphinx-copybutton==0.5.2 sphinx-inline-tabs==2023.4.21 sphinx-issues==5.0.0 sphinxext-opengraph==0.9.1 sphinx-paramlinks==0.6.0 sphinxcontrib-programoutput==0.18 flask-limiter-3.12/requirements/main.txt000066400000000000000000000000641476516161100204220ustar00rootroot00000000000000limits>=3.13 Flask>=2 ordered-set>4,<5 rich>=12,<14 flask-limiter-3.12/requirements/test.txt000066400000000000000000000003751476516161100204620ustar00rootroot00000000000000-r main.txt # For interop / recipes Flask[async]>=2.0.0 flask-restful flask-restx asgiref>=3.2 # Storage related dependencies redis pymemcache pymongo # For the tests themselves coverage<8 hiro>0.1.6 pytest pytest-cov pytest-mock lovely-pytest-docker flask-limiter-3.12/scripts/000077500000000000000000000000001476516161100157015ustar00rootroot00000000000000flask-limiter-3.12/scripts/github_release_notes.sh000077500000000000000000000004521476516161100224330ustar00rootroot00000000000000#!/bin/bash TAG=$(echo $GITHUB_REF | cut -d / -f 3) git format-patch -1 $TAG --stdout | grep -P '^\+' | \ sed '1,4d' | \ grep -v "Release Date" | \ sed -E -e 's/^\+(.*)/\1/' -e 's/^\*(.*)/## \1/' -e 's/^ //' -e 's/\:(.*)\:(.*)/\2/' | \ sed -E -e 's/`(.*) <(https.*)>`_/[\1](\2)/' flask-limiter-3.12/setup.cfg000066400000000000000000000010331476516161100160300ustar00rootroot00000000000000[flake8] exclude = build/**,doc/**,_version.py,version.py,versioneer.py ignore = W503 max_line_length=100 [mypy] strict = True check_untyped_defs = True disallow_any_generics = True disallow_any_unimported = True disallow_incomplete_defs = True disallow_untyped_defs = True disallow_untyped_decorators = True show_error_codes = True warn_return_any = True warn_unused_ignores = True [mypy-werkzeug.*] no_implicit_reexport = False [mypy-flask_limiter._compat.*] ignore_errors = True [mypy-flask_limiter._version] ignore_errors = True flask-limiter-3.12/setup.py000077500000000000000000000026501476516161100157320ustar00rootroot00000000000000""" setup.py for Flask-Limiter """ __author__ = "Ali-Akber Saifee" __email__ = "ali@indydevs.org" __copyright__ = "Copyright 2023, Ali-Akber Saifee" import os from setuptools import find_packages, setup import versioneer this_dir = os.path.abspath(os.path.dirname(__file__)) REQUIREMENTS = filter( None, open(os.path.join(this_dir, "requirements", "main.txt")).read().splitlines() ) EXTRA_REQUIREMENTS = { "redis": ["limits[redis]"], "memcached": ["limits[memcached]"], "mongodb": ["limits[mongodb]"], "valkey": ["limits[valkey]"] } setup( name="Flask-Limiter", author=__author__, author_email=__email__, license="MIT", url="https://flask-limiter.readthedocs.org", project_urls={ "Source": "https://github.com/alisaifee/flask-limiter", }, zip_safe=False, version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), install_requires=list(REQUIREMENTS), classifiers=[k for k in 
open("CLASSIFIERS").read().split("\n") if k], description="Rate limiting for flask applications", long_description=open("README.rst").read(), packages=find_packages(exclude=["tests*"]), python_requires=">=3.10", extras_require=EXTRA_REQUIREMENTS, include_package_data=True, package_data={ "flask_limiter": ["py.typed"], }, entry_points={ 'flask.commands': [ 'limiter=flask_limiter.commands:cli' ], }, ) flask-limiter-3.12/tag.sh000077500000000000000000000022151476516161100153240ustar00rootroot00000000000000#!/bin/bash last_tag=$(git tag | sort -Vr | head -n 1) echo current version:$(python setup.py --version), current tag: $last_tag read -p "new version:" new_version last_portion=$(grep -P "^Changelog$" HISTORY.rst -5 | grep -P "^v\d+.\d+") changelog_file=/var/tmp/flask-limiter.newchangelog new_changelog_heading="v${new_version}" new_changelog_heading_sep=$(python -c "print('-'*len('$new_changelog_heading'))") echo $new_changelog_heading > $changelog_file echo $new_changelog_heading_sep >> $changelog_file echo "Release Date: `date +"%Y-%m-%d"`" >> $changelog_file python -c "print(open('HISTORY.rst').read().replace('$last_portion', open('$changelog_file').read() +'\n' + '$last_portion'))" > HISTORY.rst.new cp HISTORY.rst.new HISTORY.rst vim -O HISTORY.rst <(echo \# vim:filetype=git;git log $last_tag..HEAD --format='* %s (%h)%n%b' | sed -E '/^\*/! s/(.*)/ \1/g') if rst2html HISTORY.rst > /dev/null then echo "Tag $new_version" git add HISTORY.rst git commit -m "Update changelog for ${new_version}" git tag -s ${new_version} -m "Tag version ${new_version}" rm HISTORY.rst.new else echo changelog has errors. skipping tag. fi; flask-limiter-3.12/tests/000077500000000000000000000000001476516161100153545ustar00rootroot00000000000000flask-limiter-3.12/tests/__init__.py000066400000000000000000000000001476516161100174530ustar00rootroot00000000000000flask-limiter-3.12/tests/conftest.py000066400000000000000000000065601476516161100175620ustar00rootroot00000000000000from __future__ import annotations import socket import pymemcache import pymongo import pytest import redis from flask import Blueprint, Flask, request from flask.views import View from flask_limiter import ExemptionScope, Limiter from flask_limiter.util import get_remote_address def ping_socket(host, port): try: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect((host, port)) return True except Exception: return False @pytest.fixture def redis_connection(docker_services): docker_services.start("redis") docker_services.wait_for_service("redis", 6379, ping_socket) r = redis.from_url("redis://localhost:46379") r.flushall() return r @pytest.fixture def memcached_connection(docker_services): docker_services.start("memcached") docker_services.wait_for_service("memcached", 11211, ping_socket) return pymemcache.Client(("localhost", 31211)) @pytest.fixture def mongo_connection(docker_services): docker_services.start("mongodb") docker_services.wait_for_service("mongodb", 27017, ping_socket) return pymongo.MongoClient("mongodb://localhost:47017") @pytest.fixture def extension_factory(): def _build_app_and_extension(config={}, **limiter_args): app = Flask(__name__) for k, v in config.items(): app.config.setdefault(k, v) key_func = limiter_args.pop("key_func", get_remote_address) limiter = Limiter(key_func, app=app, **limiter_args) return app, limiter return _build_app_and_extension @pytest.fixture def kitchensink_factory(extension_factory): def _(**kwargs): def dynamic_default(): if request.headers.get("X-Evil"): return "10/minute" return 
"20/minute" def dynamic_default_cost(): if request.headers.get("X-Evil"): return 2 return 1 app, limiter = extension_factory( default_limits=["10/second", "1000/hour", dynamic_default], default_limits_exempt_when=lambda: request.headers.get("X-Internal"), default_limits_deduct_when=lambda response: response.status_code != 200, default_limits_cost=dynamic_default_cost, application_limits=["5000/hour"], meta_limits=["2/day"], headers_enabled=True, **kwargs, ) @app.route("/") def root(): return "42" health_blueprint = Blueprint("health", __name__, url_prefix="/health") @health_blueprint.route("/") def health(): return "ok" app.register_blueprint(health_blueprint) class ResourceView(View): methods = ["GET", "POST"] decorators = [limiter.limit("5/second", per_method=True)] def dispatch_request(self): return request.method.lower() app.add_url_rule("/resource", view_func=ResourceView.as_view("resource")) limiter.exempt( health_blueprint, flags=ExemptionScope.DEFAULT | ExemptionScope.APPLICATION | ExemptionScope.ANCESTORS, ) return app, limiter return _ @pytest.fixture(scope="session") def docker_services_project_name(): return "flask-limiter" @pytest.fixture(scope="session") def docker_compose_files(pytestconfig): return ["docker-compose.yml"] flask-limiter-3.12/tests/static/000077500000000000000000000000001476516161100166435ustar00rootroot00000000000000flask-limiter-3.12/tests/static/image.png000066400000000000000000000000001476516161100204210ustar00rootroot00000000000000flask-limiter-3.12/tests/test_blueprints.py000066400000000000000000000452541476516161100211660ustar00rootroot00000000000000from __future__ import annotations import datetime import logging import hiro from flask import Blueprint, Flask, current_app from flask_limiter import ExemptionScope, Limiter from flask_limiter.util import get_remote_address def test_blueprint(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) bp = Blueprint("main", __name__) @bp.route("/t1") def t1(): return "test" @bp.route("/t2") @limiter.limit("10 per minute") def t2(): return "test" app.register_blueprint(bp) with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 for i in range(0, 10): assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 def test_blueprint_static_exempt(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) bp = Blueprint("main", __name__, static_folder="static") app.register_blueprint(bp, url_prefix="/bp") with app.test_client() as cli: assert cli.get("/bp/static/image.png").status_code == 200 assert cli.get("/bp/static/image.png").status_code == 200 def test_blueprint_limit_with_route_limits(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) bp = Blueprint("main", __name__) @app.route("/") def root(): return "root" @bp.route("/t1") def t1(): return "test" @bp.route("/t2") @limiter.limit("10 per minute") def t2(): return "test" @bp.route("/t3") @limiter.limit("3 per hour", override_defaults=False) def t3(): return "test" limiter.limit("2/minute")(bp) app.register_blueprint(bp) with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 for i in range(0, 10): assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 assert 
cli.get("/t3").status_code == 200 assert cli.get("/t3").status_code == 200 assert cli.get("/t3").status_code == 429 timeline.forward(datetime.timedelta(minutes=1)) assert cli.get("/t3").status_code == 200 timeline.forward(datetime.timedelta(minutes=1)) assert cli.get("/t3").status_code == 429 def test_nested_blueprint_exemption_explicit(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") limiter.exempt(parent_bp) limiter.exempt(child_bp) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 def test_nested_blueprint_exemption_legacy(extension_factory): """ To capture legacy behavior, exempting a blueprint will not automatically exempt nested blueprints """ app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") limiter.exempt(parent_bp) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 429 def test_nested_blueprint_exemption_nested(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") limiter.exempt(parent_bp, flags=ExemptionScope.DEFAULT | ExemptionScope.DESCENDENTS) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 def test_nested_blueprint_exemption_ridiculous(extension_factory): app, limiter = extension_factory( default_limits=["1/minute"], application_limits=["5/day"] ) n1 = Blueprint("n1", __name__, url_prefix="/n1") n2 = Blueprint("n2", __name__, url_prefix="/n2") n1_1 = Blueprint("n1_1", __name__, url_prefix="/n1_1") n2_1 = Blueprint("n2_1", __name__, url_prefix="/n2_1") n1_1_1 = Blueprint("n1_1_1", __name__, url_prefix="/n1_1_1") n1_1_2 = Blueprint("n1_1_2", __name__, url_prefix="/n1_1_2") n2_1_1 = Blueprint("n2_1_1", __name__, url_prefix="/n2_1_1") @app.route("/") def root(): return "42" @n1.route("/") def _n1(): return "n1" @n1_1.route("/") def _n1_1(): return "n1_1" @n1_1_1.route("/") def 
_n1_1_1(): return "n1_1_1" @n1_1_2.route("/") def _n1_1_2(): return "n1_1_2" @n2.route("/") def _n2(): return "n2" @n2_1.route("/") def _n2_1(): return "n2_1" @n2_1_1.route("/") def _n2_1_1(): return "n2_1_1" # All routes under n1, and it's descendents are exempt for default/application limits limiter.exempt( n1, flags=ExemptionScope.DEFAULT | ExemptionScope.APPLICATION | ExemptionScope.DESCENDENTS, ) # n1 descendents are exempt from application & defaults so need their own limits limiter.limit("2/minute")(n1_1) # n1_1_1 wants to not inherit n1_1's limits and is otherwise exempt from # application and defaults due to n1's exemptions. limiter.exempt(n1_1_1, flags=ExemptionScope.ANCESTORS) # n1_1_2 will not get it's parent (n1_1) limit and sets it's own limiter.limit("3/minute")(n1_1_2) # n2 overrides the default limits but still gets the application wide limits limiter.limit("2/minute")(n2) # n2_1 wants out of defaults and application limits limiter.exempt(n2_1, flags=ExemptionScope.DEFAULT | ExemptionScope.APPLICATION) # but want its own limits limiter.limit("3/minute")(n2_1) # n2_1_1 want's out of it's parent's limits only but wants to keep application/default limits limiter.exempt(n2_1_1, flags=ExemptionScope.ANCESTORS) n1.register_blueprint(n1_1) n1_1.register_blueprint(n1_1_1) n1_1.register_blueprint(n1_1_2) n2.register_blueprint(n2_1) n2_1.register_blueprint(n2_1_1) app.register_blueprint(n1) app.register_blueprint(n2) with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 # Default hit # application: exempt, default: exempt, explicit: none assert cli.get("/n1/").status_code == 200 assert cli.get("/n1/").status_code == 200 # application: exempt from n1, default exempt from n1 & overridden # by explicit: 2/minute assert cli.get("/n1/n1_1/").status_code == 200 assert cli.get("/n1/n1_1/").status_code == 200 assert cli.get("/n1/n1_1/").status_code == 429 # application: exempt from n1, default: exempt from n1, inherited: exempt, # explicit: none assert cli.get("/n1/n1_1/n1_1_1/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_1/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_1/").status_code == 200 # application: exempt from n1, default: exempt from n1, inherited: exempt, # explicit: 3/minute assert cli.get("/n1/n1_1/n1_1_2/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_2/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_2/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_2/").status_code == 429 # application: active, default: exempt, explicit: 2/minute assert cli.get("/n2/").status_code == 200 assert cli.get("/n2/").status_code == 200 assert cli.get("/n2/").status_code == 429 # application: exempt, default: exempt, explicit: 3/minute therefore overriding n2 assert cli.get("/n2/n2_1/").status_code == 200 assert cli.get("/n2/n2_1/").status_code == 200 assert cli.get("/n2/n2_1/").status_code == 200 assert cli.get("/n2/n2_1/").status_code == 429 # almost there.. # application: active, default: active (1/minute), ancestors: exempt assert cli.get("/n2/n2_1/n2_1_1/").status_code == 200 assert cli.get("/n2/n2_1/n2_1_1/").status_code == 429 timeline.forward(60) assert cli.get("/n2/n2_1/n2_1_1/").status_code == 200 assert cli.get("/n2/n2_1/n2_1_1/").status_code == 429 timeline.forward(60) # application limit (5/day) gets this one. 
assert cli.get("/n2/n2_1/n2_1_1/").status_code == 429 # but not those exempt from application limits assert cli.get("/n1/").status_code == 200 assert cli.get("/n1/n1_1/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_1/").status_code == 200 assert cli.get("/n1/n1_1/n1_1_2/").status_code == 200 # but doesn't spare the ones that didn't opt out. assert cli.get("/").status_code == 429 assert cli.get("/n2/").status_code == 429 def test_nested_blueprint_exemption_child_only(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") limiter.exempt(child_bp) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) app.register_blueprint(child_bp) # weird with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 429 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/child/").status_code == 200 assert cli.get("/child/").status_code == 200 def test_nested_blueprint_child_explicit_limit(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") limiter.limit("2/minute")(child_bp) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 429 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 429 def test_nested_blueprint_child_explicit_nested_limits(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) parent_bp = Blueprint("parent", __name__, url_prefix="/parent") child_bp = Blueprint("child", __name__, url_prefix="/child") grand_child_bp = Blueprint("grand_child", __name__, url_prefix="/grand_child") limiter.limit("3/hour")(parent_bp) limiter.limit("2/minute")(child_bp) limiter.limit("5/day", override_defaults=False)(grand_child_bp) @app.route("/") def root(): return "42" @parent_bp.route("/") def parent(): return "41" @child_bp.route("/") def child(): return "40" @grand_child_bp.route("/") def grand_child(): return "39" child_bp.register_blueprint(grand_child_bp) parent_bp.register_blueprint(child_bp) app.register_blueprint(parent_bp) with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 200 assert cli.get("/parent/").status_code == 429 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/").status_code == 429 timeline.forward(datetime.timedelta(minutes=1)) assert 
cli.get("/parent/child/").status_code == 200 # parent's limit is ignored as override_defaults is True by default assert cli.get("/parent/child/").status_code == 200 assert cli.get("/parent/child/grand_child/").status_code == 200 # global limit is ignored as parent override's default assert cli.get("/parent/child/grand_child/").status_code == 200 # child's limit is not ignored as grandchild sets override default to False assert cli.get("/parent/child/grand_child/").status_code == 429 timeline.forward(datetime.timedelta(minutes=1)) assert cli.get("/parent/child/grand_child/").status_code == 200 assert cli.get("/parent/child/grand_child/").status_code == 429 timeline.forward(datetime.timedelta(minutes=60)) assert cli.get("/parent/child/grand_child/").status_code == 200 timeline.forward(datetime.timedelta(minutes=60)) assert cli.get("/parent/child/grand_child/").status_code == 200 timeline.forward(datetime.timedelta(minutes=60)) # grand child's own limit kicks in assert cli.get("/parent/child/grand_child/").status_code == 429 def test_register_blueprint(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) bp_1 = Blueprint("bp1", __name__) bp_2 = Blueprint("bp2", __name__) bp_3 = Blueprint("bp3", __name__) bp_4 = Blueprint("bp4", __name__) @bp_1.route("/t1") def t1(): return "test" @bp_1.route("/t2") def t2(): return "test" @bp_2.route("/t3") def t3(): return "test" @bp_3.route("/t4") def t4(): return "test" @bp_4.route("/t5") def t5(): return "test" def dy_limit(): return "1/second" app.register_blueprint(bp_1) app.register_blueprint(bp_2) app.register_blueprint(bp_3) app.register_blueprint(bp_4) limiter.limit("1/second")(bp_1) limiter.exempt(bp_3) limiter.limit(dy_limit)(bp_4) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 timeline.forward(1) assert cli.get("/t1").status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 timeline.forward(1) assert cli.get("/t2").status_code == 200 assert cli.get("/t3").status_code == 200 for i in range(0, 10): timeline.forward(1) assert cli.get("/t3").status_code == 429 for i in range(0, 10): assert cli.get("/t4").status_code == 200 assert cli.get("/t5").status_code == 200 assert cli.get("/t5").status_code == 429 def test_invalid_decorated_static_limit_blueprint(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) limiter = Limiter(get_remote_address, app=app, default_limits=["1/second"]) bp = Blueprint("bp1", __name__) @bp.route("/t1") def t1(): return "42" limiter.limit("2/sec")(bp) app.register_blueprint(bp) with app.test_client() as cli: with hiro.Timeline().freeze(): assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert "failed to load" in caplog.records[0].msg assert "exceeded at endpoint" in caplog.records[-1].msg def test_invalid_decorated_dynamic_limits_blueprint(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) app.config.setdefault("X", "2 per sec") limiter = Limiter(get_remote_address, app=app, default_limits=["1/second"]) bp = Blueprint("bp1", __name__) @bp.route("/t1") def t1(): return "42" limiter.limit(lambda: current_app.config.get("X"))(bp) app.register_blueprint(bp) with app.test_client() as cli: with hiro.Timeline().freeze(): assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert len(caplog.records) == 3 assert "failed to load ratelimit" in caplog.records[0].msg assert 
"failed to load ratelimit" in caplog.records[1].msg assert "exceeded at endpoint" in caplog.records[2].msg flask-limiter-3.12/tests/test_commands.py000066400000000000000000000106271476516161100205740ustar00rootroot00000000000000from __future__ import annotations import os import re import pytest from flask import Flask from flask_limiter.commands import cli @pytest.fixture(autouse=True) def set_env(): os.environ["NO_COLOR"] = "True" def test_no_limiter(kitchensink_factory): app = Flask(__name__) runner = app.test_cli_runner() result = runner.invoke(cli, ["config"]) assert "No Flask-Limiter extension installed" in result.output result = runner.invoke(cli, ["limits"]) assert "No Flask-Limiter extension installed" in result.output def test_config(kitchensink_factory): app, limiter = kitchensink_factory() runner = app.test_cli_runner() result = runner.invoke(cli, ["config"]) assert re.compile("Enabled.*True").search(result.output) def test_no_config(extension_factory): app, limiter = extension_factory() runner = app.test_cli_runner() result = runner.invoke(cli, ["config"]) assert re.compile("Enabled.*True").search(result.output) def test_limits(kitchensink_factory): app, limiter = kitchensink_factory() runner = app.test_cli_runner() result = runner.invoke(cli, ["limits"]) assert "5000 per 1 hour" in result.output assert re.compile(r"health.health: /health/\n\s*└── Exempt", re.MULTILINE).search( result.output ) def test_limits_filter_endpoint(kitchensink_factory): app, limiter = kitchensink_factory() runner = app.test_cli_runner() result = runner.invoke(cli, ["limits", "--endpoint=root"]) assert "root: /" in result.output result = runner.invoke(cli, ["limits", "--endpoint=groot"]) assert "groot not found" in result.output def test_limits_filter_path(kitchensink_factory): app, limiter = kitchensink_factory() runner = app.test_cli_runner() result = runner.invoke(cli, ["limits", "--path=/"]) assert "root: /" in result.output result = runner.invoke(cli, ["limits", "--path=/", "--method=POST"]) assert "POST: / could not be matched" in result.output result = runner.invoke(cli, ["limits", "--path=/groot"]) assert "groot could not be matched" in result.output def test_limits_with_test(kitchensink_factory, mocker): app, limiter = kitchensink_factory() runner = app.test_cli_runner() mt = mocker.spy(limiter.limiter, "test") mw = mocker.spy(limiter.limiter, "get_window_stats") result = runner.invoke(cli, ["limits", "--key=127.0.0.1"]) assert "5000 per 1 hour: Pass (5000 out of 5000 remaining)" in result.output mt.side_effect = lambda *a: False mw.side_effect = lambda *a: (0, 0) result = runner.invoke(cli, ["limits", "--key=127.0.0.1"]) assert "5000 per 1 hour: Fail (0 out of 5000 remaining)" in result.output assert re.compile(r"health.health: /health/\n\s*└── Exempt", re.MULTILINE).search( result.output ) def test_limits_with_test_storage_down(kitchensink_factory, mocker): app, limiter = kitchensink_factory() ms = mocker.spy(list(app.extensions.get("limiter"))[0].storage, "check") ms.side_effect = lambda: False runner = app.test_cli_runner() result = runner.invoke(cli, ["limits", "--key=127.0.0.1"]) assert "Storage not available" in result.output result = runner.invoke(cli, ["config"]) assert re.compile("└── Status.*└── Error").search(result.output) def test_clear_limits_no_extension(): app = Flask(__name__) runner = app.test_cli_runner() result = runner.invoke(cli, ["clear", "--key=127.0.0.1", "-y"]) assert "No Flask-Limiter extension installed" in result.output def test_clear_limits(kitchensink_factory, 
redis_connection): app, limiter = kitchensink_factory(storage_uri="redis://localhost:46379") runner = app.test_cli_runner() with app.test_client() as client: [client.get("/") for _ in range(5)] [client.get("/resource") for _ in range(5)] [client.post("/resource") for _ in range(5)] result = runner.invoke(cli, ["limits", "--key=127.0.0.1"]) assert "Fail (0 out of 5 remaining)" in result.output result = runner.invoke(cli, ["clear", "--key=127.0.0.1", "-y"]) assert "5000 per 1 hour: Cleared" in result.output assert "5 per 1 second: Cleared" in result.output result = runner.invoke(cli, ["clear", "--key=127.0.0.1", "--endpoint=root", "-y"]) assert "5000 per 1 hour: Cleared" not in result.output assert "5 per 1 second: Cleared" not in result.output assert "10 per 1 second: Cleared" in result.output flask-limiter-3.12/tests/test_configuration.py000066400000000000000000000104541476516161100216400ustar00rootroot00000000000000from __future__ import annotations import math import time import hiro import pytest from flask import Flask from limits.errors import ConfigurationError from limits.storage import MemoryStorage from limits.strategies import MovingWindowRateLimiter from flask_limiter import HeaderNames from flask_limiter.constants import ConfigVars from flask_limiter.extension import Limiter from flask_limiter.util import get_remote_address def test_invalid_strategy(): app = Flask(__name__) app.config.setdefault(ConfigVars.STRATEGY, "fubar") with pytest.raises(ConfigurationError): Limiter(get_remote_address, app=app) def test_invalid_storage_string(): app = Flask(__name__) app.config.setdefault(ConfigVars.STORAGE_URI, "fubar://localhost:1234") with pytest.raises(ConfigurationError): Limiter(get_remote_address, app=app) def test_constructor_arguments_over_config(redis_connection): app = Flask(__name__) app.config.setdefault(ConfigVars.STRATEGY, "fixed-window-elastic-expiry") limiter = Limiter(get_remote_address, strategy="moving-window") limiter.init_app(app) app.config.setdefault(ConfigVars.STORAGE_URI, "redis://localhost:46379") app.config.setdefault(ConfigVars.APPLICATION_LIMITS, "1/minute") app.config.setdefault(ConfigVars.META_LIMITS, "1/hour") assert type(limiter._limiter) is MovingWindowRateLimiter limiter = Limiter(get_remote_address, storage_uri="memory://") limiter.init_app(app) assert type(limiter._storage) is MemoryStorage @app.route("/") def root(): return "root" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 timeline.forward(60) assert cli.get("/").status_code == 429 def test_header_names_config(): app = Flask(__name__) app.config.setdefault(ConfigVars.HEADER_LIMIT, "XX-Limit") app.config.setdefault(ConfigVars.HEADER_REMAINING, "XX-Remaining") app.config.setdefault(ConfigVars.HEADER_RESET, "XX-Reset") limiter = Limiter( get_remote_address, headers_enabled=True, default_limits=["1/second"] ) limiter.init_app(app) @app.route("/") def root(): return "42" with app.test_client() as client: resp = client.get("/") assert resp.headers["XX-Limit"] == "1" assert resp.headers["XX-Remaining"] == "0" assert resp.headers["XX-Reset"] == str(math.ceil(time.time() + 1)) def test_header_names_constructor(): app = Flask(__name__) limiter = Limiter( get_remote_address, headers_enabled=True, default_limits=["1/second"], header_name_mapping={ HeaderNames.LIMIT: "XX-Limit", HeaderNames.REMAINING: "XX-Remaining", HeaderNames.RESET: "XX-Reset", }, ) limiter.init_app(app) @app.route("/") def root(): 
return "42" with app.test_client() as client: resp = client.get("/") assert resp.headers["XX-Limit"] == "1" assert resp.headers["XX-Remaining"] == "0" assert resp.headers["XX-Reset"] == str(math.ceil(time.time() + 1)) def test_invalid_config_with_disabled(): app = Flask(__name__) app.config.setdefault(ConfigVars.ENABLED, False) app.config.setdefault(ConfigVars.STORAGE_URI, "fubar://") limiter = Limiter(get_remote_address, app=app, default_limits=["1/hour"]) @app.route("/") def root(): return "root" @app.route("/explicit") @limiter.limit("2/hour") def explicit(): return "explicit" with app.test_client() as client: assert client.get("/").status_code == 200 assert client.get("/").status_code == 200 assert client.get("/explicit").status_code == 200 assert client.get("/explicit").status_code == 200 assert client.get("/explicit").status_code == 200 def test_uninitialized_limiter(): app = Flask(__name__) limiter = Limiter(get_remote_address, default_limits=["1/hour"]) @app.route("/") @limiter.limit("2/hour") def root(): return "root" with app.test_client() as client: assert client.get("/").status_code == 200 assert client.get("/").status_code == 200 assert client.get("/").status_code == 200 flask-limiter-3.12/tests/test_context_manager.py000066400000000000000000000033441476516161100221470ustar00rootroot00000000000000from __future__ import annotations import hiro from flask_limiter import RateLimitExceeded def test_static_limit(extension_factory): app, limiter = extension_factory() @app.route("/t1") def t1(): with limiter.limit("1/second"): resp = "ok" try: with limiter.limit("1/day"): resp += "maybe" except RateLimitExceeded: pass return resp with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: response = cli.get("/t1") assert 200 == response.status_code assert "okmaybe" == response.text assert 429 == cli.get("/t1").status_code timeline.forward(1) response = cli.get("/t1") assert 200 == response.status_code assert "ok" == response.text def test_dynamic_limits(extension_factory): app, limiter = extension_factory() @app.route("/t1") def t1(): with limiter.limit(lambda: "1/second"): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code def test_scoped_context_manager(extension_factory): app, limiter = extension_factory() @app.route("/t1/") def t1(param: int): with limiter.limit("1/second", scope=param): return "p1" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1/1").status_code assert 429 == cli.get("/t1/1").status_code assert 200 == cli.get("/t1/2").status_code assert 429 == cli.get("/t1/2").status_code flask-limiter-3.12/tests/test_decorators.py000066400000000000000000000731141476516161100211400ustar00rootroot00000000000000from __future__ import annotations import asyncio import logging from functools import wraps from unittest import mock import hiro from flask import Blueprint, Flask, current_app, g, make_response, request from werkzeug.exceptions import BadRequest from flask_limiter import ExemptionScope, Limiter from flask_limiter.util import get_remote_address def get_ip_from_header(): return request.headers.get("Test-IP") or "127.0.0.1" def test_multiple_decorators(extension_factory): app, limiter = extension_factory(key_func=get_ip_from_header) @app.route("/t1") @limiter.limit( "100 per minute", key_func=lambda: "test" ) # effectively becomes a limit for all users @limiter.limit("50/minute") # per ip as per default key_func def 
t1(): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: for i in range(0, 100): assert (200 if i < 50 else 429) == cli.get( "/t1", headers={"Test-IP": "127.0.0.2"} ).status_code for i in range(50): assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code assert 429 == cli.get("/t1", headers={"Test-IP": "127.0.0.3"}).status_code def test_exempt_routes(extension_factory): app, limiter = extension_factory( default_limits=["1/minute"], application_limits=["2/minute"] ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.exempt def t2(): return "test" @app.route("/t3") @limiter.exempt(flags=ExemptionScope.APPLICATION) def t3(): return "test" @app.route("/t4") def t4(): return "test" with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 # exempt from default + application assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 # exempt from application assert cli.get("/t3").status_code == 200 assert cli.get("/t3").status_code == 429 # 2/minute for application is now taken up assert cli.get("/t4").status_code == 429 def test_decorated_limit_with_scope(extension_factory): app, limiter = extension_factory() @app.route("/t/") @limiter.limit("1/second", scope=lambda _: request.view_args["path"]) def t(path): return "test" with hiro.Timeline(): with app.test_client() as cli: assert cli.get("/t/1").status_code == 200 assert cli.get("/t/1").status_code == 429 assert cli.get("/t/2").status_code == 200 assert cli.get("/t/2").status_code == 429 def test_decorated_limit_with_conditional_deduction(extension_factory): app, limiter = extension_factory() @app.route("/t/") @limiter.limit("1/second", deduct_when=lambda resp: resp.status_code == 200) @limiter.limit("1/minute", deduct_when=lambda resp: resp.status_code == 400) def t(path): if path == "1": return "test" raise BadRequest() with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/t/1").status_code == 200 assert cli.get("/t/1").status_code == 429 timeline.forward(1) assert cli.get("/t/2").status_code == 400 timeline.forward(1) assert cli.get("/t/1").status_code == 429 assert cli.get("/t/2").status_code == 429 timeline.forward(60) assert cli.get("/t/1").status_code == 200 def test_shared_limit_with_conditional_deduction(extension_factory): app, limiter = extension_factory() bp = Blueprint("main", __name__) limit = limiter.shared_limit( "2/minute", "not_found", deduct_when=lambda response: response.status_code == 400, ) @app.route("/test/") @limit def app_test(path): if path != "1": raise BadRequest() return path @bp.route("/test/") def bp_test(path): if path != "1": raise BadRequest() return path limit(bp) app.register_blueprint(bp, url_prefix="/bp") with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/bp/test/1").status_code == 200 assert cli.get("/bp/test/1").status_code == 200 assert cli.get("/test/1").status_code == 200 assert cli.get("/bp/test/2").status_code == 400 assert cli.get("/test/2").status_code == 400 assert cli.get("/bp/test/2").status_code == 429 assert cli.get("/bp/test/1").status_code == 429 assert cli.get("/test/1").status_code == 429 assert cli.get("/test/2").status_code == 429 timeline.forward(60) assert cli.get("/bp/test/1").status_code == 200 assert cli.get("/test/1").status_code == 200 def test_header_ordering_with_conditional_deductions(extension_factory): app, limiter = extension_factory(default_limits=["3/second"], 
headers_enabled=True) @app.route("/test_combined/") @limiter.limit( "1/hour", override_defaults=False, deduct_when=lambda response: response.status_code != 200, ) @limiter.limit( "4/minute", override_defaults=False, deduct_when=lambda response: response.status_code == 200, ) def app_test_combined(path): if path != "1": raise BadRequest() return path @app.route("/test/") @limiter.limit("2/hour", deduct_when=lambda response: response.status_code != 200) def app_test(path): if path != "1": raise BadRequest() return path with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/test_combined/1").status_code == 200 resp = cli.get("/test_combined/1") assert resp.status_code == 200 assert resp.headers.get("X-RateLimit-Limit") == "3" assert resp.headers.get("X-RateLimit-Remaining") == "1" assert cli.get("/test_combined/2").status_code == 400 resp = cli.get("/test/1") assert resp.headers.get("X-RateLimit-Limit") == "2" assert resp.headers.get("X-RateLimit-Remaining") == "2" resp = cli.get("/test/2") assert resp.headers.get("X-RateLimit-Limit") == "2" assert resp.headers.get("X-RateLimit-Remaining") == "1" timeline.forward(1) resp = cli.get("/test_combined/1") assert resp.status_code == 429 assert resp.headers.get("X-RateLimit-Limit") == "1" assert resp.headers.get("X-RateLimit-Remaining") == "0" assert cli.get("/test_combined/2").status_code == 429 timeline.forward(60) assert cli.get("/test_combined/1").status_code == 429 assert cli.get("/test_combined/2").status_code == 429 timeline.forward(3600) assert cli.get("/test_combined/1").status_code == 200 def test_decorated_limits_with_combined_defaults(extension_factory): app, limiter = extension_factory(default_limits=["2/minute"]) @app.route("/") @limiter.limit("1/second", override_defaults=False) def root(): return "root" with hiro.Timeline() as timeline: with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code timeline.forward(60) assert 200 == cli.get("/").status_code timeline.forward(1) assert 200 == cli.get("/").status_code timeline.forward(1) assert 429 == cli.get("/").status_code def test_decorated_limit_with_combined_defaults_per_method(extension_factory): app, limiter = extension_factory( default_limits=["2/minute"], default_limits_per_method=True ) @app.route("/", methods=["GET", "PUT"]) @limiter.limit("1/second", override_defaults=False, methods=["GET"]) def root(): return "root" with hiro.Timeline() as timeline: with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code assert 200 == cli.put("/").status_code assert 200 == cli.put("/").status_code assert 429 == cli.put("/").status_code timeline.forward(60) assert 200 == cli.get("/").status_code assert 200 == cli.put("/").status_code timeline.forward(1) assert 200 == cli.get("/").status_code assert 200 == cli.put("/").status_code timeline.forward(1) assert 429 == cli.get("/").status_code assert 429 == cli.put("/").status_code def test_decorated_dynamic_limits(extension_factory): app, limiter = extension_factory({"X": "2 per second"}, default_limits=["1/second"]) def request_context_limit(): limits = {"127.0.0.1": "10 per minute", "127.0.0.2": "1 per minute"} remote_addr = request.headers.get("Test-IP").split(",")[0] or "127.0.0.1" limit = limits.setdefault(remote_addr, "1 per minute") return limit @app.route("/t1") @limiter.limit("20/day") @limiter.limit(lambda: current_app.config.get("X")) @limiter.limit(request_context_limit) def t1(): return "42" @app.route("/t2") 
@limiter.limit(lambda: current_app.config.get("X")) def t2(): return "42" R1 = {"Test-IP": "127.0.0.1, 127.0.0.0"} R2 = {"Test-IP": "127.0.0.2"} with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: for i in range(0, 10): assert cli.get("/t1", headers=R1).status_code == 200 timeline.forward(1) assert cli.get("/t1", headers=R1).status_code == 429 assert cli.get("/t1", headers=R2).status_code == 200 assert cli.get("/t1", headers=R2).status_code == 429 timeline.forward(60) assert cli.get("/t1", headers=R2).status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 timeline.forward(1) assert cli.get("/t2").status_code == 200 def test_invalid_decorated_dynamic_limits(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) app.config.setdefault("X", "2 per sec") limiter = Limiter(get_ip_from_header, app=app, default_limits=["1/second"]) @app.route("/t1") @limiter.limit(lambda: current_app.config.get("X")) def t1(): return "42" with app.test_client() as cli: with hiro.Timeline().freeze(): assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 # 2 for invalid limit, 1 for warning. assert len(caplog.records) == 3 assert "failed to load ratelimit" in caplog.records[0].msg assert "failed to load ratelimit" in caplog.records[1].msg assert "exceeded at endpoint" in caplog.records[2].msg assert caplog.records[2].levelname == "INFO" def test_decorated_limit_empty_exempt(caplog): app = Flask(__name__) limiter = Limiter(get_remote_address, app=app) @app.route("/t1") @limiter.limit(lambda: "") def t1(): return "42" with app.test_client() as cli: with hiro.Timeline().freeze(): assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert not caplog.records def test_invalid_decorated_static_limits(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) limiter = Limiter(get_ip_from_header, app=app, default_limits=["1/second"]) @app.route("/t1") @limiter.limit("2/sec") def t1(): return "42" with app.test_client() as cli: with hiro.Timeline().freeze(): assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert "failed to load" in caplog.records[0].msg assert "exceeded at endpoint" in caplog.records[-1].msg def test_named_shared_limit(extension_factory): app, limiter = extension_factory() shared_limit_a = limiter.shared_limit("1/minute", scope="a") shared_limit_b = limiter.shared_limit("1/minute", scope="b") @app.route("/t1") @shared_limit_a def route1(): return "route1" @app.route("/t2") @shared_limit_a def route2(): return "route2" @app.route("/t3") @shared_limit_b def route3(): return "route3" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t3").status_code assert 429 == cli.get("/t2").status_code def test_dynamic_shared_limit(extension_factory): app, limiter = extension_factory() fn_a = mock.Mock() fn_b = mock.Mock() fn_a.return_value = "foo" fn_b.return_value = "bar" dy_limit_a = limiter.shared_limit("1/minute", scope=fn_a) dy_limit_b = limiter.shared_limit("1/minute", scope=fn_b) @app.route("/t1") @dy_limit_a def t1(): return "route1" @app.route("/t2") @dy_limit_a def t2(): return "route2" @app.route("/t3") @dy_limit_b def t3(): return "route3" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t3").status_code assert 429 == cli.get("/t2").status_code assert 
429 == cli.get("/t3").status_code assert 2 == fn_a.call_count assert 2 == fn_b.call_count fn_b.assert_called_with("t3") fn_a.assert_has_calls([mock.call("t1"), mock.call("t2")]) def test_conditional_limits(): """Test that the conditional activation of the limits work.""" app = Flask(__name__) limiter = Limiter(get_ip_from_header, app=app) @app.route("/limited") @limiter.limit("1 per day") def limited_route(): return "passed" @app.route("/unlimited") @limiter.limit("1 per day", exempt_when=lambda: True) def never_limited_route(): return "should always pass" is_exempt = False @app.route("/conditional") @limiter.limit("1 per day", exempt_when=lambda: is_exempt) def conditionally_limited_route(): return "conditional" with app.test_client() as cli: assert cli.get("/limited").status_code == 200 assert cli.get("/limited").status_code == 429 assert cli.get("/unlimited").status_code == 200 assert cli.get("/unlimited").status_code == 200 assert cli.get("/conditional").status_code == 200 assert cli.get("/conditional").status_code == 429 is_exempt = True assert cli.get("/conditional").status_code == 200 is_exempt = False assert cli.get("/conditional").status_code == 429 def test_conditional_shared_limits(): """Test that conditional shared limits work.""" app = Flask(__name__) limiter = Limiter(get_ip_from_header, app=app) @app.route("/limited") @limiter.shared_limit("1 per day", "test_scope") def limited_route(): return "passed" @app.route("/unlimited") @limiter.shared_limit("1 per day", "test_scope", exempt_when=lambda: True) def never_limited_route(): return "should always pass" is_exempt = False @app.route("/conditional") @limiter.shared_limit("1 per day", "test_scope", exempt_when=lambda: is_exempt) def conditionally_limited_route(): return "conditional" with app.test_client() as cli: assert cli.get("/unlimited").status_code == 200 assert cli.get("/unlimited").status_code == 200 assert cli.get("/limited").status_code == 200 assert cli.get("/limited").status_code == 429 assert cli.get("/conditional").status_code == 429 is_exempt = True assert cli.get("/conditional").status_code == 200 is_exempt = False assert cli.get("/conditional").status_code == 429 def test_whitelisting(): app = Flask(__name__) limiter = Limiter( get_ip_from_header, app=app, default_limits=["1/minute"], headers_enabled=True, ) @app.route("/") def t(): return "test" @limiter.request_filter def w(): if request.headers.get("internal", None) == "true": return True return False with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 timeline.forward(60) assert cli.get("/").status_code == 200 for i in range(0, 10): assert cli.get("/", headers={"internal": "true"}).status_code == 200 def test_separate_method_limits(extension_factory): app, limiter = extension_factory() @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", per_method=True) def root(): return "root" with hiro.Timeline(): with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code assert 200 == cli.post("/").status_code assert 429 == cli.post("/").status_code def test_explicit_method_limits(extension_factory): app, limiter = extension_factory(default_limits=["2/second"]) @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", methods=["GET"]) def root(): return "root" with hiro.Timeline(): with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code assert 
200 == cli.post("/").status_code assert 200 == cli.post("/").status_code assert 429 == cli.post("/").status_code def test_decorated_limit_immediate(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) def append_info(fn): @wraps(fn) def __inner(*args, **kwargs): g.rate_limit = "2/minute" return fn(*args, **kwargs) return __inner @app.route("/", methods=["GET", "POST"]) @append_info @limiter.limit(lambda: g.rate_limit, per_method=True) def root(): return "root" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code def test_decorated_shared_limit_immediate(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) shared = limiter.shared_limit(lambda: g.rate_limit, "shared") def append_info(fn): @wraps(fn) def __inner(*args, **kwargs): g.rate_limit = "2/minute" return fn(*args, **kwargs) return __inner @app.route("/", methods=["GET", "POST"]) @append_info @shared def root(): return "root" @app.route("/other", methods=["GET", "POST"]) def other(): return "other" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/other").status_code assert 429 == cli.get("/other").status_code assert 200 == cli.get("/").status_code assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code def test_async_route(extension_factory): app, limiter = extension_factory() @app.route("/t1") @limiter.limit("1/minute") async def t1(): await asyncio.sleep(0.01) return "test" @app.route("/t2") @limiter.limit("1/minute") @limiter.exempt async def t2(): await asyncio.sleep(0.01) return "test" with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 def test_on_breach_callback_swallow_errors(extension_factory, caplog): app, limiter = extension_factory(swallow_errors=True) callbacks = [] def on_breach(request_limit): callbacks.append(request_limit) def failed_on_breach(request_limit): 1 / 0 @app.route("/") @limiter.limit("1/second", on_breach=on_breach) def root(): return "root" @app.route("/other") @limiter.limit("1/second") def other(): return "other" @app.route("/fail") @limiter.limit("1/second", on_breach=failed_on_breach) def fail(): return "fail" with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/other").status_code == 200 assert cli.get("/other").status_code == 429 assert cli.get("/fail").status_code == 200 assert cli.get("/fail").status_code == 429 assert len(callbacks) == 1 log = caplog.records[-1] assert log.message == "on_breach callback failed with error division by zero" assert log.levelname == "ERROR" def test_on_breach_callback_custom_response(extension_factory): def on_breach_no_response(request_limit): pass def on_breach_with_response(request_limit): return make_response( f"custom response {request_limit.limit} @ {request.path}", 429 ) def default_on_breach_with_response(request_limit): return make_response( f"default custom response {request_limit.limit} @ {request.path}", 429 ) def on_breach_invalid(): ... 
def on_breach_fail(request_limit): 1 / 0 app, limiter = extension_factory(on_breach=default_on_breach_with_response) @app.route("/") @limiter.limit("1/second", on_breach=on_breach_no_response) def root(): return "root" @app.route("/t1") @limiter.limit("1/second") def t1(): return "t1" @app.route("/t2") @limiter.limit("1/second", on_breach=on_breach_with_response) def t2(): return "t2" @app.route("/t3") @limiter.limit("1/second", on_breach=on_breach_invalid) def t3(): return "t3" @app.route("/t4") @limiter.limit("1/second", on_breach=on_breach_fail) def t4(): return "t4" with app.test_client() as cli: assert cli.get("/").status_code == 200 resp = cli.get("/") assert resp.status_code == 429 assert resp.text == "default custom response 1 per 1 second @ /" assert cli.get("/t1").status_code == 200 resp = cli.get("/t1") assert resp.status_code == 429 assert resp.text == "default custom response 1 per 1 second @ /t1" assert cli.get("/t2").status_code == 200 resp = cli.get("/t2") assert resp.status_code == 429 assert resp.text == "custom response 1 per 1 second @ /t2" resp = cli.get("/t3") assert resp.status_code == 200 resp = cli.get("/t3") assert resp.status_code == 500 resp = cli.get("/t4") assert resp.status_code == 200 resp = cli.get("/t4") assert resp.status_code == 500 def test_limit_multiple_cost(extension_factory): app, limiter = extension_factory() @app.route("/root") @limiter.limit("4/second", cost=2) def root(): return "root" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/root").status_code assert 200 == cli.get("/root").status_code assert 429 == cli.get("/root").status_code def test_limit_multiple_cost_callable(extension_factory): app, limiter = extension_factory() @app.route("/root") @limiter.limit("4/second", cost=lambda: 2) def root(): return "root" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/root").status_code assert 200 == cli.get("/root").status_code assert 429 == cli.get("/root").status_code def test_shared_limit_multiple_cost(extension_factory): app, limiter = extension_factory() shared_limit = limiter.shared_limit("4/minute", scope="a", cost=2) @app.route("/t1") @shared_limit def route1(): return "route1" @app.route("/t2") @shared_limit def route2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t2").status_code def test_shared_limit_multiple_cost_callable(extension_factory): app, limiter = extension_factory() shared_limit = limiter.shared_limit("4/minute", scope="a", cost=lambda: 2) @app.route("/t1") @shared_limit def route1(): return "route1" @app.route("/t2") @shared_limit def route2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t2").status_code def test_non_route_decoration_static_limits_override_defaults(extension_factory): app, limiter = extension_factory(default_limits=["1/second"]) @limiter.limit("2/second") def limited(): return "limited" @app.route("/t1") def route1(): return "t1" @app.route("/t2") @limiter.limit("2/second") def route2(): return "t2" @app.route("/t3") def route3(): return limited() @app.route("/t4") def route4(): @limiter.limit("2/day", override_defaults=False) def __inner(): return "inner" return __inner() @app.route("/t5/<int:param>") def route5(param: int): @limiter.limit("2/day",
override_defaults=False) def __inner1(): return "inner1" @limiter.limit("3/day") def __inner2(): return "inner2" return __inner1() if param < 10 else __inner2() with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code for i in range(2): assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t2").status_code for i in range(2): assert 200 == cli.get("/t3").status_code, i assert 429 == cli.get("/t3").status_code assert 200 == cli.get("/t4").status_code assert 429 == cli.get("/t4").status_code timeline.forward(1) assert 200 == cli.get("/t4").status_code timeline.forward(1) assert 429 == cli.get("/t4").status_code assert 200 == cli.get("/t5/1").status_code assert 429 == cli.get("/t5/1").status_code timeline.forward(1) assert 200 == cli.get("/t5/1").status_code timeline.forward(1) assert 429 == cli.get("/t5/1").status_code timeline.forward(60 * 60 * 24) assert 200 == cli.get("/t5/11").status_code assert 200 == cli.get("/t5/11").status_code assert 200 == cli.get("/t5/11").status_code assert 429 == cli.get("/t5/11").status_code def test_non_route_decoration_static_limits(extension_factory): app, limiter = extension_factory() @limiter.limit("1/second") def limited(): return "limited" @app.route("/t1") def route1(): return limited() with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code def test_non_route_decoration_dynamic_limits(extension_factory): app, limiter = extension_factory() def dynamic_limit_provider(): return "1/second" @limiter.limit(dynamic_limit_provider) def limited(): return "limited" @app.route("/t1") def route1(): return limited() with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code def test_non_route_decoration_multiple_sequential_limits_per_request(extension_factory): app, limiter = extension_factory() @limiter.limit("10/second") def l1(): return "l1" @limiter.limit("1/second") def l2(): return "l2" @app.route("/t1") def route1(): return l1() + l2() with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code def test_inner_function_decoration(extension_factory): app, limiter = extension_factory() @app.route("/t1") def route1(): @limiter.limit("5/second") def l1(): return "l1" return l1() @app.route("/t2") def route2(): @limiter.limit("1/second") def l1(): return "l1" return l1() with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code for _ in range(4): assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code assert 429 == cli.get("/t2").status_code flask-limiter-3.12/tests/test_error_handling.py000066400000000000000000000257701476516161100217750ustar00rootroot00000000000000from __future__ import annotations import json from unittest.mock import patch import hiro from flask import make_response from flask_limiter.constants import ConfigVars def test_error_message(extension_factory): app, limiter = extension_factory({ConfigVars.DEFAULT_LIMITS: "1 per day"}) @app.route("/") def null(): return "" with app.test_client() as cli: @app.errorhandler(429) def ratelimit_handler(e): return make_response( '{"error" : "rate limit %s"}' % str(e.description), 429 ) cli.get("/") assert "1 per 1 day" in 
cli.get("/").data.decode() assert {"error": "rate limit 1 per 1 day"} == json.loads( cli.get("/").data.decode() ) def test_custom_error_message(extension_factory): app, limiter = extension_factory() @app.errorhandler(429) def ratelimit_handler(e): return make_response(e.description, 429) def l1(): return "1/second" def e1(): return "dos" @app.route("/t1") @limiter.limit("1/second", error_message="uno") def t1(): return "1" @app.route("/t2") @limiter.limit(l1, error_message=e1) def t2(): return "2" s1 = limiter.shared_limit("1/second", scope="error_message", error_message="tres") @app.route("/t3") @s1 def t3(): return "3" with hiro.Timeline().freeze(): with app.test_client() as cli: cli.get("/t1") resp = cli.get("/t1") assert 429 == resp.status_code assert resp.data == b"uno" cli.get("/t2") resp = cli.get("/t2") assert 429 == resp.status_code assert resp.data == b"dos" cli.get("/t3") resp = cli.get("/t3") assert 429 == resp.status_code assert resp.data == b"tres" def test_swallow_error(extension_factory): app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per day", ConfigVars.HEADERS_ENABLED: True, ConfigVars.SWALLOW_ERRORS: True, } ) @app.route("/") def null(): return "ok" with app.test_client() as cli: with patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: def raiser(*a, **k): raise Exception hit.side_effect = raiser assert "ok" in cli.get("/").data.decode() with patch( "limits.strategies.FixedWindowRateLimiter.get_window_stats" ) as get_window_stats: def raiser(*a, **k): raise Exception get_window_stats.side_effect = raiser assert "ok" in cli.get("/").data.decode() def test_swallow_error_conditional_deduction(extension_factory): def conditional_deduct(_): return True app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per day", ConfigVars.SWALLOW_ERRORS: True, ConfigVars.DEFAULT_LIMITS_DEDUCT_WHEN: conditional_deduct, } ) @app.route("/") def null(): return "ok" with app.test_client() as cli: with patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: def raiser(*a, **k): raise Exception hit.side_effect = raiser assert "ok" in cli.get("/").data.decode() def test_no_swallow_error(extension_factory): app, limiter = extension_factory( {ConfigVars.DEFAULT_LIMITS: "1 per day", ConfigVars.HEADERS_ENABLED: True} ) @app.route("/") def null(): return "ok" @app.errorhandler(500) def e500(e): return str(e.original_exception), 500 def raiser(*a, **k): raise Exception("underlying") with app.test_client() as cli: with patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: hit.side_effect = raiser assert 500 == cli.get("/").status_code assert "underlying" == cli.get("/").data.decode() with patch( "limits.strategies.FixedWindowRateLimiter.get_window_stats" ) as get_window_stats: get_window_stats.side_effect = raiser assert 500 == cli.get("/").status_code assert "underlying" == cli.get("/").data.decode() def test_no_swallow_error_conditional_deduction(extension_factory): def conditional_deduct(_): return True app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per day", ConfigVars.SWALLOW_ERRORS: False, ConfigVars.DEFAULT_LIMITS_DEDUCT_WHEN: conditional_deduct, } ) @app.route("/") def null(): return "ok" with app.test_client() as cli: with patch("limits.strategies.FixedWindowRateLimiter.hit") as hit: def raiser(*a, **k): raise Exception hit.side_effect = raiser assert 500 == cli.get("/").status_code def test_fallback_to_memory_config(redis_connection, extension_factory): _, limiter = extension_factory( config={ConfigVars.ENABLED: True}, 
default_limits=["5/minute"], storage_uri="redis://localhost:46379", in_memory_fallback=["1/minute"], ) assert len(limiter._in_memory_fallback) == 1 assert limiter._in_memory_fallback_enabled _, limiter = extension_factory( config={ConfigVars.ENABLED: True, ConfigVars.IN_MEMORY_FALLBACK: "1/minute"}, default_limits=["5/minute"], storage_uri="redis://localhost:46379", ) assert len(limiter._in_memory_fallback) == 1 assert limiter._in_memory_fallback_enabled _, limiter = extension_factory( config={ConfigVars.ENABLED: True, ConfigVars.IN_MEMORY_FALLBACK_ENABLED: True}, default_limits=["5/minute"], storage_uri="redis://localhost:46379", ) assert limiter._in_memory_fallback_enabled _, limiter = extension_factory( config={ConfigVars.ENABLED: True}, default_limits=["5/minute"], storage_uri="redis://localhost:46379", in_memory_fallback_enabled=True, ) def test_fallback_to_memory_backoff_check(redis_connection, extension_factory): app, limiter = extension_factory( config={ConfigVars.ENABLED: True}, default_limits=["5/minute"], storage_uri="redis://localhost:46379", in_memory_fallback=["1/minute"], ) @app.route("/t1") def t1(): return "test" with app.test_client() as cli: def raiser(*a): raise Exception("redis dead") with hiro.Timeline() as timeline: with patch("redis.Redis.execute_command") as exec_command: exec_command.side_effect = raiser assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 timeline.forward(1) assert cli.get("/t1").status_code == 429 timeline.forward(2) assert cli.get("/t1").status_code == 429 timeline.forward(4) assert cli.get("/t1").status_code == 429 timeline.forward(8) assert cli.get("/t1").status_code == 429 timeline.forward(16) assert cli.get("/t1").status_code == 429 timeline.forward(32) assert cli.get("/t1").status_code == 200 # redis back to normal, but exponential backoff will only # result in it being marked after pow(2,0) seconds and next # check assert cli.get("/t1").status_code == 429 timeline.forward(2) assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 def test_fallback_to_memory_with_global_override(redis_connection, extension_factory): app, limiter = extension_factory( config={ConfigVars.ENABLED: True}, default_limits=["5/minute"], storage_uri="redis://localhost:46379", in_memory_fallback=["1/minute"], ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("3 per minute") def t2(): return "test" with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 def raiser(*a): raise Exception("redis dead") with patch("redis.Redis.execute_command") as exec_command: exec_command.side_effect = raiser assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 # redis back to normal, go back to regular limits with hiro.Timeline() as timeline: timeline.forward(2) limiter._storage.storage.flushall() assert cli.get("/t2").status_code == 200 assert 
cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 def test_fallback_to_memory(extension_factory): app, limiter = extension_factory( config={ConfigVars.ENABLED: True}, default_limits=["2/minute"], storage_uri="redis://localhost:46379", in_memory_fallback_enabled=True, headers_enabled=True, ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("1 per minute") def t2(): return "test" with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 def raiser(*a): raise Exception("redis dead") with patch("redis.Redis.execute_command") as exec_command: exec_command.side_effect = raiser assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 with hiro.Timeline() as timeline: timeline.forward(1) limiter._storage.storage.flushall() assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 429 flask-limiter-3.12/tests/test_flask_ext.py000066400000000000000000000721211476516161100207500ustar00rootroot00000000000000""" """ from __future__ import annotations import logging import time from collections import Counter from unittest import mock import hiro from flask import Flask, abort, make_response, request from werkzeug.exceptions import BadRequest from flask_limiter.constants import ConfigVars from flask_limiter.extension import Limiter from flask_limiter.util import get_remote_address def test_reset(extension_factory): app, limiter = extension_factory({ConfigVars.DEFAULT_LIMITS: "1 per day"}) @app.route("/") def null(): return "Hello Reset" with app.test_client() as cli: cli.get("/") assert "1 per 1 day" in cli.get("/").data.decode() limiter.reset() assert "Hello Reset" == cli.get("/").data.decode() assert "1 per 1 day" in cli.get("/").data.decode() def test_reset_unsupported(extension_factory, memcached_connection): app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per day", ConfigVars.STORAGE_URI: "memcached://localhost:31211", } ) @app.route("/") def null(): return "Hello Reset" with app.test_client() as cli: cli.get("/") assert "1 per 1 day" in cli.get("/").data.decode() # no op with memcached but no error raised limiter.reset() assert "1 per 1 day" in cli.get("/").data.decode() def test_static_exempt(extension_factory): app, limiter = extension_factory(default_limits=["1/minute"]) @app.route("/") def root(): return "root" with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/static/image.png").status_code == 200 assert cli.get("/static/image.png").status_code == 200 def test_combined_rate_limits(extension_factory): app, limiter = extension_factory( {ConfigVars.DEFAULT_LIMITS: "1 per hour; 10 per day"} ) @app.route("/t1") @limiter.limit("100 per hour;10/minute") def t1(): return "t1" @app.route("/t2") def t2(): return "t2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t2").status_code def test_defaults_per_method(extension_factory): app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per hour", ConfigVars.DEFAULT_LIMITS_PER_METHOD: True, } ) 
@app.route("/t1", methods=["GET", "POST"]) def t1(): return "t1" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code assert 200 == cli.post("/t1").status_code assert 429 == cli.post("/t1").status_code def test_default_limit_with_exemption(extension_factory): def is_backdoor(): return request.headers.get("backdoor") == "true" app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per hour", ConfigVars.DEFAULT_LIMITS_EXEMPT_WHEN: is_backdoor, } ) @app.route("/t1") def t1(): return "test" with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/t1", headers={"backdoor": "true"}).status_code == 200 assert cli.get("/t1", headers={"backdoor": "true"}).status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 timeline.forward(3600) assert cli.get("/t1").status_code == 200 def test_default_limit_with_variable_cost(extension_factory): def cost_fn(): if request.headers.get("suspect"): return 2 return 1 app, limiter = extension_factory( { ConfigVars.APPLICATION_LIMITS: "10 per day", ConfigVars.DEFAULT_LIMITS: "2 per hour", ConfigVars.DEFAULT_LIMITS_COST: cost_fn, ConfigVars.APPLICATION_LIMITS_COST: cost_fn, } ) @app.route("/t1") def t1(): return "test" @app.route("/t2") def t2(): return "test" with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 timeline.forward(3600) assert cli.get("/t1", headers={"suspect": 1}).status_code == 200 assert cli.get("/t1", headers={"suspect": 1}).status_code == 429 assert cli.get("/t2", headers={"suspect": 1}).status_code == 200 timeline.forward(3600) assert cli.get("/t2", headers={"suspect": 1}).status_code == 200 timeline.forward(3600) assert cli.get("/t2", headers={"suspect": 1}).status_code == 200 timeline.forward(3600) assert cli.get("/t2", headers={"suspect": 1}).status_code == 429 def test_default_limit_with_conditional_deduction(extension_factory): def failed_request(response): return response.status_code != 200 app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "1 per hour", ConfigVars.DEFAULT_LIMITS_DEDUCT_WHEN: failed_request, } ) @app.route("/t1/<path>") def t1(path): if path != "1": raise BadRequest() return path with hiro.Timeline() as timeline: with app.test_client() as cli: assert cli.get("/t1/1").status_code == 200 assert cli.get("/t1/1").status_code == 200 assert cli.get("/t1/2").status_code == 400 assert cli.get("/t1/1").status_code == 429 assert cli.get("/t1/2").status_code == 429 timeline.forward(3600) assert cli.get("/t1/1").status_code == 200 assert cli.get("/t1/2").status_code == 400 def test_deduct_when_custom_cost_moving_window(extension_factory): app, limiter = extension_factory(strategy="moving-window") @app.route("/") @limiter.limit("20/minute", cost=15, deduct_when=lambda r: r.status_code == 200) def root(): return "root" with hiro.Timeline(): with app.test_client() as cli: assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 def test_key_func(extension_factory): app, limiter = extension_factory() @app.route("/t1") @limiter.limit("100 per minute", key_func=lambda: "test") def t1(): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: for i in range(0, 100): assert ( 200 == cli.get( "/t1", headers={"X_FORWARDED_FOR": "127.0.0.2"} ).status_code ) assert 429 == cli.get("/t1").status_code
def test_logging(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) limiter = Limiter(get_remote_address, app=app) @app.route("/t1") @limiter.limit("1/minute") def t1(): return "test" with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 429 == cli.get("/t1").status_code assert len(caplog.records) == 1 assert caplog.records[0].levelname == "INFO" def test_reuse_logging(caplog): caplog.set_level(logging.INFO) app = Flask(__name__) app_handler = mock.Mock() app_handler.level = logging.INFO app.logger.addHandler(app_handler) limiter = Limiter(get_remote_address, app=app) for handler in app.logger.handlers: limiter.logger.addHandler(handler) @app.route("/t1") @limiter.limit("1/minute") def t1(): return "42" with app.test_client() as cli: cli.get("/t1") cli.get("/t1") assert app_handler.handle.call_count == 1 def test_disabled_flag(extension_factory): app, limiter = extension_factory( config={ConfigVars.ENABLED: False}, default_limits=["1/minute"] ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("10 per minute") def t2(): return "test" with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 for i in range(0, 10): assert cli.get("/t2").status_code == 200 assert cli.get("/t2").status_code == 200 def test_multiple_apps(): app1 = Flask(__name__) app2 = Flask(__name__) limiter = Limiter(get_remote_address, default_limits=["1/second"]) limiter.init_app(app1) limiter.init_app(app2) @app1.route("/ping") def ping(): return "PONG" @app1.route("/slowping") @limiter.limit("1/minute") def slow_ping(): return "PONG" @app2.route("/ping") @limiter.limit("2/second") def ping_2(): return "PONG" @app2.route("/slowping") @limiter.limit("2/minute") def slow_ping_2(): return "PONG" with hiro.Timeline().freeze() as timeline: with app1.test_client() as cli: assert cli.get("/ping").status_code == 200 assert cli.get("/ping").status_code == 429 timeline.forward(1) assert cli.get("/ping").status_code == 200 assert cli.get("/slowping").status_code == 200 timeline.forward(59) assert cli.get("/slowping").status_code == 429 timeline.forward(1) assert cli.get("/slowping").status_code == 200 with app2.test_client() as cli: assert cli.get("/ping").status_code == 200 assert cli.get("/ping").status_code == 200 assert cli.get("/ping").status_code == 429 timeline.forward(1) assert cli.get("/ping").status_code == 200 assert cli.get("/slowping").status_code == 200 timeline.forward(59) assert cli.get("/slowping").status_code == 200 assert cli.get("/slowping").status_code == 429 timeline.forward(1) assert cli.get("/slowping").status_code == 200 def test_headers_no_breach(): app = Flask(__name__) limiter = Limiter( get_remote_address, app=app, application_limits=["60/minute"], default_limits=["10/minute"], headers_enabled=True, ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("2/second; 5 per minute; 10/hour") def t2(): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: resp = cli.get("/t1") assert resp.headers.get("X-RateLimit-Limit") == "10" assert resp.headers.get("X-RateLimit-Remaining") == "9" assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 61)) assert resp.headers.get("Retry-After") == str(60) resp = cli.get("/t2") assert resp.headers.get("X-RateLimit-Limit") == "2" assert resp.headers.get("X-RateLimit-Remaining") == "1" assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 2)) assert resp.headers.get("Retry-After") == 
str(1) def test_headers_application_limits(): app = Flask(__name__) limiter = Limiter( get_remote_address, app=app, application_limits=["60/minute"], headers_enabled=True, ) @app.route("/t1") def t1(): return "test" @app.route("/t2") @limiter.limit("2/second; 5 per minute; 10/hour") def t2(): return "test" with hiro.Timeline().freeze(): with app.test_client() as cli: resp = cli.get("/t1") assert resp.headers.get("X-RateLimit-Limit") == "60" assert resp.headers.get("X-RateLimit-Remaining") == "59" assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 61)) assert resp.headers.get("Retry-After") == str(60) resp = cli.get("/t2") assert resp.headers.get("X-RateLimit-Limit") == "2" assert resp.headers.get("X-RateLimit-Remaining") == "1" assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 2)) assert resp.headers.get("Retry-After") == str(1) def test_headers_breach(): app = Flask(__name__) limiter = Limiter( get_remote_address, app=app, default_limits=["10/minute"], headers_enabled=True, ) @app.route("/t1") @limiter.limit("2/second; 10 per minute; 20/hour") def t(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(10): cli.get("/t1") timeline.forward(1) resp = cli.get("/t1") timeline.forward(1) assert resp.headers.get("X-RateLimit-Limit") == "10" assert resp.headers.get("X-RateLimit-Remaining") == "0" assert resp.headers.get("X-RateLimit-Reset") == str(int(time.time() + 50)) assert resp.headers.get("Retry-After") == str(int(50)) def test_retry_after(): app = Flask(__name__) _ = Limiter( get_remote_address, app=app, default_limits=["1/minute"], headers_enabled=True, ) @app.route("/t1") def t(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: resp = cli.get("/t1") retry_after = int(resp.headers.get("Retry-After")) assert retry_after > 0 timeline.forward(retry_after) resp = cli.get("/t1") assert resp.status_code == 200 def test_retry_after_exists_seconds(): app = Flask(__name__) _ = Limiter( get_remote_address, app=app, default_limits=["1/minute"], headers_enabled=True, ) @app.route("/t1") def t(): return "", 200, {"Retry-After": "1000000"} with app.test_client() as cli: resp = cli.get("/t1") retry_after = int(resp.headers.get("Retry-After")) assert retry_after > 1000 def test_retry_after_exists_rfc1123(): app = Flask(__name__) _ = Limiter( get_remote_address, app=app, default_limits=["1/minute"], headers_enabled=True, ) @app.route("/t1") def t(): return "", 200, {"Retry-After": "Sun, 06 Nov 2032 01:01:01 GMT"} with app.test_client() as cli: resp = cli.get("/t1") retry_after = int(resp.headers.get("Retry-After")) assert retry_after > 1000 def test_custom_headers_from_config(): app = Flask(__name__) app.config.setdefault(ConfigVars.HEADER_LIMIT, "X-Limit") app.config.setdefault(ConfigVars.HEADER_REMAINING, "X-Remaining") app.config.setdefault(ConfigVars.HEADER_RESET, "X-Reset") limiter = Limiter( get_remote_address, app=app, default_limits=["10/minute"], headers_enabled=True, ) @app.route("/t1") @limiter.limit("2/second; 10 per minute; 20/hour") def t(): return "test" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for i in range(11): resp = cli.get("/t1") timeline.forward(1) assert resp.headers.get("X-Limit") == "10" assert resp.headers.get("X-Remaining") == "0" assert resp.headers.get("X-Reset") == str(int(time.time() + 50)) def test_application_shared_limit(extension_factory): app, limiter = extension_factory(application_limits=["2/minute"]) 
@app.route("/t1") def t1(): return "route1" @app.route("/t2") def t2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t1").status_code def test_application_limit_conditional(extension_factory): def app_limit_exempt(): return "X" in request.headers def app_limit_deduct(response): return response.status_code == 400 app, limiter = extension_factory( application_limits=["2/minute"], application_limits_exempt_when=app_limit_exempt, application_limits_deduct_when=app_limit_deduct, ) @app.route("/t1", methods=["GET", "POST"]) def t1(): return "route1" @app.route("/t2") def t2(): abort(400) with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 400 == cli.get("/t2").status_code assert 200 == cli.get("/t1").status_code assert 400 == cli.get("/t2").status_code assert 429 == cli.get("/t1").status_code assert 429 == cli.get("/t2").status_code def test_application_limit_per_method(extension_factory): app, limiter = extension_factory( application_limits=["2/minute"], application_limits_per_method=True, ) @app.route("/t1", methods=["GET", "POST"]) def t1(): return "route1" @app.route("/t2", methods=["GET", "POST"]) def t2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t1").status_code assert 429 == cli.get("/t2").status_code assert 200 == cli.post("/t1").status_code assert 200 == cli.post("/t2").status_code assert 429 == cli.post("/t1").status_code assert 429 == cli.post("/t2").status_code def test_callable_default_limit(extension_factory): app, limiter = extension_factory( default_limits=[ lambda: request.headers.get("suspect", 0) and "1/minute" or "2/minute" ] ) @app.route("/t1") def t1(): return "t1" @app.route("/t2") def t2(): return "t2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 assert cli.get("/t2", headers={"suspect": "1"}).status_code == 200 assert cli.get("/t2", headers={"suspect": "1"}).status_code == 429 def test_callable_application_limit(extension_factory): app, limiter = extension_factory( application_limits=[ lambda: request.headers.get("suspect", 0) and "1/minute" or "2/minute" ] ) @app.route("/t1") def t1(): return "t1" @app.route("/t2") def t2(): return "t2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 200 assert cli.get("/t2").status_code == 429 assert cli.get("/t1", headers={"suspect": 1}).status_code == 200 assert cli.get("/t2", headers={"suspect": 1}).status_code == 429 def test_no_auto_check(extension_factory): app, limiter = extension_factory(auto_check=False) @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", per_method=True) def root(): return "root" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 200 == cli.get("/").status_code def test_no_auto_check_custom_before_request(extension_factory): app, limiter = extension_factory(auto_check=False) @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", per_method=True) def root(): return "root" @app.before_request def _(): limiter.check() with hiro.Timeline().freeze(): with 
app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code def test_fail_on_first_breach(extension_factory): app, limiter = extension_factory(fail_on_first_breach=True) current_limits = [] @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", per_method=True) @limiter.limit("2/minute", per_method=True) def root(): return "root" @app.after_request def collect_current_limits(r): current_limits.extend(limiter.current_limits) return r with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code timeline.forward(1) assert 200 == cli.get("/").status_code timeline.forward(1) assert 429 == cli.get("/").status_code assert not current_limits[0].breached assert not current_limits[1].breached assert current_limits[2].breached def test_no_fail_on_first_breach(extension_factory): app, limiter = extension_factory(fail_on_first_breach=False) @app.route("/", methods=["GET", "POST"]) @limiter.limit("1/second", per_method=True) @limiter.limit("2/minute", per_method=True) def root(): return "root" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/").status_code assert 429 == cli.get("/").status_code timeline.forward(1) assert 429 == cli.get("/").status_code def test_default_on_breach_callback(extension_factory): collected = Counter() def on_breach(limit): collected[limit.key] += 1 app, limiter = extension_factory(on_breach=on_breach, default_limits=["2/second"]) @app.route("/") def root(): return "groot" @app.route("/sub") @limiter.limit("1/second") def sub_path(): return "subgroot" with app.test_client() as cli: cli.get("/") cli.get("/") cli.get("/") cli.get("/sub") cli.get("/sub") cli.get("/sub") assert collected["LIMITER/127.0.0.1/root/2/1/second"] == 1 assert collected["LIMITER/127.0.0.1/sub_path/1/1/second"] == 2 def test_custom_key_prefix(redis_connection, extension_factory): app1, limiter1 = extension_factory( key_prefix="moo", storage_uri="redis://localhost:46379" ) app2, limiter2 = extension_factory( {ConfigVars.KEY_PREFIX: "cow"}, storage_uri="redis://localhost:46379" ) app3, limiter3 = extension_factory(storage_uri="redis://localhost:46379") @app1.route("/test") @limiter1.limit("1/day") def app1_test(): return "app1 test" @app2.route("/test") @limiter2.limit("1/day") def app2_test(): return "app1 test" @app3.route("/test") @limiter3.limit("1/day") def app3_test(): return "app1 test" with app1.test_client() as cli: resp = cli.get("/test") assert 200 == resp.status_code resp = cli.get("/test") assert 429 == resp.status_code with app2.test_client() as cli: resp = cli.get("/test") assert 200 == resp.status_code resp = cli.get("/test") assert 429 == resp.status_code with app3.test_client() as cli: resp = cli.get("/test") assert 200 == resp.status_code resp = cli.get("/test") assert 429 == resp.status_code def test_multiple_instances_no_key_prefix(): app = Flask(__name__) limiter1 = Limiter(get_remote_address, app=app) limiter2 = Limiter(get_remote_address, app=app) @app.route("/test1") @limiter2.limit("1/second") def app_test1(): return "app test1" @app.route("/test2") @limiter1.limit("10/minute") @limiter2.limit("1/second") def app_test2(): return "app test2" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/test1").status_code == 200 assert cli.get("/test1").status_code == 429 assert cli.get("/test2").status_code == 200 assert cli.get("/test2").status_code == 429 for 
i in range(8): timeline.forward(1) assert cli.get("/test1").status_code == 200 assert cli.get("/test2").status_code == 200 timeline.forward(1) assert cli.get("/test1").status_code == 200 assert cli.get("/test2").status_code == 429 timeline.forward(59) assert cli.get("/test2").status_code == 200 def test_independent_instances_by_key_prefix(): app = Flask(__name__) limiter1 = Limiter(get_remote_address, key_prefix="lmt1", app=app) limiter2 = Limiter(get_remote_address, key_prefix="lmt2", app=app) @app.route("/test1") @limiter2.limit("1/second") def app_test1(): return "app test1" @app.route("/test2") @limiter1.limit("10/minute") @limiter2.limit("1/second") def app_test2(): return "app test2" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/test1").status_code == 200 assert cli.get("/test2").status_code == 200 resp = cli.get("/test1") assert resp.status_code == 429 assert "1 per 1 second" in resp.data.decode() resp = cli.get("/test2") assert resp.status_code == 429 assert "1 per 1 second" in resp.data.decode() for i in range(8): assert cli.get("/test1").status_code == 429 assert cli.get("/test2").status_code == 429 assert cli.get("/test2").status_code == 429 timeline.forward(1) assert cli.get("/test1").status_code == 200 assert cli.get("/test2").status_code == 429 timeline.forward(59) assert cli.get("/test1").status_code == 200 assert cli.get("/test2").status_code == 200 def test_multiple_limiters_default_limits(): app = Flask(__name__) Limiter(get_remote_address, key_prefix="lmt1", app=app, default_limits=["1/second"]) Limiter( get_remote_address, key_prefix="lmt2", default_limits=["10/minute"], app=app, ) @app.route("/test1") def app_test1(): return "app test1" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/test1").status_code == 200 assert cli.get("/test1").status_code == 429 for _ in range(9): timeline.forward(1) assert cli.get("/test1").status_code == 200 timeline.forward(1) assert cli.get("/test1").status_code == 429 timeline.forward(50) assert cli.get("/test1").status_code == 200 def test_meta_limits(extension_factory): def meta_breach_cb(limit): return make_response("Would you like some tea?", 429) app, limiter = extension_factory( default_limits=["2/second"], meta_limits=["2/minute; 3/hour", lambda: "4/day"], on_meta_breach=meta_breach_cb, headers_enabled=True, ) @app.route("/") def root(): return "root" @app.route("/exempt") @limiter.exempt def exempt(): return "exempt" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: for _ in range(2): assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 timeline.forward(1) # blocked because of max 2 breaches/minute assert cli.get("/").status_code == 429 assert cli.get("/exempt").status_code == 200 timeline.forward(59) assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 assert cli.get("/exempt").status_code == 200 timeline.forward(59) # blocked because of max 3 breaches/hour response = cli.get("/") assert response.text == "Would you like some tea?" 
assert response.status_code == 429 assert response.headers.get("X-RateLimit-Limit") == "3" assert response.headers.get("X-RateLimit-Remaining") == "0" assert cli.get("/exempt").status_code == 200 # forward to 1 hour since start timeline.forward(60 * 58) assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 200 assert cli.get("/").status_code == 429 # forward another hour and it should now be blocked for the day timeline.forward(60 * 60) response = cli.get("/") assert response.status_code == 429 assert response.headers.get("X-RateLimit-Limit") == "4" assert response.headers.get("X-RateLimit-Remaining") == "0" # forward 22 hours timeline.forward(60 * 60 * 22) assert cli.get("/").status_code == 200 flask-limiter-3.12/tests/test_regressions.py000066400000000000000000000145351476516161100213400ustar00rootroot00000000000000""" """ from __future__ import annotations import time import hiro from flask import Blueprint from flask_limiter.constants import ConfigVars def test_redis_request_slower_than_fixed_window(redis_connection, extension_factory): app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "5 per second", ConfigVars.STORAGE_URI: "redis://localhost:46379", ConfigVars.STRATEGY: "fixed-window", ConfigVars.HEADERS_ENABLED: True, } ) @app.route("/t1") def t1(): time.sleep(1.1) return "t1" with app.test_client() as cli: resp = cli.get("/t1") assert resp.headers["X-RateLimit-Remaining"] == "5" def test_redis_request_slower_than_moving_window(redis_connection, extension_factory): app, limiter = extension_factory( { ConfigVars.DEFAULT_LIMITS: "5 per second", ConfigVars.STORAGE_URI: "redis://localhost:46379", ConfigVars.STRATEGY: "moving-window", ConfigVars.HEADERS_ENABLED: True, } ) @app.route("/t1") def t1(): time.sleep(1.1) return "t1" with app.test_client() as cli: resp = cli.get("/t1") assert resp.headers["X-RateLimit-Remaining"] == "5" def test_dynamic_limits(extension_factory): app, limiter = extension_factory( {ConfigVars.STRATEGY: "moving-window", ConfigVars.HEADERS_ENABLED: True} ) def func(*a): return "1/second; 2/minute" @app.route("/t1") @limiter.limit(func) def t1(): return "t1" with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 timeline.forward(2) assert cli.get("/t1").status_code == 200 assert cli.get("/t1").status_code == 429 def test_invalid_ratelimit_key(extension_factory): app, limiter = extension_factory({ConfigVars.HEADERS_ENABLED: True}) def func(*a): return None @app.route("/t1") @limiter.limit("2/second", key_func=func) def t1(): return "t1" with app.test_client() as cli: cli.get("/t1") cli.get("/t1") cli.get("/t1") assert cli.get("/t1").status_code == 200 limiter.limit("1/second", key_func=lambda: "key")(t1) cli.get("/t1") assert cli.get("/t1").status_code == 429 def test_custom_key_prefix_with_headers(redis_connection, extension_factory): app1, limiter1 = extension_factory( key_prefix="moo", storage_uri="redis://localhost:46379", headers_enabled=True ) app2, limiter2 = extension_factory( key_prefix="cow", storage_uri="redis://localhost:46379", headers_enabled=True ) @app1.route("/test") @limiter1.limit("1/minute") def t1(): return "app1 test" @app2.route("/test") @limiter2.limit("1/minute") def t2(): return "app2 test" with app1.test_client() as cli: resp = cli.get("/test") assert 200 == resp.status_code resp = cli.get("/test") assert resp.headers.get("Retry-After") == str(60) assert 429 == resp.status_code with app2.test_client() as 
cli: resp = cli.get("/test") assert 200 == resp.status_code resp = cli.get("/test") assert resp.headers.get("Retry-After") == str(60) assert 429 == resp.status_code def test_default_limits_with_per_route_limit(extension_factory): app, limiter = extension_factory(application_limits=["3/minute"]) @app.route("/explicit") @limiter.limit("1/minute") def explicit(): return "explicit" @app.route("/default") def default(): return "default" with app.test_client() as cli: with hiro.Timeline().freeze() as timeline: assert 200 == cli.get("/explicit").status_code assert 429 == cli.get("/explicit").status_code assert 200 == cli.get("/default").status_code assert 429 == cli.get("/default").status_code timeline.forward(60) assert 200 == cli.get("/explicit").status_code assert 200 == cli.get("/default").status_code def test_application_limits_from_config(extension_factory): app, limiter = extension_factory( config={ ConfigVars.APPLICATION_LIMITS: "4/second", ConfigVars.DEFAULT_LIMITS: "1/second", ConfigVars.DEFAULT_LIMITS_PER_METHOD: True, } ) @app.route("/root") def root(): return "null" @app.route("/test", methods=["GET", "PUT"]) @limiter.limit("3/second", methods=["GET"]) def test(): return "test" with app.test_client() as cli: with hiro.Timeline() as timeline: assert cli.get("/root").status_code == 200 assert cli.get("/root").status_code == 429 assert cli.get("/test").status_code == 200 assert cli.get("/test").status_code == 200 assert cli.get("/test").status_code == 429 timeline.forward(1) assert cli.get("/test").status_code == 200 assert cli.get("/test").status_code == 200 assert cli.get("/test").status_code == 200 assert cli.get("/test").status_code == 429 timeline.forward(1) assert cli.put("/test").status_code == 200 assert cli.put("/test").status_code == 429 assert cli.get("/test").status_code == 200 assert cli.get("/root").status_code == 200 assert cli.get("/test").status_code == 429 def test_endpoint_with_dot_but_not_blueprint(extension_factory): """ https://github.com/alisaifee/flask-limiter/issues/336 """ app, limiter = extension_factory(default_limits=["2/day"]) def route(): return "42" app.add_url_rule("/teapot/iam", "_teapot.iam", route) bp = Blueprint("teapot", __name__, url_prefix="/teapot") @bp.route("/") def bp_route(): return "43" app.register_blueprint(bp) limiter.limit("1/day")(bp) with app.test_client() as cli: assert cli.get("/teapot/iam").status_code == 200 assert cli.get("/teapot/iam").status_code == 200 assert cli.get("/teapot/iam").status_code == 429 assert cli.get("/teapot/").status_code == 200 assert cli.get("/teapot/").status_code == 429 flask-limiter-3.12/tests/test_storage.py000066400000000000000000000032011476516161100204250ustar00rootroot00000000000000from __future__ import annotations import hiro import pytest @pytest.fixture(autouse=True) def setup(redis_connection, memcached_connection, mongo_connection): redis_connection.flushall() memcached_connection.flush_all() @pytest.mark.parametrize( "storage_uri", [ "memcached://localhost:31211", "redis://localhost:46379", "mongodb://localhost:47017", ], ) def test_fixed_window(extension_factory, storage_uri): app, limiter = extension_factory( application_limits=["2/minute"], storage_uri=storage_uri, strategy="fixed-window", ) @app.route("/t1") def t1(): return "route1" @app.route("/t2") def t2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t1").status_code @pytest.mark.parametrize( 
"storage_uri", [ "redis://localhost:46379", "mongodb://localhost:47017", ], ) def test_moving_window(extension_factory, storage_uri): app, limiter = extension_factory( application_limits=["2/minute"], storage_uri=storage_uri, strategy="moving-window", ) @app.route("/t1") def t1(): return "route1" @app.route("/t2") def t2(): return "route2" with hiro.Timeline().freeze(): with app.test_client() as cli: assert 200 == cli.get("/t1").status_code assert 200 == cli.get("/t2").status_code assert 429 == cli.get("/t1").status_code flask-limiter-3.12/tests/test_views.py000066400000000000000000000150111476516161100201200ustar00rootroot00000000000000from __future__ import annotations import flask_restful import hiro import pytest from flask import request from flask.views import MethodView, View def test_pluggable_views(extension_factory): app, limiter = extension_factory(default_limits=["1/hour"]) class Va(View): methods = ["GET", "POST"] decorators = [limiter.limit("2/second")] def dispatch_request(self): return request.method.lower() class Vb(View): methods = ["GET"] decorators = [limiter.limit("1/second, 3/minute")] def dispatch_request(self): return request.method.lower() class Vc(View): methods = ["GET"] def dispatch_request(self): return request.method.lower() app.add_url_rule("/a", view_func=Va.as_view("a")) app.add_url_rule("/b", view_func=Vb.as_view("b")) app.add_url_rule("/c", view_func=Vc.as_view("c")) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/a").status_code assert 200 == cli.get("/a").status_code assert 429 == cli.post("/a").status_code assert 200 == cli.get("/b").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 429 == cli.get("/b").status_code assert 200 == cli.get("/c").status_code assert 429 == cli.get("/c").status_code def test_pluggable_method_views(extension_factory): app, limiter = extension_factory(default_limits=["1/hour"]) class Va(MethodView): decorators = [limiter.limit("2/second")] def get(self): return request.method.lower() def post(self): return request.method.lower() class Vb(MethodView): decorators = [limiter.limit("1/second, 3/minute")] def get(self): return request.method.lower() class Vc(MethodView): def get(self): return request.method.lower() class Vd(MethodView): decorators = [limiter.limit("1/minute", methods=["get"])] def get(self): return request.method.lower() def post(self): return request.method.lower() app.add_url_rule("/a", view_func=Va.as_view("a")) app.add_url_rule("/b", view_func=Vb.as_view("b")) app.add_url_rule("/c", view_func=Vc.as_view("c")) app.add_url_rule("/d", view_func=Vd.as_view("d")) with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/a").status_code assert 200 == cli.get("/a").status_code assert 429 == cli.get("/a").status_code assert 429 == cli.post("/a").status_code assert 200 == cli.get("/b").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 429 == cli.get("/b").status_code assert 200 == cli.get("/c").status_code assert 429 == cli.get("/c").status_code assert 200 == cli.get("/d").status_code assert 429 == cli.get("/d").status_code assert 200 == cli.post("/d").status_code assert 429 == cli.post("/d").status_code timeline.forward(3600) assert 200 == cli.post("/d").status_code def test_flask_restful_resource(extension_factory): 
app, limiter = extension_factory(default_limits=["1/hour"]) api = flask_restful.Api(app) class Va(flask_restful.Resource): decorators = [limiter.limit("2/second")] def get(self): return request.method.lower() def post(self): return request.method.lower() class Vb(flask_restful.Resource): decorators = [limiter.limit("1/second, 3/minute")] def get(self): return request.method.lower() class Vc(flask_restful.Resource): def get(self): return request.method.lower() class Vd(flask_restful.Resource): decorators = [ limiter.limit("2/second", methods=["GET"]), limiter.limit("1/second", methods=["POST"]), ] def get(self): return request.method.lower() def post(self): return request.method.lower() api.add_resource(Va, "/a") api.add_resource(Vb, "/b") api.add_resource(Vc, "/c") api.add_resource(Vd, "/d") with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/a").status_code assert 200 == cli.get("/a").status_code assert 429 == cli.get("/a").status_code assert 429 == cli.post("/a").status_code assert 200 == cli.get("/b").status_code assert 200 == cli.get("/d").status_code assert 200 == cli.get("/d").status_code assert 429 == cli.get("/d").status_code assert 200 == cli.post("/d").status_code assert 429 == cli.post("/d").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 200 == cli.get("/b").status_code timeline.forward(1) assert 429 == cli.get("/b").status_code assert 200 == cli.get("/c").status_code assert 429 == cli.get("/c").status_code @pytest.mark.xfail def test_flask_restx_resource(extension_factory): import flask_restx app, limiter = extension_factory() api = flask_restx.Api(app) ns = api.namespace("test") @ns.route("/a") class Va(flask_restx.Resource): decorators = [limiter.limit("2/second", per_method=True)] def get(self): return request.method.lower() def post(self): return request.method.lower() with hiro.Timeline().freeze() as timeline: with app.test_client() as cli: assert 200 == cli.get("/test/a").status_code assert 200 == cli.get("/test/a").status_code assert 200 == cli.post("/test/a").status_code assert 200 == cli.post("/test/a").status_code assert 429 == cli.get("/test/a").status_code assert 429 == cli.post("/test/a").status_code timeline.forward(1) assert 200 == cli.get("/test/a").status_code assert 200 == cli.post("/test/a").status_code flask-limiter-3.12/versioneer.py000066400000000000000000002512031476516161100167500ustar00rootroot00000000000000 # Version: 0.29 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/python-versioneer/python-versioneer * Brian Warner * License: Public Domain (Unlicense) * Compatible with: Python 3.7, 3.8, 3.9, 3.10, 3.11 and pypy3 * [![Latest Version][pypi-image]][pypi-url] * [![Build Status][travis-image]][travis-url] This is a tool for managing a recorded version number in setuptools-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install Versioneer provides two installation modes. The "classic" vendored mode installs a copy of versioneer into your repository. The experimental build-time dependency mode is intended to allow you to skip this step and simplify the process of upgrading. 
### Vendored mode * `pip install versioneer` to somewhere in your $PATH * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is available, so you can also use `conda install -c conda-forge versioneer` * add a `[tool.versioneer]` section to your `pyproject.toml` or a `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) * Note that you will need to add `tomli; python_version < "3.11"` to your build-time dependencies if you use `pyproject.toml` * run `versioneer install --vendor` in your source tree, commit the results * verify version information with `python setup.py version` ### Build-time dependency mode * `pip install versioneer` to somewhere in your $PATH * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is available, so you can also use `conda install -c conda-forge versioneer` * add a `[tool.versioneer]` section to your `pyproject.toml` or a `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) * add `versioneer` (with `[toml]` extra, if configuring in `pyproject.toml`) to the `requires` key of the `build-system` table in `pyproject.toml`: ```toml [build-system] requires = ["setuptools", "versioneer[toml]"] build-backend = "setuptools.build_meta" ``` * run `versioneer install --no-vendor` in your source tree, commit the results * verify version information with `python setup.py version` ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes). The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. 
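In spirit, that import-time lookup amounts to running `git describe --tags --dirty --always` and splitting the output into the pieces described under "Version Identifiers" above. The following is only an illustrative sketch of that split, not Versioneer's actual implementation (the helper name `parse_describe` is invented here):

```python
import re

def parse_describe(describe: str) -> dict:
    # Split "TAG-DISTANCE-gSHORTHASH[-dirty]" output into its components.
    dirty = describe.endswith("-dirty")
    if dirty:
        describe = describe[: -len("-dirty")]
    match = re.match(r"(?P<tag>.+)-(?P<distance>\d+)-g(?P<short>[0-9a-f]+)$", describe)
    if match is None:
        # either exactly on a tag, or a bare commit id when no tag is reachable
        return {"tag": describe, "distance": 0, "short": None, "dirty": dirty}
    return {
        "tag": match.group("tag"),
        "distance": int(match.group("distance")),
        "short": match.group("short"),
        "dirty": dirty,
    }

# "one revision past the 0.7 tag, revision id 574ab98, with uncommitted changes"
assert parse_describe("0.7-1-g574ab98-dirty") == {
    "tag": "0.7",
    "distance": 1,
    "short": "574ab98",
    "dirty": True,
}
```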
`_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. 
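Tying the two previous sections together: from the outside, the whole dictionary is available through the top-level module, its `error` entry can be used to fail loudly rather than ship an "unknown" version, and its `version` entry is the style-rendered string this section describes. The snippet below is only a sketch of one way a `setup.py` or build script might consume it (the messages and the decision to abort are suggestions, not something Versioneer does for you), assuming the project has already been configured as described above:

```python
import versioneer

versions = versioneer.get_versions()
if versions["error"]:
    # avoid e.g. creating tarballs with a version string of "unknown"
    raise SystemExit("cannot determine version: %s" % versions["error"])

print("building version", versions["version"])      # e.g. "0.11+2.g1076c97.dirty"
print("from commit", versions["full-revisionid"])   # full revision id, useful in bug reports
if versions["dirty"]:
    print("note: the working tree has uncommitted changes")
```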
For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This section details the most significant ones. More can be found on Github [issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other languages) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out.
Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg` and `pyproject.toml`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install --[no-]vendor` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## Similar projects * [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time dependency * [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of versioneer * [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the "Unlicense", as described in https://unlicense.org/. [pypi-image]: https://img.shields.io/pypi/v/versioneer.svg [pypi-url]: https://pypi.python.org/pypi/versioneer/ [travis-image]: https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error # pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with # pylint:disable=attribute-defined-outside-init,too-many-arguments import configparser import errno import functools import json import os import re import subprocess import sys from pathlib import Path from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple, Union, cast have_tomllib = True if sys.version_info >= (3, 11): import tomllib else: try: import tomli as tomllib except ImportError: have_tomllib = False class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str versionfile_source: str versionfile_build: Optional[str] parentdir_prefix: Optional[str] verbose: Optional[bool] def get_root() -> str: """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . 
""" root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") if not ( os.path.exists(setup_py) or os.path.exists(pyproject_toml) or os.path.exists(versioneer_py) ): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") if not ( os.path.exists(setup_py) or os.path.exists(pyproject_toml) or os.path.exists(versioneer_py) ): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. my_path = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root def get_config_from_root(root: str) -> VersioneerConfig: """Read the project setup.cfg file to determine Versioneer config.""" # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . root_pth = Path(root) pyproject_toml = root_pth / "pyproject.toml" setup_cfg = root_pth / "setup.cfg" section: Union[Dict[str, Any], configparser.SectionProxy, None] = None if pyproject_toml.exists() and have_tomllib: try: with open(pyproject_toml, 'rb') as fobj: pp = tomllib.load(fobj) section = pp['tool']['versioneer'] except (tomllib.TOMLDecodeError, KeyError) as e: print(f"Failed to load config from {pyproject_toml}: {e}") print("Try to load it from setup.cfg") if not section: parser = configparser.ConfigParser() with open(setup_cfg) as cfg_file: parser.read_file(cfg_file) parser.get("versioneer", "VCS") # raise error if missing section = parser["versioneer"] # `cast`` really shouldn't be used, but its simplest for the # common VersioneerConfig users at the moment. 
We verify against # `None` values elsewhere where it matters cfg = VersioneerConfig() cfg.VCS = section['VCS'] cfg.style = section.get("style", "") cfg.versionfile_source = cast(str, section.get("versionfile_source")) cfg.versionfile_build = section.get("versionfile_build") cfg.tag_prefix = cast(str, section.get("tag_prefix")) if cfg.tag_prefix in ("''", '""', None): cfg.tag_prefix = "" cfg.parentdir_prefix = section.get("parentdir_prefix") if isinstance(section, configparser.SectionProxy): # Make sure configparser translates to bool cfg.verbose = section.getboolean("verbose") else: cfg.verbose = section.get("verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" HANDLERS.setdefault(vcs, {})[method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, process.returncode return stdout, process.returncode LONG_VERSION_PY['git'] = r''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. # Generated by versioneer-0.29 # https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" import errno import os import re import subprocess import sys from typing import Any, Callable, Dict, List, Optional, Tuple import functools def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" VCS: str style: str tag_prefix: str parentdir_prefix: str versionfile_source: str verbose: bool def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command( commands: List[str], args: List[str], cwd: Optional[str] = None, verbose: bool = False, hide_stderr: bool = False, env: Optional[Dict[str, str]] = None, ) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) process = None popen_kwargs: Dict[str, Any] = {} if sys.platform == "win32": # This hides the console window if pythonw.exe is used startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs["startupinfo"] = startupinfo for command in commands: try: dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None), **popen_kwargs) break except OSError as e: if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, process.returncode return stdout, process.returncode def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces: Dict[str, Any]) -> str: """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). 
""" vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces: Dict[str, Any]) -> str: """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%%d" %% (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords: Dict[str, str] = {} try: with open(versionfile_abs, "r") as fobj: for line in fobj: if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) except OSError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords( keywords: Dict[str, str], tag_prefix: str, verbose: bool, ) -> Dict[str, Any]: """Get version information from git keywords.""" if "refnames" not in keywords: raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] # Filter out refs that exactly match prefix or that don't start # with a number once the prefix is stripped (mostly a concern # when prefix is '') if not re.match(r'\d', r): continue if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs( tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command ) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. 
This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] # GIT_DIR can interfere with correct operation of Versioneer. # It may be intended to be passed to the Versioneer-versioned project, # but that should not change where we get our version from. env = os.environ.copy() env.pop("GIT_DIR", None) runner = functools.partial(runner, env=env) _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = runner(GITS, [ "describe", "--tags", "--dirty", "--always", "--long", "--match", f"{tag_prefix}[[:digit:]]*" ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root) # --abbrev-ref was added in git-1.6.3 if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == "HEAD": # If we aren't exactly on a branch, pick a branch which represents # the current commit. If all else fails, we are on a branchless # commit. branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) # --contains was added in git-1.5.4 if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split("\n") # Remove the first line if we're running detached if "(" in branches[0]: branches.pop(0) # Strip off the leading "* " from the list of branches. branches = [branch[2:] for branch in branches] if "master" in branches: branch_name = "master" elif not branches: branch_name = None else: # Pick the first branch that is returned. Good or bad. branch_name = branches[0] pieces["branch"] = branch_name # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() # Use only the last line. Previous lines may contain GPG signature # information. date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None: """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [versionfile_source] if ipy: files.append(ipy) if "VERSIONEER_PEP518" not in globals(): try: my_path = __file__ if my_path.endswith((".pyc", ".pyo")): my_path = os.path.splitext(my_path)[0] + ".py" versioneer_file = os.path.relpath(my_path) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: with open(".gitattributes", "r") as fobj: for line in fobj: if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True break except OSError: pass if not present: with open(".gitattributes", "a+") as fobj: fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir( parentdir_prefix: str, root: str, verbose: bool, ) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.29) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename: str) -> Dict[str, Any]: """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None: """Write the given version number to the given _version.py file.""" contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_branch(pieces: Dict[str, Any]) -> str: """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The ".dev0" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear "older" than the master branch). Exceptions: 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0" if pieces["branch"] != "master": rendered += ".dev0" rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: """Split pep440 version string at the post-release segment. Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present). """ vc = str.split(ver, ".post") return vc[0], int(vc[1] or 0) if len(vc) == 2 else None def render_pep440_pre(pieces: Dict[str, Any]) -> str: """TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post0.devDISTANCE """ if pieces["closest-tag"]: if pieces["distance"]: # update the post release segment tag_version, post_version = pep440_split_post(pieces["closest-tag"]) rendered = tag_version if post_version is not None: rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) else: rendered += ".post0.dev%d" % (pieces["distance"]) else: # no commits, use the tag as the version rendered = pieces["closest-tag"] else: # exception #1 rendered = "0.post0.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . The ".dev0" means not master branch. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["branch"] != "master": rendered += ".dev0" rendered += "+g%s" % pieces["short"] if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-branch": rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-post-branch": rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose: bool = False) -> Dict[str, Any]: """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None` assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
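    # The fallback order tried below: (1) expanded git-archive keywords in
    # _version.py, (2) a static _version.py written by an earlier build,
    # (3) 'git describe' run against the working tree, and (4) the parent
    # directory name. The first method that succeeds is returned.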
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version() -> str: """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None): """Get the custom setuptools subclasses used by Versioneer. If the package uses a different cmdclass (e.g. one from numpy), it should be provide as an argument. """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/python-versioneer/python-versioneer/issues/52 cmds = {} if cmdclass is None else cmdclass.copy() # we add "version" to setuptools from setuptools import Command class cmd_version(Command): description = "report generated version string" user_options: List[Tuple[str, str, str]] = [] boolean_options: List[str] = [] def initialize_options(self) -> None: pass def finalize_options(self) -> None: pass def run(self) -> None: vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? 
# pip install -e . and setuptool/editable_wheel will invoke build_py # but the build_py command is not expected to copy any files. # we override different "build_py" commands for both environments if 'build_py' in cmds: _build_py: Any = cmds['build_py'] else: from setuptools.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) if getattr(self, "editable_mode", False): # During editable installs `.py` and data files are # not copied to build_lib return # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if 'build_ext' in cmds: _build_ext: Any = cmds['build_ext'] else: from setuptools.command.build_ext import build_ext as _build_ext class cmd_build_ext(_build_ext): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_ext.run(self) if self.inplace: # build_ext --inplace will only build extensions in # build/lib<..> dir with no _version.py to write to. # As in place builds will already have a _version.py # in the module dir, we do not need to write one. return # now locate _version.py in the new build/ directory and replace # it with an updated value if not cfg.versionfile_build: return target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) if not os.path.exists(target_versionfile): print(f"Warning: {target_versionfile} does not exist, skipping " "version update. This can happen if you are running build_ext " "without first running build_py.") return print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_ext"] = cmd_build_ext if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # type: ignore # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? 
        try:
            from py2exe.setuptools_buildexe import py2exe as _py2exe  # type: ignore
        except ImportError:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # type: ignore

        class cmd_py2exe(_py2exe):
            def run(self) -> None:
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # sdist farms its file list building out to egg_info
    if 'egg_info' in cmds:
        _egg_info: Any = cmds['egg_info']
    else:
        from setuptools.command.egg_info import egg_info as _egg_info

    class cmd_egg_info(_egg_info):
        def find_sources(self) -> None:
            # egg_info.find_sources builds the manifest list and writes it
            # in one shot
            super().find_sources()

            # Modify the filelist and normalize it
            root = get_root()
            cfg = get_config_from_root(root)
            self.filelist.append('versioneer.py')
            if cfg.versionfile_source:
                # There are rare cases where versionfile_source might not be
                # included by default, so we must be explicit
                self.filelist.append(cfg.versionfile_source)
            self.filelist.sort()
            self.filelist.remove_duplicates()

            # The write method is hidden in the manifest_maker instance that
            # generated the filelist and was thrown away
            # We will instead replicate their final normalization (to unicode,
            # and POSIX-style paths)
            from setuptools import unicode_utils
            normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/')
                          for f in self.filelist.files]

            manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt')
            with open(manifest_filename, 'w') as fobj:
                fobj.write('\n'.join(normalized))

    cmds['egg_info'] = cmd_egg_info

    # we override different "sdist" commands for both environments
    if 'sdist' in cmds:
        _sdist: Any = cmds['sdist']
    else:
        from setuptools.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self) -> None:
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir: str, files: List[str]) -> None:
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds


CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:

 [versioneer]
 VCS = git
 style = pep440
 versionfile_source = src/myproject/_version.py
 versionfile_build = myproject/_version.py
 tag_prefix =
 parentdir_prefix = myproject-

You will also need to edit your setup.py to use the results:

 import versioneer
 setup(version=versioneer.get_version(),
       cmdclass=versioneer.get_cmdclass(),  ...)

Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
""" SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ INIT_PY_SNIPPET = """ from . import {0} __version__ = {0}.get_versions()['version'] """ def do_setup() -> int: """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (OSError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") maybe_ipy: Optional[str] = ipy if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except OSError: old = "" module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] snippet = INIT_PY_SNIPPET.format(module) if OLD_SNIPPET in old: print(" replacing boilerplate in %s" % ipy) with open(ipy, "w") as f: f.write(old.replace(OLD_SNIPPET, snippet)) elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) maybe_ipy = None # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(cfg.versionfile_source, maybe_ipy) return 0 def scan_setup_py() -> int: """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors def setup_command() -> NoReturn: """Set up Versioneer and exit with appropriate error code.""" errors = do_setup() errors += scan_setup_py() sys.exit(1 if errors else 0) if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": setup_command()