pax_global_header00006660000000000000000000000064147671173350014530gustar00rootroot0000000000000052 comment=f95c453f3835da7f8097a1b5302ea1b7a3d6e57f zarr-python-3.0.6/000077500000000000000000000000001476711733500140335ustar00rootroot00000000000000zarr-python-3.0.6/.git-blame-ignore-revs000066400000000000000000000002321476711733500201300ustar00rootroot00000000000000# lint codebase with black and ruff 4e348d6b80c96da461fd866576c971b8a659ba15 # migrate from black to ruff format 22cea005629913208a85799372e045f353744add zarr-python-3.0.6/.git_archival.txt000066400000000000000000000002171476711733500173060ustar00rootroot00000000000000node: f95c453f3835da7f8097a1b5302ea1b7a3d6e57f node-date: 2025-03-20T16:03:57-07:00 describe-name: v3.0.6 ref-names: HEAD -> main, tag: v3.0.6 zarr-python-3.0.6/.gitattributes000066400000000000000000000001341476711733500167240ustar00rootroot00000000000000*.py linguist-language=python *.ipynb linguist-documentation .git_archival.txt export-subst zarr-python-3.0.6/.github/000077500000000000000000000000001476711733500153735ustar00rootroot00000000000000zarr-python-3.0.6/.github/CODEOWNERS000066400000000000000000000000701476711733500167630ustar00rootroot00000000000000zarr/_storage/absstore.py @zarr-developers/azure-team zarr-python-3.0.6/.github/CONTRIBUTING.md000066400000000000000000000002471476711733500176270ustar00rootroot00000000000000Contributing ============ Please see the [project documentation](https://zarr.readthedocs.io/en/stable/contributing.html) for information about contributing to Zarr. zarr-python-3.0.6/.github/ISSUE_TEMPLATE/000077500000000000000000000000001476711733500175565ustar00rootroot00000000000000zarr-python-3.0.6/.github/ISSUE_TEMPLATE/bug_report.yml000066400000000000000000000037651476711733500224640ustar00rootroot00000000000000name: Bug Report description: Report incorrect behaviour in the library. labels: ["bug"] body: - type: markdown attributes: value: | Please provide the following information. - type: input id: Zarr-version attributes: label: Zarr version description: Value of ``zarr.__version__`` placeholder: v2.10.2, v2.11.3, v2.12.0, etc. validations: required: true - type: input id: Numcodecs-version attributes: label: Numcodecs version description: Value of ``numcodecs.__version__`` placeholder: v0.8.1, v0.9.0, v0.10.0, etc. validations: required: true - type: input id: Python-version attributes: label: Python Version description: Version of Python interpreter placeholder: 3.10, 3.11, 3.12 etc. validations: required: true - type: input id: OS attributes: label: Operating System description: Operating System placeholder: (Linux/Windows/Mac) validations: required: true - type: input id: installation attributes: label: Installation description: How was Zarr installed? placeholder: e.g., "using pip into virtual environment", or "using conda" validations: required: true - type: textarea id: description attributes: label: Description description: Explain why the current behavior is a problem, what the expected output/behaviour is, and why the expected output/behaviour is a better solution. validations: required: true - type: textarea id: reproduce attributes: label: Steps to reproduce description: Minimal, reproducible code sample, a copy-pastable example if possible. validations: required: true - type: textarea id: additional-output attributes: label: Additional output description: If you think it might be relevant, please provide the output from ``pip freeze`` or ``conda env export`` depending on which was used to install Zarr. 
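For convenience, a minimal sketch like the following (assuming only that `zarr` and `numcodecs` are importable; it is not part of the template itself) collects the version and platform details the fields above ask for, ready to paste into a report:

```python
# Sketch: gather the environment details the bug-report template requests.
import platform
import sys

import numcodecs
import zarr

print("Zarr version:", zarr.__version__)
print("Numcodecs version:", numcodecs.__version__)
print("Python version:", sys.version)
print("Operating System:", platform.platform())
```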
zarr-python-3.0.6/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000011641476711733500215500ustar00rootroot00000000000000blank_issues_enabled: true contact_links: - name: Propose a new major feature url: https://github.com/zarr-developers/zarr-specs about: A new major feature should be discussed in the Zarr specifications repository. - name: Discuss something on ZulipChat url: https://ossci.zulipchat.com/ about: For questions like "How do I do X with Zarr?", consider posting your question to our developer chat. - name: Discuss something on GitHub Discussions url: https://github.com/zarr-developers/zarr-python/discussions about: For questions like "How do I do X with Zarr?", you can move to GitHub Discussions. zarr-python-3.0.6/.github/ISSUE_TEMPLATE/documentation.yml000066400000000000000000000012021476711733500231450ustar00rootroot00000000000000name: Documentation Improvement description: Report missing or wrong documentation. Alternatively, you can just open a pull request with the suggested change. title: "DOC: " labels: [documentation, help wanted] body: - type: textarea attributes: label: Describe the issue linked to the documentation description: > Please provide a description of what documentation you believe needs to be fixed/improved. validations: required: true - type: textarea attributes: label: Suggested fix for documentation description: > Please explain the suggested fix and why it's better than the existing documentation. zarr-python-3.0.6/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000005601476711733500211750ustar00rootroot00000000000000[Description of PR] TODO: * [ ] Add unit tests and/or doctests in docstrings * [ ] Add docstrings and API docs for any new/modified user-facing classes and functions * [ ] New/modified features documented in `docs/user-guide/*.rst` * [ ] Changes documented as a new file in `changes/` * [ ] GitHub Actions have all passed * [ ] Test coverage is 100% (Codecov passes) zarr-python-3.0.6/.github/codecov.yml000066400000000000000000000005301476711733500175360ustar00rootroot00000000000000coverage: status: project: default: target: 100 threshold: 0.1 patch: default: target: 100 comment: layout: "diff, files" behavior: default require_changes: true # if true: only post the comment if coverage changes branches: # branch names that can post comment - "main" zarr-python-3.0.6/.github/dependabot.yml000066400000000000000000000011471476711733500202260ustar00rootroot00000000000000--- version: 2 updates: # Updates for main - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" groups: actions: patterns: - "*" # Updates for support/v2 branch - package-ecosystem: "pip" directory: "/" target-branch: "support/v2" schedule: interval: "weekly" groups: requirements: patterns: - "*" - package-ecosystem: "github-actions" directory: "/" target-branch: "support/v2" schedule: interval: "weekly" groups: actions: patterns: - "*" zarr-python-3.0.6/.github/labeler.yml000066400000000000000000000001441476711733500175230ustar00rootroot00000000000000needs release notes: - all: - changed-files: - all-globs-to-all-files: '!changes/*.rst' zarr-python-3.0.6/.github/workflows/000077500000000000000000000000001476711733500174305ustar00rootroot00000000000000zarr-python-3.0.6/.github/workflows/gpu_test.yml000066400000000000000000000044761476711733500220200ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: 
https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: GPU Test on: push: branches: [ main ] pull_request: branches: [ main ] workflow_dispatch: env: LD_LIBRARY_PATH: /usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib64 concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: test: name: py=${{ matrix.python-version }}, np=${{ matrix.numpy-version }}, deps=${{ matrix.dependency-set }} runs-on: gpu-runner strategy: matrix: python-version: ['3.11'] numpy-version: ['2.1'] dependency-set: ["minimal"] steps: - uses: actions/checkout@v4 # - name: cuda-toolkit # uses: Jimver/cuda-toolkit@v0.2.16 # id: cuda-toolkit # with: # cuda: '12.4.1' - name: Set up CUDA run: | wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.1-1_all.deb sudo dpkg -i cuda-keyring_1.1-1_all.deb sudo apt-get update sudo apt-get -y install cuda-toolkit-12-6 echo "/usr/local/cuda/bin" >> $GITHUB_PATH - name: GPU check run: | nvidia-smi echo $PATH echo $LD_LIBRARY_PATH nvcc -V - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' - name: Install Hatch and CuPy run: | python -m pip install --upgrade pip pip install hatch - name: Set Up Hatch Env run: | hatch env create gputest.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} hatch env run -e gputest.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} list-env - name: Run Tests run: | hatch env run --env gputest.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} run-coverage - name: Upload coverage uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3 # v5.3.1 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true # optional (default = false) zarr-python-3.0.6/.github/workflows/hypothesis.yaml000066400000000000000000000052421476711733500225160ustar00rootroot00000000000000name: Slow Hypothesis CI on: push: branches: - "main" pull_request: branches: - "main" types: [opened, reopened, synchronize, labeled] schedule: - cron: "0 0 * * *" # Daily “At 00:00” UTC workflow_dispatch: # allows you to trigger manually env: FORCE_COLOR: 3 jobs: hypothesis: name: Slow Hypothesis Tests runs-on: "ubuntu-latest" defaults: run: shell: bash -l {0} strategy: matrix: python-version: ['3.11'] numpy-version: ['2.1'] dependency-set: ["optional"] steps: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' - name: Install Hatch run: | python -m pip install --upgrade pip pip install hatch - name: Set Up Hatch Env run: | hatch env create test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} hatch env run -e test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} list-env # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - name: Restore cached hypothesis directory id: restore-hypothesis-cache uses: actions/cache/restore@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} restore-keys: | cache-hypothesis- - name: Run slow Hypothesis tests if: success() id: status run: | hatch env run --env test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} run-hypothesis # explicitly save the cache so it gets updated, also do this even if 
it fails. - name: Save cached hypothesis directory id: save-hypothesis-cache if: always() && steps.status.outcome != 'skipped' uses: actions/cache/save@v4 with: path: .hypothesis/ key: cache-hypothesis-${{ runner.os }}-${{ github.run_id }} - name: Upload coverage uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true # optional (default = false) - name: Generate and publish the report if: | failure() && steps.status.outcome == 'failure' && github.event_name == 'schedule' && github.repository_owner == 'zarr-developers' uses: xarray-contrib/issue-from-pytest-log@v1 with: log-path: output-${{ matrix.python-version }}-log.jsonl issue-title: "Nightly Hypothesis tests failed" issue-label: "topic-hypothesis" zarr-python-3.0.6/.github/workflows/issue-metrics.yml000066400000000000000000000022431476711733500227500ustar00rootroot00000000000000name: Monthly issue metrics on: workflow_dispatch: schedule: - cron: '3 2 1 * *' permissions: contents: read jobs: build: name: issue metrics runs-on: ubuntu-latest permissions: issues: write pull-requests: read steps: - name: Get dates for last month shell: bash run: | # Calculate the first day of the previous month first_day=$(date -d "last month" +%Y-%m-01) # Calculate the last day of the previous month last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) #Set an environment variable with the date range echo "$first_day..$last_day" echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" - name: Run issue-metrics tool uses: github/issue-metrics@v3 env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} SEARCH_QUERY: 'repo:zarr-developers/zarr-python is:issue created:${{ env.last_month }} -reason:"not planned"' - name: Create issue uses: peter-evans/create-issue-from-file@v5 with: title: Monthly issue metrics report token: ${{ secrets.GITHUB_TOKEN }} content-filepath: ./issue_metrics.md zarr-python-3.0.6/.github/workflows/needs_release_notes.yml000066400000000000000000000007501476711733500241630ustar00rootroot00000000000000name: "Pull Request Labeler" on: - pull_request_target jobs: labeler: if: ${{ github.event.pull_request.user.login != 'dependabot[bot]' }} && ${{ github.event.pull_request.user.login != 'pre-commit-ci[bot]' }} permissions: contents: read pull-requests: write runs-on: ubuntu-latest steps: - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} sync-labels: true zarr-python-3.0.6/.github/workflows/releases.yml000066400000000000000000000026041476711733500217600ustar00rootroot00000000000000name: Wheels on: [push, pull_request] jobs: build_artifacts: name: Build wheel on ubuntu-latest runs-on: ubuntu-latest strategy: fail-fast: false steps: - uses: actions/checkout@v4 with: submodules: true fetch-depth: 0 - uses: actions/setup-python@v5.2.0 name: Install Python with: python-version: '3.11' - name: Install PyBuild run: | python -m pip install --upgrade pip pip install hatch - name: Build wheel and sdist run: hatch build - uses: actions/upload-artifact@v4 with: name: releases path: dist test_dist_pypi: needs: [build_artifacts] runs-on: ubuntu-latest steps: - uses: actions/download-artifact@v4 with: name: releases path: dist - name: test run: | ls ls dist upload_pypi: needs: [build_artifacts] runs-on: ubuntu-latest if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') steps: - uses: actions/download-artifact@v4 with: name: releases path: dist - uses: pypa/gh-action-pypi-publish@v1.12.4 with: user: __token__ password: ${{ 
secrets.pypi_password }} # To test: repository_url: https://test.pypi.org/legacy/ zarr-python-3.0.6/.github/workflows/test.yml000066400000000000000000000107411476711733500211350ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: Test on: push: branches: [ main ] pull_request: branches: [ main ] workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: test: name: os=${{ matrix.os }}, py=${{ matrix.python-version }}, np=${{ matrix.numpy-version }}, deps=${{ matrix.dependency-set }} strategy: matrix: python-version: ['3.11', '3.12', '3.13'] numpy-version: ['1.25', '2.1'] dependency-set: ["minimal", "optional"] os: ["ubuntu-latest"] include: - python-version: '3.11' numpy-version: '1.25' dependency-set: 'optional' os: 'macos-latest' - python-version: '3.13' numpy-version: '2.1' dependency-set: 'optional' os: 'macos-latest' - python-version: '3.11' numpy-version: '1.25' dependency-set: 'optional' os: 'windows-latest' - python-version: '3.13' numpy-version: '2.1' dependency-set: 'optional' os: 'windows-latest' runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # grab all branches and tags - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' - name: Install Hatch run: | python -m pip install --upgrade pip pip install hatch - name: Set Up Hatch Env run: | hatch env create test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} hatch env run -e test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} list-env - name: Run Tests run: | hatch env run --env test.py${{ matrix.python-version }}-${{ matrix.numpy-version }}-${{ matrix.dependency-set }} run-coverage - name: Upload coverage uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true # optional (default = false) test-upstream-and-min-deps: name: py=${{ matrix.python-version }}-${{ matrix.dependency-set }} runs-on: ubuntu-latest strategy: matrix: python-version: ['3.11', "3.13"] dependency-set: ["upstream", "min_deps"] exclude: - python-version: "3.13" dependency-set: min_deps - python-version: "3.11" dependency-set: upstream steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' - name: Install Hatch run: | python -m pip install --upgrade pip pip install hatch - name: Set Up Hatch Env run: | hatch env create ${{ matrix.dependency-set }} hatch env run -e ${{ matrix.dependency-set }} list-env - name: Run Tests run: | hatch env run --env ${{ matrix.dependency-set }} run - name: Upload coverage uses: codecov/codecov-action@v5 with: token: ${{ secrets.CODECOV_TOKEN }} verbose: true # optional (default = false) doctests: name: doctests runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # required for hatch version discovery, which is needed for numcodecs.zarr3 - name: Set up Python uses: actions/setup-python@v5 with: python-version: '3.13' cache: 'pip' - name: Install Hatch run: | python -m pip install --upgrade pip pip install hatch - name: Set Up Hatch Env run: | hatch env create doctest hatch env run -e doctest list-env - name: Run Tests run: | hatch env 
run --env doctest run test-complete: name: Test complete needs: [ test, test-upstream-and-min-deps, doctests ] if: always() runs-on: ubuntu-latest steps: - name: Check failure if: | contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') run: exit 1 - name: Success run: echo Success! zarr-python-3.0.6/.gitignore000066400000000000000000000017521476711733500160300ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # C extensions *.so # Distribution / packaging .Python env/ .venv/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .coverage .coverage.* .cache coverage.xml *,cover # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ docs/api docs/data data data.zip # PyBuilder target/ # PyCharm .idea # Jupyter .ipynb_checkpoints/ # VCS versioning src/zarr/_version.py # emacs *~ # VSCode .vscode/ # test data #*.zarr #*.zip #example* #doesnotexist #test_sync* data/* src/fixture/ fixture/ junit.xml .DS_Store tests/.hypothesis .hypothesis/ zarr/version.py zarr-python-3.0.6/.pre-commit-config.yaml000066400000000000000000000030531476711733500203150ustar00rootroot00000000000000ci: autoupdate_commit_msg: "chore: update pre-commit hooks" autoupdate_schedule: "monthly" autofix_commit_msg: "style: pre-commit fixes" autofix_prs: false default_stages: [pre-commit, pre-push] repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.9.9 hooks: - id: ruff args: ["--fix", "--show-fixes"] - id: ruff-format - repo: https://github.com/codespell-project/codespell rev: v2.4.1 hooks: - id: codespell args: ["-L", "fo,ihs,kake,te", "-S", "fixture"] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-yaml - id: trailing-whitespace - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy files: src|tests additional_dependencies: # Package dependencies - packaging - donfig - numcodecs[crc32c] - numpy==2.1 # until https://github.com/numpy/numpy/issues/28034 is resolved - typing_extensions - universal-pathlib # Tests - pytest - repo: https://github.com/scientific-python/cookie rev: 2025.01.22 hooks: - id: sp-repo-review - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: - id: rst-directive-colons - id: rst-inline-touching-normal - repo: https://github.com/numpy/numpydoc rev: v1.8.0 hooks: - id: numpydoc-validation - repo: https://github.com/twisted/towncrier rev: 24.8.0 hooks: - id: towncrier-check zarr-python-3.0.6/.pyup.yml000066400000000000000000000005131476711733500156300ustar00rootroot00000000000000# pyup.io config file # see https://pyup.io/docs/configuration/ for all available options schedule: every month requirements: - requirements_dev_minimal.txt: pin: True update: all - requirements_dev_numpy.txt: pin: True update: all - requirements_dev_optional.txt: pin: True update: all zarr-python-3.0.6/.readthedocs.yaml000066400000000000000000000006161476711733500172650ustar00rootroot00000000000000version: 2 build: os: ubuntu-22.04 tools: python: "3.12" jobs: pre_build: - | if [ "$READTHEDOCS_VERSION_TYPE" != "tag" ]; then towncrier build --version Unreleased --yes; fi sphinx: configuration: docs/conf.py 
fail_on_warning: true formats: all python: install: - method: pip path: . extra_requirements: - docs zarr-python-3.0.6/FUNDING.yml000066400000000000000000000001031476711733500156420ustar00rootroot00000000000000github: [numfocus] custom: ['https://numfocus.org/donate-to-zarr'] zarr-python-3.0.6/LICENSE.txt000066400000000000000000000021441476711733500156570ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2015-2024 Zarr Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. zarr-python-3.0.6/README.md000066400000000000000000000142001476711733500153070ustar00rootroot00000000000000

# Zarr
| Category | Badge(s) |
| ----------------- | -------- |
| Latest Release | latest release · latest release |
| Package Status | status |
| License | license |
| Build Status | build status |
| Pre-commit Status | pre-commit status |
| Coverage | coverage |
| Downloads | pypi downloads |
| Developer Chat | |
| Funding | CZI's Essential Open Source Software for Science |
| Citation | DOI |
## What is it? Zarr is a Python package providing an implementation of compressed, chunked, N-dimensional arrays, designed for use in parallel computing. See the [documentation](https://zarr.readthedocs.io) for more information. ## Main Features - [**Create**](https://zarr.readthedocs.io/en/stable/tutorial.html#creating-an-array) N-dimensional arrays with any NumPy `dtype`. - [**Chunk arrays**](https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations) along any dimension. - [**Compress**](https://zarr.readthedocs.io/en/stable/tutorial.html#compressors) and/or filter chunks using any NumCodecs codec. - [**Store arrays**](https://zarr.readthedocs.io/en/stable/tutorial.html#tutorial-storage) in memory, on disk, inside a zip file, on S3, etc... - [**Read**](https://zarr.readthedocs.io/en/stable/tutorial.html#reading-and-writing-data) an array [**concurrently**](https://zarr.readthedocs.io/en/stable/tutorial.html#parallel-computing-and-synchronization) from multiple threads or processes. - Write to an array concurrently from multiple threads or processes. - Organize arrays into hierarchies via [**groups**](https://zarr.readthedocs.io/en/stable/tutorial.html#groups). ## Where to get it Zarr can be installed from PyPI using `pip`: ```bash pip install zarr ``` or via `conda`: ```bash conda install -c conda-forge zarr ``` For more details, including how to install from source, see the [installation documentation](https://zarr.readthedocs.io/en/stable/index.html#installation). zarr-python-3.0.6/TEAM.md000066400000000000000000000013211476711733500151000ustar00rootroot00000000000000## Active core-developers - @joshmoore (Josh Moore) - @jni (Juan Nunez-Iglesias) - @rabernat (Ryan Abernathey) - @jhamman (Joe Hamman) - @d-v-b (Davis Bennett) - @jakirkham (jakirkham) - @martindurant (Martin Durant) - @normanrz (Norman Rzepka) - @dstansby (David Stansby) - @dcherian (Deepak Cherian) - @TomAugspurger (Tom Augspurger) ## Emeritus core-developers - @alimanfoo (Alistair Miles) - @shoyer (Stephan Hoyer) - @ryan-williams (Ryan Williams) - @jrbourbeau (James Bourbeau) - @mzjp2 (Zain Patel) - @grlee77 (Gregory Lee) ## Former core-developers - @jeromekelleher (Jerome Kelleher) - @tjcrone (Tim Crone) - @funkey (Jan Funke) - @shikharsg - @Carreau (Matthias Bussonnier) - @dazzag24 - @WardF (Ward Fisher) zarr-python-3.0.6/bench/000077500000000000000000000000001476711733500151125ustar00rootroot00000000000000zarr-python-3.0.6/bench/compress_normal.py000066400000000000000000000016671476711733500207010ustar00rootroot00000000000000import sys import timeit import line_profiler import numpy as np import zarr from zarr import blosc if __name__ == "__main__": sys.path.insert(0, "..") # setup a = np.random.normal(2000, 1000, size=200000000).astype("u2") z = zarr.empty_like( a, chunks=1000000, compression="blosc", compression_opts={"cname": "lz4", "clevel": 5, "shuffle": 2}, ) print(z) print("*" * 79) # time t = timeit.repeat("z[:] = a", repeat=10, number=1, globals=globals()) print(t) print(min(t)) print(z) # profile profile = line_profiler.LineProfiler(blosc.compress) profile.run("z[:] = a") profile.print_stats() print("*" * 79) # time t = timeit.repeat("z[:]", repeat=10, number=1, globals=globals()) print(t) print(min(t)) # profile profile = line_profiler.LineProfiler(blosc.decompress) profile.run("z[:]") profile.print_stats() zarr-python-3.0.6/bench/compress_normal.txt000066400000000000000000000252341476711733500210640ustar00rootroot00000000000000zarr.core.Array((200000000,), uint16, chunks=(1000000,), order=C) 
compression: blosc; compression_opts: {'clevel': 5, 'cname': 'lz4', 'shuffle': 2} nbytes: 381.5M; nbytes_stored: 294; ratio: 1360544.2; initialized: 0/200 store: builtins.dict ******************************************************************************* [0.27119584499996563, 0.2855067059999783, 0.2887747180002407, 0.3058794240005227, 0.3139041080003153, 0.3021271820007314, 0.31543190899992624, 0.31403100900024583, 0.3272544129995367, 0.31834129100025166] 0.27119584499996563 zarr.core.Array((200000000,), uint16, chunks=(1000000,), order=C) compression: blosc; compression_opts: {'clevel': 5, 'cname': 'lz4', 'shuffle': 2} nbytes: 381.5M; nbytes_stored: 314.1M; ratio: 1.2; initialized: 200/200 store: builtins.dict Timer unit: 1e-06 s Total time: 0.297223 s File: /home/aliman/code/github/alimanfoo/zarr/zarr/blosc.pyx Function: compress at line 137 Line # Hits Time Per Hit % Time Line Contents ============================================================== 137 def compress(source, char* cname, int clevel, int shuffle): 138 """Compress data in a numpy array. 139 140 Parameters 141 ---------- 142 source : array-like 143 Data to be compressed. 144 cname : bytes 145 Name of compression library to use. 146 clevel : int 147 Compression level. 148 shuffle : int 149 Shuffle filter. 150 151 Returns 152 ------- 153 dest : bytes-like 154 Compressed data. 155 156 """ 157 158 cdef: 159 char *source_ptr 160 char *dest_ptr 161 Py_buffer source_buffer 162 size_t nbytes, cbytes, itemsize 163 200 506 2.5 0.2 array.array char_array_template = array.array('b', []) 164 array.array dest 165 166 # setup source buffer 167 200 458 2.3 0.2 PyObject_GetBuffer(source, &source_buffer, PyBUF_ANY_CONTIGUOUS) 168 200 119 0.6 0.0 source_ptr = source_buffer.buf 169 170 # setup destination 171 200 239 1.2 0.1 nbytes = source_buffer.len 172 200 103 0.5 0.0 itemsize = source_buffer.itemsize 173 200 2286 11.4 0.8 dest = array.clone(char_array_template, nbytes + BLOSC_MAX_OVERHEAD, 174 zero=False) 175 200 129 0.6 0.0 dest_ptr = dest.data.as_voidptr 176 177 # perform compression 178 200 1734 8.7 0.6 if _get_use_threads(): 179 # allow blosc to use threads internally 180 200 167 0.8 0.1 compressor_set = blosc_set_compressor(cname) 181 200 94 0.5 0.0 if compressor_set < 0: 182 raise ValueError('compressor not supported: %r' % cname) 183 200 288570 1442.8 97.1 with nogil: 184 cbytes = blosc_compress(clevel, shuffle, itemsize, nbytes, 185 source_ptr, dest_ptr, 186 nbytes + BLOSC_MAX_OVERHEAD) 187 188 else: 189 with nogil: 190 cbytes = blosc_compress_ctx(clevel, shuffle, itemsize, nbytes, 191 source_ptr, dest_ptr, 192 nbytes + BLOSC_MAX_OVERHEAD, cname, 193 0, 1) 194 195 # release source buffer 196 200 616 3.1 0.2 PyBuffer_Release(&source_buffer) 197 198 # check compression was successful 199 200 120 0.6 0.0 if cbytes <= 0: 200 raise RuntimeError('error during blosc compression: %d' % cbytes) 201 202 # resize after compression 203 200 1896 9.5 0.6 array.resize(dest, cbytes) 204 205 200 186 0.9 0.1 return dest ******************************************************************************* [0.24293352799941204, 0.2324290420001489, 0.24935673900017719, 0.25716222699975333, 0.24246313799994823, 0.23272456500035332, 0.2636815870000646, 0.2576046349995522, 0.2781278639995435, 0.23824110699933954] 0.2324290420001489 Timer unit: 1e-06 s Total time: 0.240178 s File: /home/aliman/code/github/alimanfoo/zarr/zarr/blosc.pyx Function: decompress at line 75 Line # Hits Time Per Hit % Time Line Contents 
============================================================== 75 def decompress(source, dest): 76 """Decompress data. 77 78 Parameters 79 ---------- 80 source : bytes-like 81 Compressed data, including blosc header. 82 dest : array-like 83 Object to decompress into. 84 85 Notes 86 ----- 87 Assumes that the size of the destination buffer is correct for the size of 88 the uncompressed data. 89 90 """ 91 cdef: 92 int ret 93 char *source_ptr 94 char *dest_ptr 95 Py_buffer source_buffer 96 array.array source_array 97 Py_buffer dest_buffer 98 size_t nbytes 99 100 # setup source buffer 101 200 573 2.9 0.2 if PY2 and isinstance(source, array.array): 102 # workaround fact that array.array does not support new-style buffer 103 # interface in PY2 104 release_source_buffer = False 105 source_array = source 106 source_ptr = source_array.data.as_voidptr 107 else: 108 200 112 0.6 0.0 release_source_buffer = True 109 200 144 0.7 0.1 PyObject_GetBuffer(source, &source_buffer, PyBUF_ANY_CONTIGUOUS) 110 200 98 0.5 0.0 source_ptr = source_buffer.buf 111 112 # setup destination buffer 113 200 552 2.8 0.2 PyObject_GetBuffer(dest, &dest_buffer, 114 PyBUF_ANY_CONTIGUOUS | PyBUF_WRITEABLE) 115 200 100 0.5 0.0 dest_ptr = dest_buffer.buf 116 200 84 0.4 0.0 nbytes = dest_buffer.len 117 118 # perform decompression 119 200 1856 9.3 0.8 if _get_use_threads(): 120 # allow blosc to use threads internally 121 200 235286 1176.4 98.0 with nogil: 122 ret = blosc_decompress(source_ptr, dest_ptr, nbytes) 123 else: 124 with nogil: 125 ret = blosc_decompress_ctx(source_ptr, dest_ptr, nbytes, 1) 126 127 # release buffers 128 200 754 3.8 0.3 if release_source_buffer: 129 200 326 1.6 0.1 PyBuffer_Release(&source_buffer) 130 200 165 0.8 0.1 PyBuffer_Release(&dest_buffer) 131 132 # handle errors 133 200 128 0.6 0.1 if ret <= 0: 134 raise RuntimeError('error during blosc decompression: %d' % ret) zarr-python-3.0.6/changes/000077500000000000000000000000001476711733500154435ustar00rootroot00000000000000zarr-python-3.0.6/changes/.gitignore000066400000000000000000000000141476711733500174260ustar00rootroot00000000000000!.gitignore zarr-python-3.0.6/changes/README.md000066400000000000000000000005761476711733500167320ustar00rootroot00000000000000Writing a changelog entry ------------------------- Please put a new file in this directory named `xxxx..rst`, where - `xxxx` is the pull request number associated with this entry - `` is one of: - feature - bugfix - doc - removal - misc Inside the file, please write a short description of what you have changed, and how it impacts users of `zarr-python`. zarr-python-3.0.6/codecov.yml000066400000000000000000000002321476711733500161750ustar00rootroot00000000000000coverage: status: patch: default: target: auto project: default: target: auto threshold: 0.1 comment: false zarr-python-3.0.6/docs/000077500000000000000000000000001476711733500147635ustar00rootroot00000000000000zarr-python-3.0.6/docs/Makefile000066400000000000000000000176501476711733500164340ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = -W --keep-going SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. 
Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from https://www.sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " dummy to check syntax errors of document sources" .PHONY: clean clean: rm -rf $(BUILDDIR)/* rm -rf $(BUILDDIR)/../api .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/zarr.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/zarr.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 
@echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/zarr" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/zarr" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: dummy dummy: $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy @echo @echo "Build finished. Dummy builder generates no files." zarr-python-3.0.6/docs/_static/000077500000000000000000000000001476711733500164115ustar00rootroot00000000000000zarr-python-3.0.6/docs/_static/custom.css000066400000000000000000000044751476711733500204470ustar00rootroot00000000000000@import url('https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,400;0,700;0,900;1,400;1,700;1,900&family=Open+Sans:ital,wght@0,400;0,600;1,400;1,600&display=swap'); body { font-family: 'Open Sans', sans-serif; } pre, code { font-size: 100%; line-height: 155%; } /* Style the active version button. - dev: orange - stable: green - old, PR: red Colors from: Wong, B. Points of view: Color blindness. Nat Methods 8, 441 (2011). https://doi.org/10.1038/nmeth.1618 */ /* If the active version has the name "dev", style it orange */ #version_switcher_button[data-active-version-name*="dev"] { background-color: #E69F00; border-color: #E69F00; color:#000000; } /* green for `stable` */ #version_switcher_button[data-active-version-name*="stable"] { background-color: #009E73; border-color: #009E73; } /* red for `old` */ #version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) { background-color: #980F0F; border-color: #980F0F; } /* Main page overview cards */ .sd-card { background: #fff; border-radius: 0; padding: 30px 10px 20px 10px; margin: 10px 0px; } .sd-card .sd-card-header { text-align: center; } .sd-card .sd-card-header .sd-card-text { margin: 0px; } .sd-card .sd-card-img-top { height: 52px; width: 52px; margin-left: auto; margin-right: auto; } .sd-card .sd-card-header { border: none; background-color: white; font-size: var(--pst-font-size-h5); font-weight: bold; padding: 2.5rem 0rem 0.5rem 0rem; } .sd-card .sd-card-footer { border: none; background-color: white; } .sd-card .sd-card-footer .sd-card-text { max-width: 220px; margin-left: auto; margin-right: auto; } /* Dark theme tweaking */ html[data-theme=dark] .sd-card img[src*='.svg'] { filter: invert(0.82) brightness(0.8) contrast(1.2); } /* Main index page overview cards */ html[data-theme=dark] .sd-card { background-color:var(--pst-color-background); } html[data-theme=dark] .sd-shadow-sm { box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important } html[data-theme=dark] .sd-card .sd-card-header { background-color:var(--pst-color-background); } html[data-theme=dark] .sd-card .sd-card-footer { background-color:var(--pst-color-background); } html[data-theme=dark] h1 { color: var(--pst-color-primary); } zarr-python-3.0.6/docs/_static/custom.js000066400000000000000000000011451476711733500202620ustar00rootroot00000000000000// handle redirects (() => { let anchorMap = { "installation": "installation.html", "getting-started": "getting_started.html#getting-started", "highlights": "getting_started.html#highlights", "contributing": "contributing.html", "projects-using-zarr": "getting_started.html#projects-using-zarr", "contents": "getting_started.html#contents", "indices-and-tables": "api.html#indices-and-tables" } let hash = window.location.hash.substring(1); if (hash && hash in anchorMap) { window.location.replace(anchorMap[hash]); } })(); zarr-python-3.0.6/docs/_static/index_api.svg000066400000000000000000000066771476711733500211120ustar00rootroot00000000000000 image/svg+xml 
zarr-python-3.0.6/docs/_static/index_contribute.svg [SVG image; vector data omitted]
zarr-python-3.0.6/docs/_static/index_getting_started.svg [SVG image; vector data omitted]
zarr-python-3.0.6/docs/_static/index_user_guide.svg [SVG image; vector data omitted]
zarr-python-3.0.6/docs/_static/logo1.png [binary PNG image data omitted]
zarr-python-3.0.6/docs/_static/logo_horizontal.svg [SVG image; vector data omitted]
zarr-python-3.0.6/docs/about.rst
About
=====

Zarr is a format for the storage of chunked, compressed, N-dimensional arrays
inspired by `HDF5 <https://www.hdfgroup.org/HDF5/>`_, `h5py <https://www.h5py.org/>`_
and `bcolz <https://bcolz.readthedocs.io/>`_.

These documents describe the Zarr-Python implementation. More information
about the Zarr format can be found on the `main website <https://zarr.dev>`_.

Projects using Zarr
-------------------

If you are using Zarr-Python, we would love to hear about it.

Funding
-------
The project is fiscally sponsored by `NumFOCUS <https://numfocus.org/>`_, a US
501(c)(3) public charity, and development is supported by the
`MRC Centre for Genomics and Global Health <https://www.cggh.org>`_ and the
`Chan Zuckerberg Initiative <https://chanzuckerberg.com/>`_.

.. _NumCodecs: https://numcodecs.readthedocs.io/
zarr-python-3.0.6/docs/conf.py
#!/usr/bin/env python3
#
# zarr documentation build configuration file, created by
# sphinx-quickstart on Mon May 2 21:40:09 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from importlib.metadata import version as get_version
from typing import Any

import sphinx
import sphinx.application

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(".."))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.viewcode",
    "sphinx.ext.intersphinx",
    'autoapi.extension',
    "numpydoc",
    "sphinx_issues",
    "sphinx_copybutton",
    "sphinx_design",
    'sphinx_reredirects',
]

issues_github_path = "zarr-developers/zarr-python"

autoapi_dirs = ['../src/zarr']
autoapi_add_toctree_entry = False
autoapi_generate_api_docs = True
autoapi_member_order = "groupwise"
autoapi_root = "api"
autoapi_keep_files = True
autoapi_options = [
    'members',
    'undoc-members',
    'show-inheritance',
    'show-module-summary',
    'imported-members',
]


def skip_submodules(
    app: sphinx.application.Sphinx,
    what: str,
    name: str,
    obj: object,
    skip: bool,
    options: dict[str, Any],
) -> bool:
    # Skip documenting zarr.codecs submodules (codecs are documented in the
    # main zarr.codecs namespace) and anything under zarr.core.
    # Parentheses make the operator precedence explicit: `and` binds tighter
    # than `or`, so this matches how the un-parenthesized condition already
    # evaluated, but is much harder to misread.
    if (what == "module" and name.startswith("zarr.codecs.")) or name.startswith("zarr.core"):
        skip = True
    return skip


# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The main toctree document.
main_doc = "index"

# General information about the project.
project = "zarr"
copyright = "2025, Zarr Developers"
author = "Zarr Developers"

version = get_version("zarr")
release = get_version("zarr")

redirects = {
    "spec": "https://zarr-specs.readthedocs.io",
    "spec/v1": 'https://zarr-specs.readthedocs.io/en/latest/v1/v1.0.html',
    "spec/v2": "https://zarr-specs.readthedocs.io/en/latest/v2/v2.0.html",
    "spec/v3": "https://zarr-specs.readthedocs.io/en/latest/v3/core/v3.0.html",
    "license": "https://github.com/zarr-developers/zarr-python/blob/main/LICENSE.txt",
    "tutorial": "user-guide",
    "getting-started": "quickstart",
    "roadmap": "developers/roadmap.html",
    "installation": "user-guide/installation.html",
    "api": "api/zarr/index",
    "release": "release-notes"
}

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "talks"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pydata_sphinx_theme" html_favicon = "_static/logo1.png" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "github_url": "https://github.com/zarr-developers/zarr-python", "twitter_url": "https://twitter.com/zarr_dev", "icon_links": [ { "name": "Zarr Dev", "url": "https://zarr.dev/", "icon": "_static/logo1.png", "type": "local", }, ], "collapse_navigation": True, "navigation_with_keys": False, "announcement": "Zarr-Python 3 is here! Check out the release announcement here.", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # " v documentation" by default. # html_title = 'zarr v@@' # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/logo_horizontal.svg" def setup(app: sphinx.application.Sphinx) -> None: app.add_css_file("custom.css") app.connect("autoapi-skip-member", skip_submodules) # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_js_files = [ "custom.js", ] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
html_sidebars = {"tutorial": []} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "zarrdoc" maximum_signature_line_length = 80 # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (main_doc, "zarr.tex", "Zarr-Python", author, "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(main_doc, "zarr", "Zarr-Python", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( main_doc, "zarr", "Zarr-Python", author, "zarr", "One line description of project.", "Miscellaneous", ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. # use in refs e.g: # :ref:`comparison manual ` intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "numpy": ("https://numpy.org/doc/stable/", None), "numcodecs": ("https://numcodecs.readthedocs.io/en/stable/", None), } # sphinx-copybutton configuration copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " copybutton_line_continuation_character = "\\" copybutton_prompt_is_regexp = True zarr-python-3.0.6/docs/developers/000077500000000000000000000000001476711733500171335ustar00rootroot00000000000000zarr-python-3.0.6/docs/developers/contributing.rst000066400000000000000000000416661476711733500224110ustar00rootroot00000000000000.. _dev-guide-contributing: Contributing to Zarr ==================== Zarr is a community maintained project. We welcome contributions in the form of bug reports, bug fixes, documentation, enhancement proposals and more. This page provides information on how best to contribute. Asking for help --------------- If you have a question about how to use Zarr, please post your question on StackOverflow using the `"zarr" tag `_. If you don't get a response within a day or two, feel free to raise a `GitHub issue `_ including a link to your StackOverflow question. We will try to respond to questions as quickly as possible, but please bear in mind that there may be periods where we have limited time to answer questions due to other commitments. Bug reports ----------- If you find a bug, please raise a `GitHub issue `_. Please include the following items in a bug report: 1. A minimal, self-contained snippet of Python code reproducing the problem. You can format the code nicely using markdown, e.g.:: ```python import zarr g = zarr.group() # etc. ``` 2. An explanation of why the current behaviour is wrong/not desired, and what you expect instead. 3. Information about the version of Zarr, along with versions of dependencies and the Python interpreter, and installation information. The version of Zarr can be obtained from the ``zarr.__version__`` property. Please also state how Zarr was installed, e.g., "installed via pip into a virtual environment", or "installed using conda". Information about other packages installed can be obtained by executing ``pip freeze`` (if using pip to install packages) or ``conda env export`` (if using conda to install packages) from the operating system command prompt. The version of the Python interpreter can be obtained by running a Python interactive session, e.g.:: $ python Python 3.12.7 | packaged by conda-forge | (main, Oct 4 2024, 15:57:01) [Clang 17.0.6 ] on darwin Enhancement proposals --------------------- If you have an idea about a new feature or some other improvement to Zarr, please raise a `GitHub issue `_ first to discuss. 
We very much welcome ideas and suggestions for how to improve Zarr, but please bear in mind that we are likely to be conservative in accepting proposals for new features. The reasons for this are that we would like to keep the Zarr code base lean and focused on a core set of functionalities, and available time for development, review and maintenance of new features is limited. But if you have a great idea, please don't let that stop you from posting it on GitHub, just please don't be offended if we respond cautiously. Contributing code and/or documentation -------------------------------------- Forking the repository ~~~~~~~~~~~~~~~~~~~~~~ The Zarr source code is hosted on GitHub at the following location: * `https://github.com/zarr-developers/zarr-python `_ You will need your own fork to work on the code. Go to the link above and hit the `"Fork" `_ button. Then clone your fork to your local machine:: $ git clone git@github.com:your-user-name/zarr-python.git $ cd zarr-python $ git remote add upstream git@github.com:zarr-developers/zarr-python.git Creating a development environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To work with the Zarr source code, it is recommended to use `hatch `_ to create and manage development environments. Hatch will automatically install all Zarr dependencies using the same versions as are used by the core developers and continuous integration services. Assuming you have a Python 3 interpreter already installed, and you have cloned the Zarr source code and your current working directory is the root of the repository, you can do something like the following:: $ pip install hatch $ hatch env show # list all available environments To verify that your development environment is working, you can run the unit tests for one of the test environments, e.g.:: $ hatch env run --env test.py3.12-2.1-optional run-pytest Creating a branch ~~~~~~~~~~~~~~~~~ Before you do any new work or submit a pull request, please open an issue on GitHub to report the bug or propose the feature you'd like to add. It's best to synchronize your fork with the upstream repository, then create a new, separate branch for each piece of work you want to do. E.g.:: git checkout main git fetch upstream git checkout -b shiny-new-feature upstream/main git push -u origin shiny-new-feature This changes your working directory to the 'shiny-new-feature' branch. Keep any changes in this branch specific to one bug or feature so it is clear what the branch brings to Zarr. To update this branch with latest code from Zarr, you can retrieve the changes from the main branch and perform a rebase:: git fetch upstream git rebase upstream/main This will replay your commits on top of the latest Zarr git main. If this leads to merge conflicts, these need to be resolved before submitting a pull request. Alternatively, you can merge the changes in from upstream/main instead of rebasing, which can be simpler:: git pull upstream main Again, any conflicts need to be resolved before submitting a pull request. Running the test suite ~~~~~~~~~~~~~~~~~~~~~~ Zarr includes a suite of unit tests. The simplest way to run the unit tests is to activate your development environment (see `creating a development environment`_ above) and invoke:: $ hatch env run --env test.py3.12-2.1-optional run-pytest All tests are automatically run via GitHub Actions for every pull request and must pass before code can be accepted. Test coverage is also collected automatically via the Codecov service. .. 
note::
   Previous versions of Zarr-Python made extensive use of doctests. These tests
   were not maintained during the 3.0 refactor but may be brought back in the
   future. See :issue:`2614` for more details.

Code standards - using pre-commit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

All code must conform to the PEP8 standard. Regarding line length, lines up to 100
characters are allowed, although please try to keep under 90 wherever possible.

``Zarr`` uses a set of ``pre-commit`` hooks and the ``pre-commit`` bot to format,
type-check, and prettify the codebase. ``pre-commit`` can be installed locally by
running::

    $ python -m pip install pre-commit

The hooks can be installed locally by running::

    $ pre-commit install

This will run the checks every time a commit is created locally. These checks will
also run on every commit pushed to an open PR, resulting in some automatic styling
fixes by the ``pre-commit`` bot. The checks will by default only run on the files
modified by a commit, but the checks can be triggered for all the files by running::

    $ pre-commit run --all-files

If you would like to skip the failing checks and push the code for further
discussion, use the ``--no-verify`` option with ``git commit``.

Test coverage
~~~~~~~~~~~~~

.. note::
   Test coverage for Zarr-Python 3 is currently not at 100%. This is a known issue
   and help is welcome to bring test coverage back to 100%. See :issue:`2613` for
   more details.

Zarr strives to maintain 100% test coverage under the latest Python stable release.
Both unit tests and docstring doctests are included when computing coverage.
Running::

    $ hatch env run --env test.py3.12-2.1-optional run-coverage

will automatically run the test suite with coverage and produce an XML coverage
report. This should be 100% before code can be accepted into the main code base.

You can also generate an HTML coverage report by running::

    $ hatch env run --env test.py3.12-2.1-optional run-coverage-html

When submitting a pull request, coverage will also be collected across all supported
Python versions via the Codecov service, and will be reported back within the pull
request. Codecov coverage must also be 100% before code can be accepted.

Documentation
~~~~~~~~~~~~~

Docstrings for user-facing classes and functions should follow the
`numpydoc `_ standard, including sections for Parameters and Examples. All examples
should run and pass as doctests under Python 3.11.

Zarr uses Sphinx for documentation, hosted on readthedocs.org. Documentation is
written in the RestructuredText markup language (.rst files) in the ``docs`` folder.
The documentation consists both of prose and API documentation. All user-facing
classes and functions are included in the API documentation, under the ``docs/api``
folder using the `autodoc `_ extension to sphinx. Any new features or important
usage information should be included in the user-guide (``docs/user-guide``). Any
changes should also be included as a new file in the :file:`changes` directory.

The documentation can be built locally by running::

    $ hatch --env docs run build

The resulting built documentation will be available in the ``docs/_build/html``
folder.

Hatch can also be used to serve a continuously updating version of the documentation
during development at `http://0.0.0.0:8000/ `_. This can be done by running::

    $ hatch --env docs run serve

.. _changelog:

Changelog
~~~~~~~~~

zarr-python uses `towncrier`_ to manage release notes. Most pull requests should
include at least one news fragment describing the changes.

To add a release note, you'll need the GitHub issue or pull request number and the
type of your change (``feature``, ``bugfix``, ``doc``, ``removal``, ``misc``). With
that, run ``towncrier create`` in your development environment, which will prompt
you for the issue number, change type, and the news text::

    towncrier create

Alternatively, you can manually create the files in the ``changes`` directory using
the naming convention ``{issue-number}.{change-type}.rst``.
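For example, a bug-fix note for a hypothetical issue number 1234 would live at
``changes/1234.bugfix.rst`` (the issue number and wording here are illustrative
only) and contain a single short description::

    Fixed indexing error when reading the final chunk of an array.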
See the `towncrier`_ docs for more.

.. _towncrier: https://towncrier.readthedocs.io/en/stable/tutorial.html

Development best practices, policies and procedures
---------------------------------------------------

The following information is mainly for core developers, but may also be of
interest to contributors.

Merging pull requests
~~~~~~~~~~~~~~~~~~~~~

Pull requests submitted by an external contributor should be reviewed and approved
by at least one core developer before being merged. Ideally, pull requests submitted
by a core developer should be reviewed and approved by at least one other core
developer before being merged.

Pull requests should not be merged until all CI checks have passed (GitHub Actions,
Codecov) against code that has had the latest main merged in.

Compatibility and versioning policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Because Zarr is a data storage library, there are two types of compatibility to
consider: API compatibility and data format compatibility.

API compatibility
"""""""""""""""""

All functions, classes and methods that are included in the API documentation (files
under ``docs/api/*.rst``) are considered part of the Zarr **public API**, except if
they have been documented as an experimental feature, in which case they are part of
the **experimental API**.

Any change to the public API that does **not** break existing third party code
importing Zarr, or cause third party code to behave in a different way, is a
**backwards-compatible API change**. For example, adding a new function, class or
method is usually a backwards-compatible change. However, removing a function, class
or method; removing an argument to a function or method; adding a required argument
to a function or method; or changing the behaviour of a function or method, are
examples of **backwards-incompatible API changes**.

If a release contains no changes to the public API (e.g., contains only bug fixes or
other maintenance work), then the micro version number should be incremented (e.g.,
2.2.0 -> 2.2.1). If a release contains public API changes, but all changes are
backwards-compatible, then the minor version number should be incremented (e.g.,
2.2.1 -> 2.3.0). If a release contains any backwards-incompatible public API
changes, the major version number should be incremented (e.g., 2.3.0 -> 3.0.0).

Backwards-incompatible changes to the experimental API can be included in a minor
release, although this should be minimised if possible. I.e., it would be preferable
to save up backwards-incompatible changes to the experimental API to be included in
a major release, and to stabilise those features at the same time (i.e., move from
experimental to public API), rather than frequently tinkering with the experimental
API in minor releases.

Data format compatibility
"""""""""""""""""""""""""

The data format used by Zarr is defined by a specification document, which should be
platform-independent and contain sufficient detail to construct an interoperable
software library to read and/or write Zarr data using any programming language.
The latest version of the specification document is available on the
`Zarr specifications website `_.

Here, **data format compatibility** means that all software libraries that implement
a particular version of the Zarr storage specification are interoperable, in the
sense that data written by any one library can be read by all others. It is
obviously desirable to maintain data format compatibility wherever possible.
However, if a change is needed to the storage specification, and that change would
break data format compatibility in any way, then the storage specification version
number should be incremented (e.g., 2 -> 3).

The versioning of the Zarr software library is related to the versioning of the
storage specification as follows. A particular version of the Zarr library will
implement a particular version of the storage specification. For example, Zarr
version 2.2.0 implements the Zarr storage specification version 2. If a release of
the Zarr library implements a different version of the storage specification, then
the major version number of the Zarr library should be incremented. E.g., if Zarr
version 2.2.0 implements the storage spec version 2, and the next release of the
Zarr library implements storage spec version 3, then the next library release should
have version number 3.0.0.

Note however that the major version number of the Zarr library may not always
correspond to the spec version number. For example, Zarr versions 2.x, 3.x, and 4.x
might all implement the same version of the storage spec and thus maintain data
format compatibility, although they will not maintain API compatibility.

When to make a release
~~~~~~~~~~~~~~~~~~~~~~

Ideally, any bug fixes that don't change the public API should be released as soon
as possible. It is fine for a micro release to contain only a single bug fix.

When to make a minor release is at the discretion of the core developers. There are
no hard-and-fast rules, e.g., it is fine to make a minor release to make a single
new feature available; equally, it is fine to make a minor release that includes a
number of changes.

Major releases obviously need to be given careful consideration, and should be done
as infrequently as possible, as they will break existing code and/or affect data
compatibility in some way.

Release procedure
~~~~~~~~~~~~~~~~~

.. note::
   Most of the release process is now handled by a GitHub workflow which should
   automatically push a release to PyPI if a tag is pushed.

Pre-release
"""""""""""

1. Make sure that all pull requests which will be included in the release have been
   properly documented as changelog files in :file:`changes`.
2. Run ``towncrier build --version x.y.z`` to create the changelog.

Releasing
"""""""""

To make a new release, go to https://github.com/zarr-developers/zarr-python/releases
and click "Draft a new release". Choose a version number prefixed with a `v` (e.g.
`v0.0.0`). For pre-releases, include the appropriate suffix (e.g. `v0.0.0a1` or
`v0.0.0rc2`).

Set the description of the release to::

    See release notes https://zarr.readthedocs.io/en/stable/release-notes.html#release-0-0-0

replacing the correct version numbers. For pre-release versions, the URL should omit
the pre-release suffix, e.g. "a1" or "rc1".

Click on "Generate release notes" to auto-fill the description.

After creating the release, the documentation will be built on
https://readthedocs.io. Full releases will be available under `/stable `_ while
pre-releases will be available under `/latest `_.
Post-release """""""""""" - Review and merge the pull request on the `conda-forge feedstock `_ that will be automatically generated. - Create a new "Unreleased" section in the release notes zarr-python-3.0.6/docs/developers/index.rst000066400000000000000000000001421476711733500207710ustar00rootroot00000000000000 Developer's Guide ----------------- .. toctree:: :maxdepth: 1 contributing roadmap zarr-python-3.0.6/docs/developers/roadmap.rst000066400000000000000000001024231476711733500213120ustar00rootroot00000000000000Roadmap ======= - Status: active - Author: Joe Hamman - Created On: October 31, 2023 - Input from: - Davis Bennett / @d-v-b - Norman Rzepka / @normanrz - Deepak Cherian @dcherian - Brian Davis / @monodeldiablo - Oliver McCormack / @olimcc - Ryan Abernathey / @rabernat - Jack Kelly / @JackKelly - Martin Durrant / @martindurant .. note:: This document was written in the early stages of the 3.0 refactor. Some aspects of the design have changed since this was originally written. Questions and discussion about the contents of this document should be directed to `this GitHub Discussion `__. Introduction ------------ This document lays out a design proposal for version 3.0 of the `Zarr-Python `__ package. A specific focus of the design is to bring Zarr-Python’s API up to date with the `Zarr V3 specification `__, with the hope of enabling the development of the many features and extensions that motivated the V3 Spec. The ideas presented here are expected to result in a major release of Zarr-Python (version 3.0) including significant a number of breaking API changes. For clarity, “V3” will be used to describe the version of the Zarr specification and “3.0” will be used to describe the release tag of the Zarr-Python project. Current status of V3 in Zarr-Python ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ During the development of the V3 Specification, a `prototype implementation `__ was added to the Zarr-Python library. Since that implementation, the V3 spec evolved in significant ways and as a result, the Zarr-Python library is now out of sync with the approved spec. Downstream libraries (e.g. `Xarray `__) have added support for this implementation and will need to migrate to the accepted spec when its available in Zarr-Python. Goals ----- - Provide a complete implementation of Zarr V3 through the Zarr-Python API - Clear the way for exciting extensions / ZEPs (i.e. `sharding `__, `variable chunking `__, etc.) - Provide a developer API that can be used to implement and register V3 extensions - Improve the performance of Zarr-Python by streamlining the interface between the Store layer and higher level APIs (e.g. Groups and Arrays) - Clean up the internal and user facing APIs - Improve code quality and robustness (e.g. achieve 100% type hint coverage) - Align the Zarr-Python array API with the `array API Standard `__ Examples of what 3.0 will enable? --------------------------------- 1. Reading and writing V3 spec-compliant groups and arrays 2. V3 extensions including sharding and variable chunking. 3. Improved performance by leveraging concurrency when creating/reading/writing to stores (imagine a ``create_hierarchy(zarr_objects)`` function). 4. User-developed extensions (e.g. storage-transformers) can be registered with Zarr-Python at runtime Non-goals (of this document) ---------------------------- - Implementation of any unaccepted Zarr V3 extensions - Major revisions to the Zarr V3 spec Requirements ------------ 1. Read and write spec compliant V2 and V3 data 2. 
Limit unnecessary traffic to/from the store
3. Cleanly define the Array/Group/Store abstractions
4. Cleanly define how V2 will be supported going forward
5. Provide a clear roadmap to help users upgrade to 3.0
6. Developer tools / hooks for registering extensions

Design
------

Async API
~~~~~~~~~

Zarr-Python is an IO library. As such, supporting concurrent action against the
storage layer is critical to achieving acceptable performance. Zarr-Python 2 was
not designed with asynchronous computation in mind and as a result has struggled
to effectively leverage the benefits of concurrency. At one point, ``getitems``
and ``setitems`` support was added to the Zarr store model but that is only used
for operating on a set of chunks in a single variable.

With Zarr-Python 3.0, we have the opportunity to revisit this design. The
proposal here is as follows:

1. The ``Store`` interface will be entirely async.
2. On top of the async ``Store`` interface, we will provide an ``AsyncArray`` and
   ``AsyncGroup`` interface.
3. Finally, the primary user facing API will be synchronous ``Array`` and
   ``Group`` classes that wrap the async equivalents.

**Examples**

- **Store**

  .. code:: python

     class Store:
         ...
         async def get(self, key: str) -> bytes:
             ...
         async def get_partial_values(self, key_ranges: List[Tuple[str, Tuple[int, Optional[int]]]]) -> bytes:
             ...
         # (no sync interface here)

- **Array**

  .. code:: python

     class AsyncArray:
         ...
         async def getitem(self, selection: Selection) -> np.ndarray:
             # the core logic for getitem goes here


     class Array:
         _async_array: AsyncArray

         def __getitem__(self, selection: Selection) -> np.ndarray:
             return sync(self._async_array.getitem(selection))

- **Group**

  .. code:: python

     class AsyncGroup:
         ...
         async def create_group(self, path: str, **kwargs) -> AsyncGroup:
             # the core logic for create_group goes here


     class Group:
         _async_group: AsyncGroup

         def create_group(self, path: str, **kwargs) -> Group:
             return sync(self._async_group.create_group(path, **kwargs))

**Internal Synchronization API**

With the ``Store`` and core ``AsyncArray``/ ``AsyncGroup`` classes being
predominantly async, Zarr-Python will need an internal API to provide a
synchronous API. The proposal here is to use the approach in
`fsspec `__ to provide a high-level ``sync`` function that takes an
``awaitable`` and runs it in its managed IO Loop / thread.
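As a rough sketch of how such a helper could work (this illustrates the
fsspec-style pattern referenced above, not the final implementation — the names
and details here are assumptions), the coroutine is submitted to a dedicated
event loop running in a background thread and the caller blocks on the result:

.. code:: python

   import asyncio
   import threading
   from typing import Any, Coroutine

   _loop: "asyncio.AbstractEventLoop | None" = None

   def _get_loop() -> asyncio.AbstractEventLoop:
       # Lazily start a single IO loop in a daemon thread, shared by all
       # synchronous wrappers (locking omitted for brevity in this sketch).
       global _loop
       if _loop is None:
           _loop = asyncio.new_event_loop()
           threading.Thread(target=_loop.run_forever, daemon=True).start()
       return _loop

   def sync(coro: Coroutine[Any, Any, Any]) -> Any:
       # Submit the awaitable to the background loop and block until done.
       future = asyncio.run_coroutine_threadsafe(coro, _get_loop())
       return future.result()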
| **FAQ**

1. Why two levels of Arrays/groups?

   a. First, this is an intentional decision and departure from the current
      Zarrita implementation
   b. The idea is that users rarely want to mix interfaces. Either they are
      working within an async context (currently quite rare) or they are in a
      typical synchronous context.
   c. Splitting the two will allow us to clearly define behavior on the
      ``AsyncObj`` and simply wrap it in the ``SyncObj``.

2. What if a store only has a synchronous backend?

   a. First off, this is expected to be a fairly rare occurrence. Most storage
      backends have async interfaces.
   b. But in the event a storage backend doesn't have an async interface, there
      is nothing wrong with putting synchronous code in ``async`` methods. There
      are approaches to enabling concurrent action through wrappers like
      AsyncIO's ``loop.run_in_executor`` (`ref 1 `__, `ref 2 `__, `ref 3 `__,
      `ref 4 `__).

3. Will Zarr help manage the async contexts encouraged by some libraries
   (e.g. `AioBotoCore `__)?

   a. Many async IO libraries require entering an async context before
      interacting with the API. We expect some experimentation to be needed here
      but the initial design will follow something close to what fsspec does
      (`example in s3fs `__).

4. Why not provide a synchronous Store interface?

   a. We could but this design is simpler. It would mean supporting it in the
      ``AsyncGroup`` and ``AsyncArray`` classes which may be more trouble than
      it's worth. Storage backends that do not have an async API will be
      encouraged to wrap blocking calls in an async wrapper
      (e.g. ``loop.run_in_executor``).

Store API
~~~~~~~~~

The ``Store`` API is specified directly in the V3 specification. All V3 stores
should implement this abstract API, omitting Write and List support as needed.
As described above, all stores will be expected to expose the required methods
as async methods.

**Example**

.. code:: python

   class ReadWriteStore:
       ...
       async def get(self, key: str) -> bytes:
           ...

       async def get_partial_values(self, key_ranges: List[Tuple[str, int, int]]) -> bytes:
           ...

       async def set(self, key: str, value: Union[bytes, bytearray, memoryview]) -> None:
           ...  # required for writable stores

       async def set_partial_values(self, key_start_values: List[Tuple[str, int, Union[bytes, bytearray, memoryview]]]) -> None:
           ...  # required for writable stores

       async def list(self) -> List[str]:
           ...  # required for listable stores

       async def list_prefix(self, prefix: str) -> List[str]:
           ...  # required for listable stores

       async def list_dir(self, prefix: str) -> List[str]:
           ...  # required for listable stores

       # additional (optional methods)
       async def getsize(self, prefix: str) -> int:
           ...

       async def rename(self, src: str, dest: str) -> None:
           ...

Recognizing that there are many Zarr applications today that rely on the
``MutableMapping`` interface supported by Zarr-Python 2, a wrapper store will be
developed to allow existing stores to plug directly into this API.

Array API
~~~~~~~~~

The user facing array interface will implement a subset of the
`Array API Standard `__. Most of the computational parts of the Array API
Standard don't fit into Zarr right now. That's okay. What matters most is that
we ensure we can give downstream applications a compliant API.
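To make the wrapping concrete, here is a minimal sketch of how the synchronous
``Array`` could surface a few of the standard attributes (the ``AsyncArray``
attributes used here are assumptions about its eventual shape, not settled API):

.. code:: python

   import math

   class Array:
       _async_array: AsyncArray

       @property
       def shape(self) -> "tuple[int, ...]":
           # Shape is known from metadata, so no IO (and no event loop) is needed.
           return self._async_array.shape

       @property
       def ndim(self) -> int:
           return len(self.shape)

       @property
       def size(self) -> int:
           return math.prod(self.shape)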
*Note, Zarr already does most of this so this is more about formalizing the relationship than a substantial change in API.* +------------------------+------------------------+-------------------------+-------------------------+ | | Included | Not Included | Unknown / Maybe Possible| +========================+========================+=========================+=========================+ | **Attributes** | ``dtype`` | ``mT`` | ``device`` | +------------------------+------------------------+-------------------------+-------------------------+ | | ``ndim`` | ``T`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | ``shape`` | | | +------------------------+------------------------+-------------------------+-------------------------+ | | ``size`` | | | +------------------------+------------------------+-------------------------+-------------------------+ | **Methods** | ``__getitem__`` | ``__array_namespace__`` | ``to_device`` | +------------------------+------------------------+-------------------------+-------------------------+ | | ``__setitem__`` | ``__abs__`` | ``__bool__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | ``__eq__`` | ``__add__`` | ``__complex__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | ``__bool__`` | ``__and__`` | ``__dlpack__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__floordiv__`` | ``__dlpack_device__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__ge__`` | ``__float__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__gt__`` | ``__index__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__invert__`` | ``__int__`` | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__le__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__lshift__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__lt__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__matmul__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__mod__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__mul__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__ne__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__neg__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__or__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__pos__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__pow__`` | | +------------------------+------------------------+-------------------------+-------------------------+ | | | ``__rshift__`` | | 
+------------------------+------------------------+-------------------------+-------------------------+
|                        |                        | ``__sub__``             |                         |
+------------------------+------------------------+-------------------------+-------------------------+
|                        |                        | ``__truediv__``         |                         |
+------------------------+------------------------+-------------------------+-------------------------+
|                        |                        | ``__xor__``             |                         |
+------------------------+------------------------+-------------------------+-------------------------+
| **Creation functions** | ``zeros``              |                         | ``arange``              |
| (``zarr.creation``)    |                        |                         |                         |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``zeros_like``         |                         | ``asarray``             |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``ones``               |                         | ``eye``                 |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``ones_like``          |                         | ``from_dlpack``         |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``full``               |                         | ``linspace``            |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``full_like``          |                         | ``meshgrid``            |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``empty``              |                         | ``tril``                |
+------------------------+------------------------+-------------------------+-------------------------+
|                        | ``empty_like``         |                         | ``triu``                |
+------------------------+------------------------+-------------------------+-------------------------+

In addition to the core array API defined above, the Array class should have the
following Zarr specific properties:

- ``.metadata`` (see Metadata Interface below)
- ``.attrs`` - (pulled from metadata object)
- ``.info`` - (replicated from existing property †)

*† In Zarr-Python 2, the info property listed the store to identify initialized
chunks. By default this will be turned off in 3.0 but will be configurable.*

**Indexing**

Zarr-Python currently supports ``__getitem__`` style indexing and the special
``oindex`` and ``vindex`` indexers. These are not part of the current Array API
standard (see `data-apis/array-api#669 `__) but they have been
`proposed as a NEP `__. Zarr-Python will maintain these in 3.0.

We are also exploring a new high-level indexing API that will enable optimized
batch/concurrent loading of many chunks. We expect this to be important to enable
performant loading of data in the context of sharding. See
`this discussion `__ for more detail.

Concurrent indexing across multiple arrays will be possible using the AsyncArray
API.

**Async and Sync Array APIs**

Most of the logic to support Zarr Arrays will live in the ``AsyncArray`` class.
There are a few notable differences that should be called out.

=============== ============
Sync Method     Async Method
=============== ============
``__getitem__`` ``getitem``
``__setitem__`` ``setitem``
``__eq__``      ``equals``
=============== ============

**Metadata interface**

Zarr-Python 2.\* closely mirrors the V2 spec metadata schema in the Array and
Group classes. In 3.0, we plan to move the underlying metadata representation to a
separate interface (e.g. ``Array.metadata``). This interface will return either a
``V2ArrayMetadata`` or ``V3ArrayMetadata`` object (both will inherit from a parent
``ArrayMetadataABC`` class).
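One hedged sketch of that split (only the class names come from the design above;
the method names below are illustrative assumptions):

.. code:: python

   from abc import ABC, abstractmethod
   from typing import Any

   class ArrayMetadataABC(ABC):
       # Common view consumed by Array/Group regardless of spec version.
       @abstractmethod
       def to_dict(self) -> "dict[str, Any]":
           """Return a JSON-serializable representation of the metadata."""

   class V2ArrayMetadata(ArrayMetadataABC):
       def to_dict(self) -> "dict[str, Any]":
           # Would emit V2-style keys (e.g. zarr_format=2, .zarray layout).
           ...

   class V3ArrayMetadata(ArrayMetadataABC):
       def to_dict(self) -> "dict[str, Any]":
           # Would emit V3-style keys (e.g. zarr_format=3, zarr.json layout).
           ...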
The ``V2ArrayMetadata`` and ``V3ArrayMetadata`` classes will be responsible for producing valid JSON representations of their metadata, and yielding a consistent view to the ``Array`` or ``Group`` class. Group API ~~~~~~~~~ The main question is how closely we should follow the existing Zarr-Python implementation / ``MutableMapping`` interface. The table below shows the primary ``Group`` methods in Zarr-Python 2 and attempts to identify if and how they would be implemented in 3.0. +---------------------+------------------+------------------+-----------------------+ | V2 Group Methods | ``AsyncGroup`` | ``Group`` | ``h5py_compat.Group`` | +=====================+==================+==================+=======================+ | ``__len__`` | ``length`` | ``__len__`` | ``__len__`` | +---------------------+------------------+------------------+-----------------------+ | ``__iter__`` | ``__aiter__`` | ``__iter__`` | ``__iter__`` | +---------------------+------------------+------------------+-----------------------+ | ``__contains__`` | ``contains`` | ``__contains__`` | ``__contains__`` | +---------------------+------------------+------------------+-----------------------+ | ``__getitem__`` | ``getitem`` | ``__getitem__`` | ``__getitem__`` | +---------------------+------------------+------------------+-----------------------+ | ``__enter__`` | N/A | N/A | ``__enter__`` | +---------------------+------------------+------------------+-----------------------+ | ``__exit__`` | N/A | N/A | ``__exit__`` | +---------------------+------------------+------------------+-----------------------+ | ``group_keys`` | ``group_keys`` | ``group_keys`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``groups`` | ``groups`` | ``groups`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``array_keys`` | ``array_key`` | ``array_keys`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``arrays`` | ``arrays`` | ``arrays`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``visit`` | ? | ? | ``visit`` | +---------------------+------------------+------------------+-----------------------+ | ``visitkeys`` | ? | ? | ? | +---------------------+------------------+------------------+-----------------------+ | ``visitvalues`` | ? | ? | ? | +---------------------+------------------+------------------+-----------------------+ | ``visititems`` | ? | ? | ``visititems`` | +---------------------+------------------+------------------+-----------------------+ | ``tree`` | ``tree`` | ``tree`` | ``Both`` | +---------------------+------------------+------------------+-----------------------+ | ``create_group`` | ``create_group`` | ``create_group`` | ``create_group`` | +---------------------+------------------+------------------+-----------------------+ | ``require_group`` | N/A | N/A | ``require_group`` | +---------------------+------------------+------------------+-----------------------+ | ``create_groups`` | ? | ? | N/A | +---------------------+------------------+------------------+-----------------------+ | ``require_groups`` | ? | ? | ? 
| +---------------------+------------------+------------------+-----------------------+ | ``create_dataset`` | N/A | N/A | ``create_dataset`` | +---------------------+------------------+------------------+-----------------------+ | ``require_dataset`` | N/A | N/A | ``require_dataset`` | +---------------------+------------------+------------------+-----------------------+ | ``create`` | ``create_array`` | ``create_array`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``empty`` | ``empty`` | ``empty`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``zeros`` | ``zeros`` | ``zeros`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``ones`` | ``ones`` | ``ones`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``full`` | ``full`` | ``full`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``array`` | ``create_array`` | ``create_array`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``empty_like`` | ``empty_like`` | ``empty_like`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``zeros_like`` | ``zeros_like`` | ``zeros_like`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``ones_like`` | ``ones_like`` | ``ones_like`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``full_like`` | ``full_like`` | ``full_like`` | N/A | +---------------------+------------------+------------------+-----------------------+ | ``move`` | ``move`` | ``move`` | ``move`` | +---------------------+------------------+------------------+-----------------------+ **``zarr.h5compat.Group``** -- Zarr-Python 2.\* made an attempt to align its API with that of `h5py `__. With 3.0, we will relax this alignment in favor of providing an explicit compatibility module (``zarr.h5py_compat``). This module will expose the ``Group`` and ``Dataset`` APIs that map to Zarr-Python’s ``Group`` and ``Array`` objects. Creation API ~~~~~~~~~~~~ Zarr-Python 2.\* bundles together the creation and serialization of Zarr objects. Zarr-Python 3.\* will make it possible to create objects in memory separate from serializing them. This will specifically enable writing hierarchies of Zarr objects in a single batch step. For example: .. code:: python arr1 = Array(shape=(10, 10), path="foo/bar", dtype="i4", store=store) arr2 = Array(shape=(10, 10), path="foo/spam", dtype="f8", store=store) arr1.save() arr2.save() # or equivalently zarr.save_many([arr1 ,arr2]) *Note: this batch creation API likely needs additional design effort prior to implementation.* Plugin API ~~~~~~~~~~ Zarr V3 was designed to be extensible at multiple layers. Zarr-Python will support these extensions through a combination of `Abstract Base Classes `__ (ABCs) and `Entrypoints `__. 
**ABCs** Zarr V3 will expose Abstract base classes for the following objects: - ``Store``, ``ReadStore``, ``ReadWriteStore``, ``ReadListStore``, and ``ReadWriteListStore`` - ``BaseArray``, ``SynchronousArray``, and ``AsynchronousArray`` - ``BaseGroup``, ``SynchronousGroup``, and ``AsynchronousGroup`` - ``Codec``, ``ArrayArrayCodec``, ``ArrayBytesCodec``, ``BytesBytesCodec`` **Entrypoints** Lots more thinking here but the idea here is to provide entrypoints for ``data type``, ``chunk grid``, ``chunk key encoding``, ``codecs``, ``storage_transformers`` and ``stores``. These might look something like: :: entry_points=""" [zarr.codecs] blosc_codec=codec_plugin:make_blosc_codec zlib_codec=codec_plugin:make_zlib_codec """ Python type hints and static analysis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Target 100% Mypy coverage in 3.0 source. Observability ~~~~~~~~~~~~~ A persistent problem in Zarr-Python is diagnosing problems that span many parts of the stack. To address this in 3.0, we will add a basic logging framework that can be used to debug behavior at various levels of the stack. We propose to add the separate loggers for the following namespaces: - ``array`` - ``group`` - ``store`` - ``codec`` These should be documented such that users know how to activate them and developers know how to use them when developing extensions. Dependencies ~~~~~~~~~~~~ Today, Zarr-Python has the following required dependencies: .. code:: python dependencies = [ 'asciitree', 'numpy>=1.20,!=1.21.0', 'fasteners', 'numcodecs>=0.10.0', ] What other dependencies should be considered? 1. Attrs - Zarrita makes extensive use of the Attrs library 2. Fsspec - Zarrita has a hard dependency on Fsspec. This could be easily relaxed though. Breaking changes relative to Zarr-Python 2.\* --------------------------------------------- 1. H5py compat moved to a stand alone module? 2. ``Group.__getitem__`` support moved to ``Group.members.__getitem__``? 3. Others? Open questions -------------- 1. How to treat V2 a. Note: Zarrita currently implements a separate ``V2Array`` and ``V3Array`` classes. This feels less than ideal. b. We could easily convert metadata from v2 to the V3 Array, but what about writing? c. Ideally, we don’t have completely separate code paths. But if its too complicated to support both within one interface, its probably better. 2. How and when to remove the current implementation of V3. a. It’s hidden behind a hard-to-use feature flag so we probably don’t need to do anything. 3. How to model runtime configuration? 4. Which extensions belong in Zarr-Python and which belong in separate packages? a. We don’t need to take a strong position on this here. It’s likely that someone will want to put Sharding in. That will be useful to develop in parallel because it will give us a good test case for the plugin interface. Testing ------- Zarr-python 3.0 adds a major new dimension to Zarr: Async support. This also comes with a compatibility risk, we will need to thoroughly test support in key execution environments. Testing plan: - Reuse the existing test suite for testing the ``v3`` API. - ``xfail`` tests that expose breaking changes with ``3.0 - breaking change`` description. This will help identify additional and/or unintentional breaking changes - Rework tests that were only testing internal APIs. - Add a set of functional / integration tests targeting real-world workflows in various contexts (e.g. 
  w/ Dask)

Development process
-------------------

Zarr-Python 3.0 will introduce a number of new APIs and breaking changes to existing APIs. In order to facilitate ongoing support for Zarr-Python 2.*, we will adopt the following development process:

- Create a ``v3`` branch that can be used for developing the core functionality apart from the ``main`` branch. This will allow us to support ongoing work and bug fixes on the ``main`` branch.
- Put the ``3.0`` APIs inside a ``zarr.v3`` module. Imports from this namespace will all be new APIs that users can develop and test against once the ``v3`` branch is merged to ``main``.
- Kickstart the process by pulling in the current state of ``zarrita`` - which has many of the features described in this design.
- Release a series of 2.\* releases with the ``v3`` namespace.
- When ``v3`` is complete, move the contents of ``v3`` to the package root.

**Milestones**

Below are a set of specific milestones leading toward the completion of this process. As work begins, we expect this list to grow in specificity.

1. Port the current version of Zarrita to Zarr-Python
2. Formalize the async interface by splitting ``Array`` and ``Group`` objects into sync and async versions
3. Implement “fancy” indexing operations on the ``AsyncArray``
4. Implement an abstract base class for the ``Store`` interface and a wrapper ``Store`` to make use of existing ``MutableMapping`` stores.
5. Rework the existing unit test suite to use the ``v3`` namespace.
6. Develop a plugin interface for extensions
7. Develop a set of functional and integration tests
8. Work with downstream libraries (Xarray, Dask, etc.) to test new APIs

TODOs
-----

The following subjects are not covered in detail above but perhaps should be. Including them here so they are not forgotten.

1. [Store] Should Zarr provide an API for caching objects after first read/list/etc.? Read-only stores?
2. [Array] Buffer protocol support
3. [Array] ``meta_array`` support
4. [Extensions] Define how Zarr-Python will consume the various plugin types
5. [Misc] H5py compatibility requires a bit more work and a champion to drive it forward.
6. [Misc] Define the ``chunk_store`` API in 3.0
7. [Misc] Define the ``synchronizer`` API in 3.0

References
----------

1. `Zarr-Python repository `__
2. `Zarr core specification (version 3.0) — Zarr specs documentation `__
3. `Zarrita repository `__
4. `Async-Zarr `__
5. `Zarr-Python Discussion Topic `__

zarr-python-3.0.6/docs/index.rst

.. _zarr_docs_mainpage:

***********
Zarr-Python
***********

.. toctree::
    :maxdepth: 1
    :hidden:

    quickstart
    user-guide/index
    API reference
    release-notes
    developers/index
    about

**Version**: |version|

**Useful links**: `Source Repository `_ | `Issue Tracker `_ | `Developer Chat `_ | `Zarr specifications `_

Zarr-Python is a Python library for reading and writing Zarr groups and arrays. Highlights include:

* Specification support for both Zarr format 2 and 3.
* Create and read from N-dimensional arrays using NumPy-like semantics.
* Flexible storage enables reading and writing from local, cloud and in-memory stores.
* High performance: Enables fast I/O with support for asynchronous I/O and multi-threading.
* Extensible: Customizable with user-defined codecs and stores.

.. grid:: 2

    .. grid-item-card::
        :img-top: _static/index_getting_started.svg

        Quick Start
        ^^^^^^^^^^^

        New to Zarr? Check out the quick start guide. It contains a brief introduction to Zarr's main concepts and links to additional tutorials.

        +++

        .. button-ref:: quickstart
            :expand:
            :color: dark
            :click-parent:

            To the Quick Start

    .. grid-item-card::
        :img-top: _static/index_user_guide.svg

        Guide
        ^^^^^

        A detailed guide for how to use Zarr-Python.

        +++

        .. button-ref:: user-guide/index
            :expand:
            :color: dark
            :click-parent:

            To the user guide

    .. grid-item-card::
        :img-top: _static/index_api.svg

        API Reference
        ^^^^^^^^^^^^^

        The reference guide contains a detailed description of the functions, modules, and objects included in Zarr. The reference describes how the methods work and which parameters can be used. It assumes that you have an understanding of the key concepts.

        +++

        .. button-ref:: api/zarr/index
            :expand:
            :color: dark
            :click-parent:

            To the API reference

    .. grid-item-card::
        :img-top: _static/index_contribute.svg

        Contributor's Guide
        ^^^^^^^^^^^^^^^^^^^

        Want to contribute to Zarr? We welcome contributions in the form of bug reports, bug fixes, documentation, enhancement proposals and more. The contributing guidelines will guide you through the process of improving Zarr.

        +++

        .. button-ref:: developers/contributing
            :expand:
            :color: dark
            :click-parent:

            To the contributor's guide

**Download documentation**: `PDF/Zipped HTML `_

.. _NumCodecs: https://numcodecs.readthedocs.io

zarr-python-3.0.6/docs/quickstart.rst

.. only:: doctest

    >>> import shutil
    >>> shutil.rmtree('data', ignore_errors=True)
    >>>
    >>> import numpy as np
    >>> np.random.seed(0)

Quickstart
==========

Welcome to the Zarr-Python Quickstart guide! This page will help you get up and running with the Zarr library in Python to efficiently manage and analyze multi-dimensional arrays.

Zarr is a powerful library for storage of n-dimensional arrays, supporting chunking, compression, and various backends, making it a versatile choice for scientific and large-scale data.

Installation
------------

Zarr requires Python 3.11 or higher. You can install it via `pip`:

.. code-block:: bash

    pip install zarr

or `conda`:

.. code-block:: bash

    conda install --channel conda-forge zarr

Creating an Array
-----------------

To get started, you can create a simple Zarr array::

    >>> import zarr
    >>> import numpy as np
    >>>
    >>> # Create a 2D Zarr array
    >>> z = zarr.create_array(
    ...     store="data/example-1.zarr",
    ...     shape=(100, 100),
    ...     chunks=(10, 10),
    ...     dtype="f4"
    ... )
    >>>
    >>> # Assign data to the array
    >>> z[:, :] = np.random.random((100, 100))
    >>> z.info
    Type        : Array
    Zarr format : 3
    Data type   : DataType.float32
    Shape       : (100, 100)
    Chunk shape : (10, 10)
    Order       : C
    Read-only   : False
    Store type  : LocalStore
    Codecs      : [{'endian': }, {'level': 0, 'checksum': False}]
    No. bytes   : 40000 (39.1K)

Here, we created a 2D array of shape ``(100, 100)``, chunked into blocks of ``(10, 10)``, and filled it with random floating-point data. This array was written to a ``LocalStore`` in the ``data/example-1.zarr`` directory.

Compression and Filters
~~~~~~~~~~~~~~~~~~~~~~~

Zarr supports data compression and filters. For example, to use Blosc compression::

    >>> z = zarr.create_array(
    ...     "data/example-3.zarr",
    ...     mode="w", shape=(100, 100),
    ...     chunks=(10, 10), dtype="f4",
    ...     compressors=zarr.codecs.BloscCodec(cname="zstd", clevel=3, shuffle=zarr.codecs.BloscShuffle.shuffle)
    ... )
    >>> z[:, :] = np.random.random((100, 100))
    >>>
    >>> z.info
    Type        : Array
    Zarr format : 3
    Data type   : DataType.float32
    Shape       : (100, 100)
    Chunk shape : (10, 10)
    Order       : C
    Read-only   : False
    Store type  : LocalStore
    Codecs      : [{'endian': }, {'level': 0, 'checksum': False}]
    No. bytes   : 40000 (39.1K)

This compresses the data using the Zstandard codec with shuffle enabled for better compression.

Hierarchical Groups
-------------------

Zarr allows you to create hierarchical groups, similar to directories::

    >>> # Create nested groups and add arrays
    >>> root = zarr.group("data/example-2.zarr")
    >>> foo = root.create_group(name="foo")
    >>> bar = root.create_array(
    ...     name="bar", shape=(100, 10), chunks=(10, 10), dtype="f4"
    ... )
    >>> spam = foo.create_array(name="spam", shape=(10,), dtype="i4")
    >>>
    >>> # Assign values
    >>> bar[:, :] = np.random.random((100, 10))
    >>> spam[:] = np.arange(10)
    >>>
    >>> # print the hierarchy
    >>> root.tree()
    /
    ├── bar (100, 10) float32
    └── foo
        └── spam (10,) int32

This creates a root group containing a sub-group ``foo`` (which holds the array ``spam``) and an array ``bar``.

Batch Hierarchy Creation
~~~~~~~~~~~~~~~~~~~~~~~~

Zarr provides tools for creating a collection of arrays and groups with a single function call. Suppose we want to copy existing groups and arrays into a new storage backend:

    >>> # Create nested groups and add arrays
    >>> root = zarr.group("data/example-3.zarr", attributes={'name': 'root'})
    >>> foo = root.create_group(name="foo")
    >>> bar = root.create_array(
    ...     name="bar", shape=(100, 10), chunks=(10, 10), dtype="f4"
    ... )
    >>> nodes = {'': root.metadata} | {k: v.metadata for k,v in root.members()}
    >>> print(nodes)
    >>> from zarr.storage import MemoryStore
    >>> new_nodes = dict(zarr.create_hierarchy(store=MemoryStore(), nodes=nodes))
    >>> new_root = new_nodes['']
    >>> assert new_root.attrs == root.attrs

Note that :func:`zarr.create_hierarchy` will only initialize arrays and groups -- copying array data must be done in a separate step.

Persistent Storage
------------------

Zarr supports persistent storage to disk or cloud-compatible backends. While examples above utilized a :class:`zarr.storage.LocalStore`, a number of other storage options are available.

Zarr integrates seamlessly with cloud object storage such as Amazon S3 and Google Cloud Storage using external libraries like `s3fs `_ or `gcsfs `_::

    >>> import s3fs  # doctest: +SKIP
    >>>
    >>> z = zarr.create_array("s3://example-bucket/foo", mode="w", shape=(100, 100), chunks=(10, 10), dtype="f4")  # doctest: +SKIP
    >>> z[:, :] = np.random.random((100, 100))  # doctest: +SKIP

A single-file store can also be created using the :class:`zarr.storage.ZipStore`::

    >>> # Store the array in a ZIP file
    >>> store = zarr.storage.ZipStore("data/example-3.zip", mode='w')
    >>>
    >>> z = zarr.create_array(
    ...     store=store,
    ...     mode="w",
    ...     shape=(100, 100),
    ...     chunks=(10, 10),
    ...     dtype="f4"
    ... )
    >>>
    >>> # write to the array
    >>> z[:, :] = np.random.random((100, 100))
    >>>
    >>> # the ZipStore must be explicitly closed
    >>> store.close()

To open an existing array from a ZIP file::

    >>> # Open the ZipStore in read-only mode
    >>> store = zarr.storage.ZipStore("data/example-3.zip", read_only=True)
    >>>
    >>> z = zarr.open_array(store, mode='r')
    >>>
    >>> # read the data as a NumPy Array
    >>> z[:]
    array([[0.66734236, 0.15667458, 0.98720884, ..., 0.36229587, 0.67443246,
            0.34315267],
           [0.65787303, 0.9544212 , 0.4830079 , ..., 0.33097172, 0.60423803,
            0.45621237],
           [0.27632037, 0.9947008 , 0.42434934, ..., 0.94860053, 0.6226942 ,
            0.6386924 ],
           ...,
           [0.12854576, 0.934397  , 0.19524333, ..., 0.11838563, 0.4967675 ,
            0.43074256],
           [0.82029045, 0.4671437 , 0.8090906 , ..., 0.7814118 , 0.42650765,
            0.95929915],
           [0.4335856 , 0.7565437 , 0.7828931 , ..., 0.48119593, 0.66220033,
            0.6652362 ]], shape=(100, 100), dtype=float32)

Read more about Zarr's storage options in the :ref:`User Guide `.

Next Steps
----------

Now that you're familiar with the basics, explore the following resources:

- `User Guide `_
- `API Reference `_

zarr-python-3.0.6/docs/release-notes.rst

Release notes
=============

.. towncrier release notes start

3.0.6 (2025-03-20)
------------------

Bugfixes
~~~~~~~~

- Restore functionality of `del z.attrs['key']` to actually delete the key. (:issue:`2908`)

3.0.5 (2025-03-07)
------------------

Bugfixes
~~~~~~~~

- Fixed a bug where ``StorePath`` creation would not apply standard path normalization to the ``path`` parameter, which led to the creation of arrays and groups with invalid keys. (:issue:`2850`)
- Prevent update_attributes calls from deleting old attributes (:issue:`2870`)

Misc
~~~~

- :issue:`2796`

3.0.4 (2025-02-23)
------------------

Features
~~~~~~~~

- Adds functions for concurrently creating multiple arrays and groups. (:issue:`2665`)

Bugfixes
~~~~~~~~

- Fixed a bug where ``ArrayV2Metadata`` could save ``filters`` as an empty array. (:issue:`2847`)
- Fix a bug when setting values of a smaller last chunk. (:issue:`2851`)

Misc
~~~~

- :issue:`2828`

3.0.3 (2025-02-14)
------------------

Features
~~~~~~~~

- Improves performance of FsspecStore.delete_dir for remote filesystems supporting concurrent/batched deletes, e.g., s3fs. (:issue:`2661`)
- Added :meth:`zarr.config.enable_gpu` to update Zarr's configuration to use GPUs. (:issue:`2751`)
- Avoid reading chunks during writes where possible. :issue:`757` (:issue:`2784`)
- :py:class:`LocalStore` learned to ``delete_dir``. This makes array and group deletes more efficient. (:issue:`2804`)
- Add `zarr.testing.strategies.array_metadata` to generate ArrayV2Metadata and ArrayV3Metadata instances. (:issue:`2813`)
- Add arbitrary `shards` to Hypothesis strategy for generating arrays. (:issue:`2822`)

Bugfixes
~~~~~~~~

- Fixed bug with Zarr using device memory, instead of host memory, for storing metadata when using GPUs. (:issue:`2751`)
- The array returned by ``zarr.empty`` and an empty ``zarr.core.buffer.cpu.NDBuffer`` will now be filled with the specified fill value, or with zeros if no fill value is provided. This fixes a bug where Zarr format 2 data with no fill value was written with un-predictable chunk sizes. (:issue:`2755`)
- Fix zip-store path checking for stores with directories listed as files.
  (:issue:`2758`)
- Use removeprefix rather than replace when removing filename prefixes in `FsspecStore.list` (:issue:`2778`)
- Enable automatic removal of `needs release notes` with labeler action (:issue:`2781`)
- Use the proper label config (:issue:`2785`)
- Alters the behavior of ``create_array`` to ensure that any groups implied by the array's name are created if they do not already exist. Also simplifies the type signature for any function that takes an ArrayConfig-like object. (:issue:`2795`)
- Initialise empty chunks to the default fill value during writing and add default fill values for datetime, timedelta, structured, and other (void* fixed size) data types (:issue:`2799`)
- Ensure utf8 compliant strings are used to construct numpy arrays in property-based tests (:issue:`2801`)
- Fix pickling for ZipStore (:issue:`2807`)
- Update numcodecs to not overwrite codec configuration ever. Closes :issue:`2800`. (:issue:`2811`)
- Fix fancy indexing (e.g. arr[5, [0, 1]]) with the sharding codec (:issue:`2817`)

Improved Documentation
~~~~~~~~~~~~~~~~~~~~~~

- Added new user guide on :ref:`user-guide-gpu`. (:issue:`2751`)

3.0.2 (2025-01-31)
------------------

Features
~~~~~~~~

- Test ``getsize()`` and ``getsize_prefix()`` in ``StoreTests``. (:issue:`2693`)
- Test that a ``ValueError`` is raised for invalid byte range syntax in ``StoreTests``. (:issue:`2693`)
- Separate instantiating and opening a store in ``StoreTests``. (:issue:`2693`)
- Add a test for using Stores as context managers in ``StoreTests``. (:issue:`2693`)
- Implemented ``LoggingStore.open()``. (:issue:`2693`)
- ``LoggingStore`` is now a generic class. (:issue:`2693`)
- Change StoreTest's ``test_store_repr``, ``test_store_supports_writes``, ``test_store_supports_partial_writes``, and ``test_store_supports_listing`` to be implemented using ``@abstractmethod``, rather than raising ``NotImplementedError``. (:issue:`2693`)
- Test the error raised for invalid buffer arguments in ``StoreTests``. (:issue:`2693`)
- Test that data can be written to a store that's not yet open using the store.set method in ``StoreTests``. (:issue:`2693`)
- Adds a new function ``init_array`` for initializing an array in storage, and refactors ``create_array`` to use ``init_array``. ``create_array`` takes two new parameters: ``data``, an optional array-like object, and ``write_data``, a bool which defaults to ``True``. If ``data`` is given to ``create_array``, then the ``dtype`` and ``shape`` attributes of ``data`` are used to define the corresponding attributes of the resulting Zarr array. Additionally, if ``data`` is given and ``write_data`` is ``True``, then the values in ``data`` will be written to the newly created array. (:issue:`2761`)

Bugfixes
~~~~~~~~

- Wrap sync fsspec filesystems with ``AsyncFileSystemWrapper``. (:issue:`2533`)
- Added backwards compatibility for Zarr format 2 structured arrays. (:issue:`2681`)
- Update equality for ``LoggingStore`` and ``WrapperStore`` such that 'other' must also be a ``LoggingStore`` or ``WrapperStore`` respectively, rather than only checking the types of the stores they wrap. (:issue:`2693`)
- Ensure that ``ZipStore`` is open before getting or setting any values. (:issue:`2693`)
- Use stdout rather than stderr as the default stream for ``LoggingStore``. (:issue:`2693`)
- Match the errors raised by read-only stores in ``StoreTests``. (:issue:`2693`)
- Fixed ``ZipStore`` to make sure the correct attributes are saved when instances are pickled.
  This fixes a previous bug that prevented using ``ZipStore`` with a ``ProcessPoolExecutor``. (:issue:`2762`)
- Updated the optional test dependencies to include ``botocore`` and ``fsspec``. (:issue:`2768`)
- Fixed the fsspec tests to skip if ``botocore`` is not installed. Previously they would have failed with an import error. (:issue:`2768`)
- Optimize full chunk writes. (:issue:`2782`)

Improved Documentation
~~~~~~~~~~~~~~~~~~~~~~

- Changed the machinery for creating changelog entries. Now individual entries should be added as files to the `changes` directory in the `zarr-python` repository, instead of directly to the changelog file. (:issue:`2736`)

Other
~~~~~

- Created a type alias ``ChunkKeyEncodingLike`` to model the union of ``ChunkKeyEncoding`` instances and the dict form of the parameters of those instances. ``ChunkKeyEncodingLike`` should be used by high-level functions to provide a convenient way for creating ``ChunkKeyEncoding`` objects. (:issue:`2763`)

3.0.1 (Jan. 17, 2025)
---------------------

Bug fixes
~~~~~~~~~

* Fixes ``order`` argument for Zarr format 2 arrays (:issue:`2679`).
* Fixes a bug that prevented reading Zarr format 2 data with consolidated metadata written using ``zarr-python`` version 2 (:issue:`2694`).
* Ensure that compressor=None results in no compression when writing Zarr format 2 data (:issue:`2708`).
* Fix for empty consolidated metadata dataset: backwards compatibility with Zarr-Python 2 (:issue:`2695`).

Documentation
~~~~~~~~~~~~~

* Add v3.0.0 release announcement banner (:issue:`2677`).
* Quickstart guide alignment with V3 API (:issue:`2697`).
* Fix doctest failures related to numcodecs 0.15 (:issue:`2727`).

Other
~~~~~

* Removed some unnecessary files from the source distribution to reduce its size. (:issue:`2686`).
* Enable codecov in GitHub actions (:issue:`2682`).
* Speed up hypothesis tests (:issue:`2650`).
* Remove multiple imports for an import name (:issue:`2723`).

.. _release_3.0.0:

3.0.0 (Jan. 9, 2025)
--------------------

3.0.0 is a new major release of Zarr-Python, with many breaking changes. See the :ref:`v3 migration guide` for a listing of what's changed.

Normal release note service will resume with further releases in the 3.0.0 series.

Release notes for the zarr-python 2.x and 1.x releases can be found here: https://zarr.readthedocs.io/en/support-v2/release.html

zarr-python-3.0.6/docs/talks/scipy2019/submission.rst

Zarr - scalable storage of tensor data for use in parallel and distributed computing
====================================================================================

SciPy 2019 submission.

Short summary
-------------

Many scientific problems involve computing over large N-dimensional typed arrays of data, and reading or writing data is often the major bottleneck limiting speed or scalability. The Zarr project is developing a simple, scalable approach to storage of such data in a way that is compatible with a range of approaches to distributed and parallel computing. We describe the Zarr protocol and data storage format, and the current state of implementations for various programming languages including Python. We also describe current uses of Zarr in malaria genomics, the Human Cell Atlas, and the Pangeo project.

Abstract
--------

Background
~~~~~~~~~~

Across a broad range of scientific disciplines, data are naturally represented and stored as N-dimensional typed arrays, also known as tensors. The volume of data being generated is outstripping our ability to analyse it, and scientific communities are looking for ways to leverage modern multi-core CPUs and distributed computing platforms, including cloud computing. Retrieval and storage of data is often the major bottleneck, and new approaches to data storage are needed to accelerate distributed computations and enable them to scale on a variety of platforms.

Methods
~~~~~~~

We have designed a new storage format and protocol for tensor data [1_], and have released an open source Python implementation [2_, 3_]. Our approach builds on data storage concepts from HDF5 [4_], particularly chunking and compression, and hierarchical organisation of datasets. Key design goals include: a simple protocol and format that can be implemented in other programming languages; support for multiple concurrent readers or writers; support for a variety of parallel computing environments, from multi-threaded execution on a single CPU to multi-process execution across a multi-node cluster; pluggable storage subsystem with support for file systems, key-value databases and cloud object stores; pluggable encoding subsystem with support for a variety of modern compressors.

Results
~~~~~~~

We illustrate the use of Zarr with examples from several scientific domains. Zarr is being used within the Pangeo project [5_], which is building a community platform for big data geoscience. The Pangeo community have converted a number of existing climate modelling and satellite observation datasets to Zarr [6_], and have demonstrated their use in computations using HPC and cloud computing environments.

Within the MalariaGEN project [7_], Zarr is used to store genome variation data from next-generation sequencing of natural populations of malaria parasites and mosquitoes [8_] and these data are used as input to analyses of the evolution of these organisms in response to selective pressure from anti-malarial drugs and insecticides.

Zarr is being used within the Human Cell Atlas (HCA) project [9_], which is building a reference atlas of healthy human cell types. This project hopes to leverage this information to better understand the dysregulation of cellular states that underlie human disease. The Human Cell Atlas uses Zarr as the output data format because it enables the project to easily generate matrices containing user-selected subsets of cells.

Conclusions
~~~~~~~~~~~

Zarr is generating interest across a range of scientific domains, and work is ongoing to establish a community process to support further development of the specifications and implementations in other programming languages [10_, 11_, 12_] and building interoperability with a similar project called N5 [13_]. Other packages within the PyData ecosystem, notably Dask [14_], Xarray [15_] and Intake [16_], have added capability to read and write Zarr, and together these packages provide a compelling solution for large scale data science using Python [17_]. Zarr has recently been presented in several venues, including a webinar for the ESIP Federation tech dive series [18_], and a talk at the AGU Fall Meeting 2018 [19_].

References
~~~~~~~~~~

.. _1: https://zarr.readthedocs.io/en/stable/spec/v2.html
.. _2: https://github.com/zarr-developers/zarr-python
.. _3: https://github.com/zarr-developers/numcodecs
.. _4: https://www.hdfgroup.org/solutions/hdf5/
.. _5: https://pangeo.io/
.. _6: https://pangeo.io/catalog.html
.. _7: https://www.malariagen.net/
.. _8: http://alimanfoo.github.io/2016/09/21/genotype-compression-benchmark.html
.. _9: https://www.humancellatlas.org/
.. _10: https://github.com/constantinpape/z5
.. _11: https://github.com/lasersonlab/ndarray.scala
.. _12: https://github.com/meggart/ZarrNative.jl
.. _13: https://github.com/saalfeldlab/n5
.. _14: http://docs.dask.org/en/latest/array-creation.html
.. _15: http://xarray.pydata.org/en/stable/io.html
.. _16: https://github.com/ContinuumIO/intake-xarray
.. _17: http://matthewrocklin.com/blog/work/2018/01/22/pangeo-2
.. _18: http://wiki.esipfed.org/index.php/Interoperability_and_Technology/Tech_Dive_Webinar_Series#8_March.2C_2018:_.22Zarr:_A_simple.2C_open.2C_scalable_solution_for_big_NetCDF.2FHDF_data_on_the_Cloud.22:_Alistair_Miles.2C_University_of_Oxford.
.. _19: https://agu.confex.com/agu/fm18/meetingapp.cgi/Paper/390015

Authors
-------

Project contributors are listed in alphabetical order by surname.

* `Ryan Abernathey `_, Columbia University
* `Stephan Balmer `_, Meteotest
* `Ambrose Carr `_, Chan Zuckerberg Initiative
* `Tim Crone `_, Columbia University
* `Martin Durant `_, Anaconda, inc.
* `Jan Funke `_, HHMI Janelia
* `Darren Gallagher `_, Satavia
* `Fabian Gans `_, Max Planck Institute for Biogeochemistry
* `Shikhar Goenka `_, Satavia
* `Joe Hamman `_, NCAR
* `Stephan Hoyer `_, Google
* `Jerome Kelleher `_, University of Oxford
* `John Kirkham `_, HHMI Janelia
* `Alistair Miles `_, University of Oxford
* `Josh Moore `_, University of Dundee
* `Charles Noyes `_, University of Southern California
* `Tarik Onalan `_
* `Constantin Pape `_, University of Heidelberg
* `Zain Patel `_, University of Cambridge
* `Matthew Rocklin `_, NVIDIA
* `Stephan Saalfeld `_, HHMI Janelia
* `Vincent Schut `_, Satelligence
* `Justin Swaney `_, MIT
* `Ryan Williams `_, Chan Zuckerberg Initiative

zarr-python-3.0.6/docs/user-guide/arrays.rst

.. only:: doctest

    >>> import shutil
    >>> shutil.rmtree('data', ignore_errors=True)

.. _user-guide-arrays:

Working with arrays
===================

Creating an array
-----------------

Zarr has several functions for creating arrays. For example::

    >>> import zarr
    >>> store = zarr.storage.MemoryStore()
    >>> z = zarr.create_array(store=store, shape=(10000, 10000), chunks=(1000, 1000), dtype='int32')
    >>> z

The code above creates a 2-dimensional array of 32-bit integers with 10000 rows and 10000 columns, divided into chunks where each chunk has 1000 rows and 1000 columns (and so there will be 100 chunks in total). The data is written to a :class:`zarr.storage.MemoryStore` (e.g. an in-memory dict). See :ref:`user-guide-persist` for details on storing arrays in other stores.

For a complete list of array creation routines see the :mod:`zarr` module documentation.

.. _user-guide-array:

Reading and writing data
------------------------

Zarr arrays support a similar interface to `NumPy `_ arrays for reading and writing data.
For example, the entire array can be filled with a scalar value::

    >>> z[:] = 42

Regions of the array can also be written to, e.g.::

    >>> import numpy as np
    >>>
    >>> z[0, :] = np.arange(10000)
    >>> z[:, 0] = np.arange(10000)

The contents of the array can be retrieved by slicing, which will load the requested region into memory as a NumPy array, e.g.::

    >>> z[0, 0]
    array(0, dtype=int32)
    >>> z[-1, -1]
    array(42, dtype=int32)
    >>> z[0, :]
    array([   0,    1,    2, ..., 9997, 9998, 9999], shape=(10000,), dtype=int32)
    >>> z[:, 0]
    array([   0,    1,    2, ..., 9997, 9998, 9999], shape=(10000,), dtype=int32)
    >>> z[:]
    array([[   0,    1,    2, ..., 9997, 9998, 9999],
           [   1,   42,   42, ...,   42,   42,   42],
           [   2,   42,   42, ...,   42,   42,   42],
           ...,
           [9997,   42,   42, ...,   42,   42,   42],
           [9998,   42,   42, ...,   42,   42,   42],
           [9999,   42,   42, ...,   42,   42,   42]], shape=(10000, 10000), dtype=int32)

More information about NumPy-style indexing can be found in the `NumPy documentation `_.

.. _user-guide-persist:

Persistent arrays
-----------------

In the examples above, compressed data for each chunk of the array was stored in main memory. Zarr arrays can also be stored on a file system, enabling persistence of data between sessions. To do this, we can change the store argument to point to a filesystem path::

    >>> z1 = zarr.create_array(store='data/example-1.zarr', shape=(10000, 10000), chunks=(1000, 1000), dtype='int32')

The array above will store its configuration metadata and all compressed chunk data in a directory called ``'data/example-1.zarr'`` relative to the current working directory. The :func:`zarr.create_array` function provides a convenient way to create a new persistent array or continue working with an existing array. Note, there is no need to close an array: data are automatically flushed to disk, and files are automatically closed whenever an array is modified.

Persistent arrays support the same interface for reading and writing data, e.g.::

    >>> z1[:] = 42
    >>> z1[0, :] = np.arange(10000)
    >>> z1[:, 0] = np.arange(10000)

Check that the data have been written and can be read again::

    >>> z2 = zarr.open_array('data/example-1.zarr', mode='r')
    >>> np.all(z1[:] == z2[:])
    np.True_

If you are just looking for a fast and convenient way to save NumPy arrays to disk and then load them back into memory later, the functions :func:`zarr.save` and :func:`zarr.load` may be useful. E.g.::

    >>> a = np.arange(10)
    >>> zarr.save('data/example-2.zarr', a)
    >>> zarr.load('data/example-2.zarr')
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

Please note that there are a number of other options for persistent array storage; see the :ref:`Storage Guide ` for more details.

.. _user-guide-resize:

Resizing and appending
----------------------

A Zarr array can be resized, which means that any of its dimensions can be increased or decreased in length. For example::

    >>> z = zarr.create_array(store='data/example-3.zarr', shape=(10000, 10000), dtype='int32', chunks=(1000, 1000))
    >>> z[:] = 42
    >>> z.shape
    (10000, 10000)
    >>> z.resize((20000, 10000))
    >>> z.shape
    (20000, 10000)

Note that when an array is resized, the underlying data are not rearranged in any way. If one or more dimensions are shrunk, any chunks falling outside the new array shape will be deleted from the underlying store.

:func:`zarr.Array.append` is provided as a convenience function, which can be used to append data to any axis.
E.g.::

    >>> a = np.arange(10000000, dtype='int32').reshape(10000, 1000)
    >>> z = zarr.create_array(store='data/example-4.zarr', shape=a.shape, dtype=a.dtype, chunks=(1000, 100))
    >>> z[:] = a
    >>> z.shape
    (10000, 1000)
    >>> z.append(a)
    (20000, 1000)
    >>> z.append(np.vstack([a, a]), axis=1)
    (20000, 2000)
    >>> z.shape
    (20000, 2000)

.. _user-guide-compress:

Compressors
-----------

A number of different compressors can be used with Zarr. Zarr includes Blosc, Zstandard and Gzip compressors. Additional compressors are available through a separate package called NumCodecs_ which provides various compressor libraries including LZ4, Zlib, BZ2 and LZMA. Different compressors can be provided via the ``compressors`` keyword argument accepted by all array creation functions. For example::

    >>> compressors = zarr.codecs.BloscCodec(cname='zstd', clevel=3, shuffle=zarr.codecs.BloscShuffle.bitshuffle)
    >>> data = np.arange(100000000, dtype='int32').reshape(10000, 10000)
    >>> z = zarr.create_array(store='data/example-5.zarr', shape=data.shape, dtype=data.dtype, chunks=(1000, 1000), compressors=compressors)
    >>> z[:] = data
    >>> z.compressors
    (BloscCodec(typesize=4, cname=, clevel=3, shuffle=, blocksize=0),)

The array above will use Blosc as the primary compressor, using the Zstandard algorithm (compression level 3) internally within Blosc, and with the bit-shuffle filter applied.

When using a compressor, it can be useful to get some diagnostics on the compression ratio. Zarr arrays provide the :attr:`zarr.Array.info` property which can be used to print useful diagnostics, e.g.::

    >>> z.info
    Type        : Array
    Zarr format : 3
    Data type   : DataType.int32
    Shape       : (10000, 10000)
    Chunk shape : (1000, 1000)
    Order       : C
    Read-only   : False
    Store type  : LocalStore
    Filters     : ()
    Serializer  : BytesCodec(endian=)
    Compressors : (BloscCodec(typesize=4, cname=, clevel=3, shuffle=, blocksize=0),)
    No. bytes   : 400000000 (381.5M)

The :func:`zarr.Array.info_complete` method inspects the underlying store and prints additional diagnostics, e.g.::

    >>> z.info_complete()
    Type               : Array
    Zarr format        : 3
    Data type          : DataType.int32
    Shape              : (10000, 10000)
    Chunk shape        : (1000, 1000)
    Order              : C
    Read-only          : False
    Store type         : LocalStore
    Filters            : ()
    Serializer         : BytesCodec(endian=)
    Compressors        : (BloscCodec(typesize=4, cname=, clevel=3, shuffle=, blocksize=0),)
    No. bytes          : 400000000 (381.5M)
    No. bytes stored   : 9696520
    Storage ratio      : 41.3
    Chunks Initialized : 100

.. note:: :func:`zarr.Array.info_complete` will inspect the underlying store and may be slow for large arrays. Use :attr:`zarr.Array.info` if detailed storage statistics are not needed.

If you don't specify a compressor, by default Zarr uses the Zstandard compressor.

In addition to Blosc and Zstandard, other compression libraries can also be used.
For example, here is an array using Gzip compression, level 1::

    >>> data = np.arange(100000000, dtype='int32').reshape(10000, 10000)
    >>> z = zarr.create_array(store='data/example-6.zarr', shape=data.shape, dtype=data.dtype, chunks=(1000, 1000), compressors=zarr.codecs.GzipCodec(level=1))
    >>> z[:] = data
    >>> z.compressors
    (GzipCodec(level=1),)

Here is an example using LZMA from NumCodecs_ with a custom filter pipeline including LZMA's built-in delta filter::

    >>> import lzma
    >>> from numcodecs.zarr3 import LZMA
    >>>
    >>> lzma_filters = [dict(id=lzma.FILTER_DELTA, dist=4), dict(id=lzma.FILTER_LZMA2, preset=1)]
    >>> compressors = LZMA(filters=lzma_filters)
    >>> data = np.arange(100000000, dtype='int32').reshape(10000, 10000)
    >>> z = zarr.create_array(store='data/example-7.zarr', shape=data.shape, dtype=data.dtype, chunks=(1000, 1000), compressors=compressors)
    >>> z.compressors
    (LZMA(codec_name='numcodecs.lzma', codec_config={'filters': [{'id': 3, 'dist': 4}, {'id': 33, 'preset': 1}]}),)

The default compressor can be changed using Zarr's :ref:`user-guide-config`, e.g.::

    >>> with zarr.config.set({'array.v2_default_compressor.numeric': {'id': 'blosc'}}):
    ...     z = zarr.create_array(store={}, shape=(100000000,), chunks=(1000000,), dtype='int32', zarr_format=2)
    >>> z.filters
    ()
    >>> z.compressors
    (Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0),)

To disable compression, set ``compressors=None`` when creating an array, e.g.::

    >>> z = zarr.create_array(store='data/example-8.zarr', shape=(100000000,), chunks=(1000000,), dtype='int32', compressors=None)
    >>> z.compressors
    ()

.. _user-guide-filters:

Filters
-------

In some cases, compression can be improved by transforming the data in some way. For example, if nearby values tend to be correlated, then shuffling the bytes within each numerical value or storing the difference between adjacent values may increase the compression ratio. Some compressors provide built-in filters that apply transformations to the data prior to compression. For example, the Blosc compressor has built-in implementations of byte- and bit-shuffle filters, and the LZMA compressor has a built-in implementation of a delta filter. However, to provide additional flexibility for implementing and using filters in combination with different compressors, Zarr also provides a mechanism for configuring filters outside of the primary compressor.

Here is an example using a delta filter with the Blosc compressor::

    >>> from numcodecs.zarr3 import Delta
    >>>
    >>> filters = [Delta(dtype='int32')]
    >>> compressors = zarr.codecs.BloscCodec(cname='zstd', clevel=1, shuffle=zarr.codecs.BloscShuffle.shuffle)
    >>> data = np.arange(100000000, dtype='int32').reshape(10000, 10000)
    >>> z = zarr.create_array(store='data/example-9.zarr', shape=data.shape, dtype=data.dtype, chunks=(1000, 1000), filters=filters, compressors=compressors)
    >>> z.info
    Type        : Array
    Zarr format : 3
    Data type   : DataType.int32
    Shape       : (10000, 10000)
    Chunk shape : (1000, 1000)
    Order       : C
    Read-only   : False
    Store type  : LocalStore
    Filters     : (Delta(codec_name='numcodecs.delta', codec_config={'dtype': 'int32'}),)
    Serializer  : BytesCodec(endian=)
    Compressors : (BloscCodec(typesize=4, cname=, clevel=1, shuffle=, blocksize=0),)
    No. bytes   : 400000000 (381.5M)

For more information about available filter codecs, see the `Numcodecs `_ documentation.

.. _user-guide-indexing:

Advanced indexing
-----------------

Zarr arrays support several methods for advanced or "fancy" indexing, which enable a subset of data items to be extracted or updated in an array without loading the entire array into memory.

Note that although this functionality is similar to some of the advanced indexing capabilities available on NumPy arrays and on h5py datasets, **the Zarr API for advanced indexing is different from both NumPy and h5py**, so please read this section carefully.

For a complete description of the indexing API, see the documentation for the :class:`zarr.Array` class.

Indexing with coordinate arrays
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Items from a Zarr array can be extracted by providing an integer array of coordinates. E.g.::

    >>> data = np.arange(10) ** 2
    >>> z = zarr.create_array(store='data/example-10.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z[:]
    array([ 0,  1,  4,  9, 16, 25, 36, 49, 64, 81])
    >>> z.get_coordinate_selection([2, 5])
    array([ 4, 25])

Coordinate arrays can also be used to update data, e.g.::

    >>> z.set_coordinate_selection([2, 5], [-1, -2])
    >>> z[:]
    array([ 0,  1, -1,  9, 16, -2, 36, 49, 64, 81])

For multidimensional arrays, coordinates must be provided for each dimension, e.g.::

    >>> data = np.arange(15).reshape(3, 5)
    >>> z = zarr.create_array(store='data/example-11.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z[:]
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14]])
    >>> z.get_coordinate_selection(([0, 2], [1, 3]))
    array([ 1, 13])
    >>> z.set_coordinate_selection(([0, 2], [1, 3]), [-1, -2])
    >>> z[:]
    array([[ 0, -1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, -2, 14]])

For convenience, coordinate indexing is also available via the ``vindex`` property, as well as the square bracket operator, e.g.::

    >>> z.vindex[[0, 2], [1, 3]]
    array([-1, -2])
    >>> z.vindex[[0, 2], [1, 3]] = [-3, -4]
    >>> z[:]
    array([[ 0, -3,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, -4, 14]])
    >>> z[[0, 2], [1, 3]]
    array([-3, -4])

When the indexing arrays have different shapes, they are broadcast together. That is, the following two calls are equivalent::

    >>> z[1, [1, 3]]
    array([6, 8])
    >>> z[[1, 1], [1, 3]]
    array([6, 8])

Indexing with a mask array
~~~~~~~~~~~~~~~~~~~~~~~~~~

Items can also be extracted by providing a Boolean mask.
E.g.::

    >>> data = np.arange(10) ** 2
    >>> z = zarr.create_array(store='data/example-12.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z[:]
    array([ 0,  1,  4,  9, 16, 25, 36, 49, 64, 81])
    >>> sel = np.zeros_like(z, dtype=bool)
    >>> sel[2] = True
    >>> sel[5] = True
    >>> z.get_mask_selection(sel)
    array([ 4, 25])
    >>> z.set_mask_selection(sel, [-1, -2])
    >>> z[:]
    array([ 0,  1, -1,  9, 16, -2, 36, 49, 64, 81])

Here's a multidimensional example::

    >>> data = np.arange(15).reshape(3, 5)
    >>> z = zarr.create_array(store='data/example-13.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z[:]
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14]])
    >>> sel = np.zeros_like(z, dtype=bool)
    >>> sel[0, 1] = True
    >>> sel[2, 3] = True
    >>> z.get_mask_selection(sel)
    array([ 1, 13])
    >>> z.set_mask_selection(sel, [-1, -2])
    >>> z[:]
    array([[ 0, -1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, -2, 14]])

For convenience, mask indexing is also available via the ``vindex`` property, e.g.::

    >>> z.vindex[sel]
    array([-1, -2])
    >>> z.vindex[sel] = [-3, -4]
    >>> z[:]
    array([[ 0, -3,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, -4, 14]])

Mask indexing is conceptually the same as coordinate indexing, and is implemented internally via the same machinery. Both styles of indexing allow selecting arbitrary items from an array, also known as point selection.

Orthogonal indexing
~~~~~~~~~~~~~~~~~~~

Zarr arrays also support methods for orthogonal indexing, which allows selections to be made along each dimension of an array independently. For example, this allows selecting a subset of rows and/or columns from a 2-dimensional array. E.g.::

    >>> data = np.arange(15).reshape(3, 5)
    >>> z = zarr.create_array(store='data/example-14.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z[:]
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14]])
    >>> z.get_orthogonal_selection(([0, 2], slice(None)))  # select first and third rows
    array([[ 0,  1,  2,  3,  4],
           [10, 11, 12, 13, 14]])
    >>> z.get_orthogonal_selection((slice(None), [1, 3]))  # select second and fourth columns
    array([[ 1,  3],
           [ 6,  8],
           [11, 13]])
    >>> z.get_orthogonal_selection(([0, 2], [1, 3]))  # select rows [0, 2] and columns [1, 3]
    array([[ 1,  3],
           [11, 13]])

Data can also be modified, e.g.::

    >>> z.set_orthogonal_selection(([0, 2], [1, 3]), [[-1, -2], [-3, -4]])

For convenience, the orthogonal indexing functionality is also available via the ``oindex`` property, e.g.::

    >>> data = np.arange(15).reshape(3, 5)
    >>> z = zarr.create_array(store='data/example-15.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> z.oindex[[0, 2], :]  # select first and third rows
    array([[ 0,  1,  2,  3,  4],
           [10, 11, 12, 13, 14]])
    >>> z.oindex[:, [1, 3]]  # select second and fourth columns
    array([[ 1,  3],
           [ 6,  8],
           [11, 13]])
    >>> z.oindex[[0, 2], [1, 3]]  # select rows [0, 2] and columns [1, 3]
    array([[ 1,  3],
           [11, 13]])
    >>> z.oindex[[0, 2], [1, 3]] = [[-1, -2], [-3, -4]]
    >>> z[:]
    array([[ 0, -1,  2, -2,  4],
           [ 5,  6,  7,  8,  9],
           [10, -3, 12, -4, 14]])

Any combination of integer, slice, 1D integer array and/or 1D Boolean array can be used for orthogonal indexing.
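For example, a 1D Boolean array can be combined with a slice to select a subset of rows across a range of columns. The snippet below is an illustrative sketch in the same style as the examples above; it assumes the ``z`` array from the previous example (after the ``oindex`` assignment), and the output is shown for illustration rather than captured from a live session::

    >>> row_mask = np.array([True, False, True])
    >>> z.oindex[row_mask, 1:4]  # Boolean mask over rows, slice over columns
    array([[-1,  2, -2],
           [-3, 12, -4]])
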
If the index contains at most one iterable, and otherwise contains only slices and integers, orthogonal indexing is also available directly on the array::

    >>> data = np.arange(15).reshape(3, 5)
    >>> z = zarr.create_array(store='data/example-16.zarr', shape=data.shape, dtype=data.dtype)
    >>> z[:] = data
    >>> np.all(z.oindex[[0, 2], :] == z[[0, 2], :])
    np.True_

Block Indexing
~~~~~~~~~~~~~~

Zarr also supports block indexing, which allows selections of whole chunks based on their logical indices along each dimension of an array. For example, this allows selecting a subset of chunk-aligned rows and/or columns from a 2-dimensional array. E.g.::

    >>> data = np.arange(100).reshape(10, 10)
    >>> z = zarr.create_array(store='data/example-17.zarr', shape=data.shape, dtype=data.dtype, chunks=(3, 3))
    >>> z[:] = data

Retrieve items by specifying their block coordinates::

    >>> z.get_block_selection(1)
    array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
           [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
           [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])

Equivalent slicing::

    >>> z[3:6]
    array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
           [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
           [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])

For convenience, the block selection functionality is also available via the `blocks` property, e.g.::

    >>> z.blocks[1]
    array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
           [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
           [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]])

Block index arrays may be multidimensional to index multidimensional arrays. For example::

    >>> z.blocks[0, 1:3]
    array([[ 3,  4,  5,  6,  7,  8],
           [13, 14, 15, 16, 17, 18],
           [23, 24, 25, 26, 27, 28]])

Data can also be modified. Let's start with a simple 2D array::

    >>> z = zarr.create_array(store='data/example-18.zarr', shape=(6, 6), dtype=int, chunks=(2, 2))

Set data for a selection of items::

    >>> z.set_block_selection((1, 0), 1)
    >>> z[...]
    array([[0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [1, 1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0]])

For convenience, this functionality is also available via the ``blocks`` property. E.g.::

    >>> z.blocks[:, 2] = 7
    >>> z[...]
    array([[0, 0, 0, 0, 7, 7],
           [0, 0, 0, 0, 7, 7],
           [1, 1, 0, 0, 7, 7],
           [1, 1, 0, 0, 7, 7],
           [0, 0, 0, 0, 7, 7],
           [0, 0, 0, 0, 7, 7]])

Any combination of integer and slice can be used for block indexing::

    >>> z.blocks[2, 1:3]
    array([[0, 0, 7, 7],
           [0, 0, 7, 7]])
    >>>
    >>> root = zarr.create_group('data/example-19.zarr')
    >>> foo = root.create_array(name='foo', shape=(1000, 100), chunks=(10, 10), dtype='float32')
    >>> bar = root.create_array(name='foo/bar', shape=(100,), dtype='int32')
    >>> foo[:, :] = np.random.random((1000, 100))
    >>> bar[:] = np.arange(100)
    >>> root.tree()
    /
    └── foo (1000, 100) float32

.. _user-guide-sharding:

Sharding
--------

Using small chunk shapes in very large arrays can lead to a very large number of chunks. This can become a performance issue for file systems and object storage. With Zarr format 3, a new sharding feature has been added to address this issue.

With sharding, multiple chunks can be stored in a single storage object (e.g. a file). Within a shard, chunks are compressed and serialized separately. This allows individual chunks to be read independently. However, when writing data, a full shard must be written in one go for optimal performance and to avoid concurrency issues. That means that shards are the units of writing and chunks are the units of reading. Users need to configure the chunk and shard shapes accordingly.
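To get a feel for this trade-off, it can help to count storage objects. The following plain-Python sketch (illustrative only; the shapes are chosen to match the example that follows) computes how many objects a store would hold with and without sharding::

    >>> import math
    >>> shape, shards, chunks = (10000, 10000), (1000, 1000), (100, 100)
    >>> # number of chunks, i.e. storage objects without sharding
    >>> math.prod(math.ceil(s / c) for s, c in zip(shape, chunks))
    10000
    >>> # number of shards, i.e. storage objects with sharding
    >>> math.prod(math.ceil(s / c) for s, c in zip(shape, shards))
    100

Each shard here holds 10 × 10 = 100 chunks, reducing the number of stored objects by a factor of 100 while keeping the read granularity of a single chunk.
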
Sharded arrays can be created by providing the ``shards`` parameter to :func:`zarr.create_array`.

    >>> a = zarr.create_array('data/example-20.zarr', shape=(10000, 10000), shards=(1000, 1000), chunks=(100, 100), dtype='uint8')
    >>> a[:] = (np.arange(10000 * 10000) % 256).astype('uint8').reshape(10000, 10000)
    >>> a.info_complete()
    Type               : Array
    Zarr format        : 3
    Data type          : DataType.uint8
    Shape              : (10000, 10000)
    Shard shape        : (1000, 1000)
    Chunk shape        : (100, 100)
    Order              : C
    Read-only          : False
    Store type         : LocalStore
    Filters            : ()
    Serializer         : BytesCodec(endian=)
    Compressors        : (ZstdCodec(level=0, checksum=False),)
    No. bytes          : 100000000 (95.4M)
    No. bytes stored   : 3981552
    Storage ratio      : 25.1
    Shards Initialized : 100

In this example, a shard shape of (1000, 1000) and a chunk shape of (100, 100) is used. This means that 10*10 chunks are stored in each shard, and there are 10*10 shards in total. Without the ``shards`` argument, there would be 10,000 chunks stored as individual files.

Missing features in 3.0
-----------------------

The following features have not been ported to 3.0 yet.

.. _user-guide-objects:

Object arrays
~~~~~~~~~~~~~

See the Zarr-Python 2 documentation on `Object arrays `_ for more details.

.. _user-guide-strings:

Fixed-length string arrays
~~~~~~~~~~~~~~~~~~~~~~~~~~

See the Zarr-Python 2 documentation on `Fixed-length string arrays `_ for more details.

.. _user-guide-datetime:

Datetime and Timedelta arrays
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

See the Zarr-Python 2 documentation on `Datetime and Timedelta `_ for more details.

.. _user-guide-copy:

Copying and migrating data
~~~~~~~~~~~~~~~~~~~~~~~~~~

See the Zarr-Python 2 documentation on `Copying and migrating data `_ for more details.

zarr-python-3.0.6/docs/user-guide/attributes.rst

.. _user-guide-attrs:

Working with attributes
=======================

Zarr arrays and groups support custom key/value attributes, which can be useful for storing application-specific metadata. For example::

    >>> import zarr
    >>> store = zarr.storage.MemoryStore()
    >>> root = zarr.create_group(store=store)
    >>> root.attrs['foo'] = 'bar'
    >>> z = root.create_array(name='zzz', shape=(10000, 10000), dtype='int32')
    >>> z.attrs['baz'] = 42
    >>> z.attrs['qux'] = [1, 4, 7, 12]
    >>> sorted(root.attrs)
    ['foo']
    >>> 'foo' in root.attrs
    True
    >>> root.attrs['foo']
    'bar'
    >>> sorted(z.attrs)
    ['baz', 'qux']
    >>> z.attrs['baz']
    42
    >>> z.attrs['qux']
    [1, 4, 7, 12]

Internally Zarr uses JSON to store array attributes, so attribute values must be JSON serializable.

zarr-python-3.0.6/docs/user-guide/config.rst

.. _user-guide-config:

Runtime configuration
=====================

``zarr.config`` is responsible for managing the configuration of zarr and is based on the `donfig `_ Python library.

Configuration values can be set using code like the following::

    >>> import zarr
    >>>
    >>> zarr.config.set({'array.order': 'F'})
    >>>
    >>> # revert this change so it doesn't impact the rest of the docs
    >>> zarr.config.set({'array.order': 'C'})

Alternatively, configuration values can be set using environment variables, e.g. ``ZARR_ARRAY__ORDER=F``.

The configuration can also be read from a YAML file in standard locations. For more information, see the `donfig documentation `_.
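Because ``zarr.config`` is a donfig ``Config`` object, ``zarr.config.set`` can also be used as a context manager, which restores the previous value when the block exits. The following is a brief sketch of scoping a setting to a single block of code (the option name is taken from the examples above; exact behavior follows donfig's context-manager semantics)::

    >>> import zarr
    >>> with zarr.config.set({'array.order': 'F'}):
    ...     # code in this block sees the modified configuration
    ...     order = zarr.config.get('array.order')
    >>> order
    'F'
    >>> zarr.config.get('array.order')  # restored on exit
    'C'
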
Configuration options include the following:

- Default Zarr format ``default_zarr_version``
- Default array order in memory ``array.order``
- Default filters, serializers and compressors, e.g. ``array.v3_default_filters``, ``array.v3_default_serializer``, ``array.v3_default_compressors``, ``array.v2_default_filters`` and ``array.v2_default_compressor``
- Whether empty chunks are written to storage ``array.write_empty_chunks``
- Async and threading options, e.g. ``async.concurrency`` and ``threading.max_workers``
- Selections of implementations of codecs, codec pipelines and buffers
- Enabling GPU support with ``zarr.config.enable_gpu()``. See :ref:`user-guide-gpu` for more.

For selecting custom implementations of codecs, pipelines, buffers and ndbuffers, first register the implementations in the registry and then select them in the config. For example, an implementation of the bytes codec in a class ``'custompackage.NewBytesCodec'`` requires the value of ``codecs.bytes.name`` to be ``'custompackage.NewBytesCodec'``.

This is the current default configuration::

    >>> zarr.config.pprint()
    {'array': {'order': 'C',
               'v2_default_compressor': {'bytes': {'checksum': False, 'id': 'zstd', 'level': 0},
                                         'numeric': {'checksum': False, 'id': 'zstd', 'level': 0},
                                         'string': {'checksum': False, 'id': 'zstd', 'level': 0}},
               'v2_default_filters': {'bytes': [{'id': 'vlen-bytes'}],
                                      'numeric': None,
                                      'raw': None,
                                      'string': [{'id': 'vlen-utf8'}]},
               'v3_default_compressors': {'bytes': [{'configuration': {'checksum': False, 'level': 0}, 'name': 'zstd'}],
                                          'numeric': [{'configuration': {'checksum': False, 'level': 0}, 'name': 'zstd'}],
                                          'string': [{'configuration': {'checksum': False, 'level': 0}, 'name': 'zstd'}]},
               'v3_default_filters': {'bytes': [], 'numeric': [], 'string': []},
               'v3_default_serializer': {'bytes': {'name': 'vlen-bytes'},
                                         'numeric': {'configuration': {'endian': 'little'}, 'name': 'bytes'},
                                         'string': {'name': 'vlen-utf8'}},
               'write_empty_chunks': False},
     'async': {'concurrency': 10, 'timeout': None},
     'buffer': 'zarr.core.buffer.cpu.Buffer',
     'codec_pipeline': {'batch_size': 1,
                        'path': 'zarr.core.codec_pipeline.BatchedCodecPipeline'},
     'codecs': {'blosc': 'zarr.codecs.blosc.BloscCodec',
                'bytes': 'zarr.codecs.bytes.BytesCodec',
                'crc32c': 'zarr.codecs.crc32c_.Crc32cCodec',
                'endian': 'zarr.codecs.bytes.BytesCodec',
                'gzip': 'zarr.codecs.gzip.GzipCodec',
                'sharding_indexed': 'zarr.codecs.sharding.ShardingCodec',
                'transpose': 'zarr.codecs.transpose.TransposeCodec',
                'vlen-bytes': 'zarr.codecs.vlen_utf8.VLenBytesCodec',
                'vlen-utf8': 'zarr.codecs.vlen_utf8.VLenUTF8Codec',
                'zstd': 'zarr.codecs.zstd.ZstdCodec'},
     'default_zarr_format': 3,
     'json_indent': 2,
     'ndbuffer': 'zarr.core.buffer.cpu.NDBuffer',
     'threading': {'max_workers': None}}

zarr-python-3.0.6/docs/user-guide/consolidated_metadata.rst

.. _user-guide-consolidated-metadata:

Consolidated metadata
=====================

.. warning:: The Consolidated Metadata feature in Zarr-Python is considered experimental for v3 stores. `zarr-specs#309 `_ has proposed a formal extension to the v3 specification to support consolidated metadata.

Zarr-Python implements the `Consolidated Metadata`_ for v2 and v3 stores. Consolidated metadata can reduce the time needed to load the metadata for an entire hierarchy, especially when the metadata is being served over a network. Consolidated metadata essentially stores all the metadata for a hierarchy in the metadata of the root Group.
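Concretely, for Zarr format 3 this means the root group's ``zarr.json`` document gains a ``consolidated_metadata`` key holding the metadata of every node in the hierarchy, keyed by path. The sketch below is hand-written for illustration, with most per-node fields elided; the exact layout is defined by the implementation and proposed specification extension::

    {
        "zarr_format": 3,
        "node_type": "group",
        "attributes": {},
        "consolidated_metadata": {
            "kind": "inline",
            "must_understand": false,
            "metadata": {
                "a": {"node_type": "array", "shape": [1], "...": "..."},
                "child": {"node_type": "group", "...": "..."}
            }
        }
    }
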
Usage
-----

If consolidated metadata is present in a Zarr Group's metadata then it is used by default. The initial read to open the group will need to communicate with the store (reading from a file for a :class:`zarr.storage.LocalStore`, making a network request for a :class:`zarr.storage.FsspecStore`). After that, any subsequent metadata reads to get child Group or Array nodes will *not* require reads from the store.

In Python, the consolidated metadata is available on the ``.consolidated_metadata`` attribute of the ``GroupMetadata`` object.

    >>> import zarr
    >>>
    >>> store = zarr.storage.MemoryStore()
    >>> group = zarr.create_group(store=store)
    >>> group.create_array(shape=(1,), name='a', dtype='float64')
    >>> group.create_array(shape=(2, 2), name='b', dtype='float64')
    >>> group.create_array(shape=(3, 3, 3), name='c', dtype='float64')
    >>> zarr.consolidate_metadata(store)

If we open that group, the Group's metadata has a :class:`zarr.core.group.ConsolidatedMetadata` that can be used:

    >>> consolidated = zarr.open_group(store=store)
    >>> consolidated_metadata = consolidated.metadata.consolidated_metadata.metadata
    >>> from pprint import pprint
    >>> pprint(dict(sorted(consolidated_metadata.items())))
    {'a': ArrayV3Metadata(shape=(1,), data_type=, chunk_grid=RegularChunkGrid(chunk_shape=(1,)), chunk_key_encoding=DefaultChunkKeyEncoding(name='default', separator='/'), fill_value=np.float64(0.0), codecs=(BytesCodec(endian=), ZstdCodec(level=0, checksum=False)), attributes={}, dimension_names=None, zarr_format=3, node_type='array', storage_transformers=()),
     'b': ArrayV3Metadata(shape=(2, 2), data_type=, chunk_grid=RegularChunkGrid(chunk_shape=(2, 2)), chunk_key_encoding=DefaultChunkKeyEncoding(name='default', separator='/'), fill_value=np.float64(0.0), codecs=(BytesCodec(endian=), ZstdCodec(level=0, checksum=False)), attributes={}, dimension_names=None, zarr_format=3, node_type='array', storage_transformers=()),
     'c': ArrayV3Metadata(shape=(3, 3, 3), data_type=, chunk_grid=RegularChunkGrid(chunk_shape=(3, 3, 3)), chunk_key_encoding=DefaultChunkKeyEncoding(name='default', separator='/'), fill_value=np.float64(0.0), codecs=(BytesCodec(endian=), ZstdCodec(level=0, checksum=False)), attributes={}, dimension_names=None, zarr_format=3, node_type='array', storage_transformers=())}

Operations on the group to get children automatically use the consolidated metadata:

    >>> consolidated['a']  # no read / HTTP request to the Store is required

With nested groups, the consolidated metadata is available on the children, recursively:

    >>> child = group.create_group('child', attributes={'kind': 'child'})
    >>> grandchild = child.create_group('child', attributes={'kind': 'grandchild'})
    >>> consolidated = zarr.consolidate_metadata(store)
    >>>
    >>> consolidated['child'].metadata.consolidated_metadata
    ConsolidatedMetadata(metadata={'child': GroupMetadata(attributes={'kind': 'grandchild'}, zarr_format=3, consolidated_metadata=ConsolidatedMetadata(metadata={}, kind='inline', must_understand=False), node_type='group')}, kind='inline', must_understand=False)

Synchronization and Concurrency
-------------------------------

Consolidated metadata is intended for read-heavy use cases on slowly changing hierarchies. For hierarchies where new nodes are constantly being added, removed, or modified, consolidated metadata may not be desirable.

1. It will add some overhead to each update operation, since the metadata would need to be re-consolidated to keep it in sync with the store.
2. Readers using consolidated metadata will regularly see a "past" version of the metadata, at the time they read the root node with its consolidated metadata.

.. _Consolidated Metadata: https://github.com/zarr-developers/zarr-specs/pull/309

zarr-python-3.0.6/docs/user-guide/extending.rst

Extending Zarr
==============

Zarr-Python 3 was designed to be extensible. This means that you can extend the library by writing custom classes and plugins. Currently, Zarr can be extended in the following ways:

Custom codecs
-------------

.. note:: This section explains how custom codecs can be created for Zarr format 3 arrays. For Zarr format 2, codecs should subclass the `numcodecs.abc.Codec `_ base class and register through `numcodecs.registry.register_codec `_.

There are three types of codecs in Zarr:

- array-to-array
- array-to-bytes
- bytes-to-bytes

Array-to-array codecs are used to transform the array data before serializing to bytes. Examples include delta encoding or scaling codecs. Array-to-bytes codecs are used for serializing the array data to bytes. In Zarr, the main codec to use for numeric arrays is the :class:`zarr.codecs.BytesCodec`. Bytes-to-bytes codecs transform the serialized bytestreams of the array data. Examples include compression codecs, such as :class:`zarr.codecs.GzipCodec`, :class:`zarr.codecs.BloscCodec` or :class:`zarr.codecs.ZstdCodec`, and codecs that add a checksum to the bytestream, such as :class:`zarr.codecs.Crc32cCodec`.

Custom codecs for Zarr are implemented by subclassing the relevant base class, see :class:`zarr.abc.codec.ArrayArrayCodec`, :class:`zarr.abc.codec.ArrayBytesCodec` and :class:`zarr.abc.codec.BytesBytesCodec`. Most custom codecs should implement the ``_encode_single`` and ``_decode_single`` methods. These methods operate on single chunks of the array data. Alternatively, custom codecs can implement the ``encode`` and ``decode`` methods, which operate on batches of chunks, in case the codec is intended to implement its own batch processing.

Custom codecs should also implement the following methods:

- ``compute_encoded_size``, which returns the byte size of the encoded data given the byte size of the original data. It should raise ``NotImplementedError`` for codecs with variable-sized outputs, such as compression codecs.
- ``validate`` (optional), which can be used to check that the codec metadata is compatible with the array metadata. It should raise errors if not.
- ``resolve_metadata`` (optional), which is important for codecs that change the shape, dtype or fill value of a chunk.
- ``evolve_from_array_spec`` (optional), which can be useful for automatically filling in codec configuration metadata from the array metadata.

To use custom codecs in Zarr, they need to be registered using the `entrypoint mechanism `_. Commonly, entrypoints are declared in the ``pyproject.toml`` of your package under the ``[project.entry-points."zarr.codecs"]`` section. Zarr will automatically discover and load all codecs registered with the entrypoint mechanism from imported modules.

.. code-block:: toml

    [project.entry-points."zarr.codecs"]
    "custompackage.fancy_codec" = "custompackage:FancyCodec"

New codecs need to have their own unique identifier. To avoid naming collisions, it is strongly recommended to prefix the codec identifier with a unique name. For example, the codecs from ``numcodecs`` are prefixed with ``numcodecs.``, e.g. ``numcodecs.delta``.

.. note:: Note that the extension mechanism for the Zarr format 3 is still under development. Requirements for custom codecs including the choice of codec identifiers might change in the future.

It is also possible to register codecs as replacements for existing codecs. This might be useful for providing specialized implementations, such as GPU-based codecs. In case of multiple codecs, the :mod:`zarr.core.config` mechanism can be used to select the preferred implementation.

Custom stores
-------------

Coming soon.

Custom array buffers
--------------------

Coming soon.

Other extensions
----------------

In the future, Zarr will support writing custom data types and chunk grids.

zarr-python-3.0.6/docs/user-guide/gpu.rst

.. _user-guide-gpu:

Using GPUs with Zarr
====================

Zarr can use GPUs to accelerate your workload by running :meth:`zarr.config.enable_gpu`.

.. note:: `zarr-python` currently supports reading the ndarray data into device (GPU) memory as the final stage of the codec pipeline. Data will still be read into or copied to host (CPU) memory for encoding and decoding. In the future, codecs will be available for compressing and decompressing data on the GPU, avoiding the need to move data between the host and device for compression and decompression.

Reading data into device memory
-------------------------------

:meth:`zarr.config.enable_gpu` configures Zarr to use GPU memory for the data buffers used internally by Zarr.

.. code-block:: python

    >>> import zarr
    >>> import cupy as cp  # doctest: +SKIP
    >>> zarr.config.enable_gpu()  # doctest: +SKIP
    >>> store = zarr.storage.MemoryStore()  # doctest: +SKIP
    >>> z = zarr.create_array(  # doctest: +SKIP
    ...     store=store, shape=(100, 100), chunks=(10, 10), dtype="float32",
    ... )
    >>> type(z[:10, :10])  # doctest: +SKIP
    cupy.ndarray

Note that the output type is a ``cupy.ndarray`` rather than a NumPy array.

zarr-python-3.0.6/docs/user-guide/groups.rst

.. only:: doctest

    >>> import shutil
    >>> shutil.rmtree('data', ignore_errors=True)

.. _user-guide-groups:

Working with groups
===================

Zarr supports hierarchical organization of arrays via groups. As with arrays, groups can be stored in memory, on disk, or via other storage systems that support a similar interface.

To create a group, use the :func:`zarr.create_group` function::

    >>> import zarr
    >>> store = zarr.storage.MemoryStore()
    >>> root = zarr.create_group(store=store)
    >>> root

Groups have a similar API to the Group class from `h5py `_.
For example, groups can contain other groups:: >>> foo = root.create_group('foo') >>> bar = foo.create_group('bar') Groups can also contain arrays, e.g.:: >>> z1 = bar.create_array(name='baz', shape=(10000, 10000), chunks=(1000, 1000), dtype='int32') >>> z1 Members of a group can be accessed via the suffix notation, e.g.:: >>> root['foo'] The '/' character can be used to access multiple levels of the hierarchy in one call, e.g.:: >>> root['foo/bar'] >>> root['foo/bar/baz'] The :func:`zarr.Group.tree` method can be used to print a tree representation of the hierarchy, e.g.:: >>> root.tree() / └── foo └── bar └── baz (10000, 10000) int32 The :func:`zarr.open_group` function provides a convenient way to create or re-open a group stored in a directory on the file-system, with sub-groups stored in sub-directories, e.g.:: >>> root = zarr.open_group('data/group.zarr', mode='w') >>> root >>> >>> z = root.create_array(name='foo/bar/baz', shape=(10000, 10000), chunks=(1000, 1000), dtype='int32') >>> z .. TODO: uncomment after __enter__ and __exit__ are implemented .. Groups can be used as context managers (in a ``with`` statement). .. If the underlying store has a ``close`` method, it will be called on exit. For more information on groups see the :class:`zarr.Group` API docs. Batch Group Creation -------------------- You can also create multiple groups concurrently with a single function call. :func:`zarr.create_hierarchy` takes a :class:`zarr.storage.Store` instance and a dict of ``key : metadata`` pairs, parses that dict, and writes metadata documents to storage: >>> from zarr import create_hierarchy >>> from zarr.core.group import GroupMetadata >>> from zarr.storage import LocalStore >>> node_spec = {'a/b/c': GroupMetadata()} >>> nodes_created = dict(create_hierarchy(store=LocalStore(root='data'), nodes=node_spec)) >>> print(sorted(nodes_created.items(), key=lambda kv: len(kv[0]))) [('', ), ('a', ), ('a/b', ), ('a/b/c', )] Note that we only specified a single group named ``a/b/c``, but 4 groups were created. These additional groups were created to ensure that the desired node ``a/b/c`` is connected to the root group ``''`` by a sequence of intermediate groups. :func:`zarr.create_hierarchy` normalizes the ``nodes`` keyword argument to ensure that the resulting hierarchy is complete, i.e. all groups or arrays are connected to the root of the hierarchy via intermediate groups. Because :func:`zarr.create_hierarchy` concurrently creates metadata documents, it's more efficient than repeated calls to :func:`create_group` or :func:`create_array`, provided you can statically define the metadata for the groups and arrays you want to create. .. _user-guide-diagnostics: Array and group diagnostics --------------------------- Diagnostic information about arrays and groups is available via the ``info`` property.
E.g.:: >>> store = zarr.storage.MemoryStore() >>> root = zarr.group(store=store) >>> foo = root.create_group('foo') >>> bar = foo.create_array(name='bar', shape=1000000, chunks=100000, dtype='int64') >>> bar[:] = 42 >>> baz = foo.create_array(name='baz', shape=(1000, 1000), chunks=(100, 100), dtype='float32') >>> baz[:] = 4.2 >>> root.info Name : Type : Group Zarr format : 3 Read-only : False Store type : MemoryStore >>> foo.info Name : foo Type : Group Zarr format : 3 Read-only : False Store type : MemoryStore >>> bar.info_complete() Type : Array Zarr format : 3 Data type : DataType.int64 Shape : (1000000,) Chunk shape : (100000,) Order : C Read-only : False Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : (ZstdCodec(level=0, checksum=False),) No. bytes : 8000000 (7.6M) No. bytes stored : 1614 Storage ratio : 4956.6 Chunks Initialized : 0 >>> baz.info Type : Array Zarr format : 3 Data type : DataType.float32 Shape : (1000, 1000) Chunk shape : (100, 100) Order : C Read-only : False Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : (ZstdCodec(level=0, checksum=False),) No. bytes : 4000000 (3.8M) Groups also have the :func:`zarr.Group.tree` method, e.g.:: >>> root.tree() / └── foo ├── bar (1000000,) int64 └── baz (1000, 1000) float32 .. note:: :func:`zarr.Group.tree` requires the optional `rich `_ dependency. It can be installed with the ``[tree]`` extra. zarr-python-3.0.6/docs/user-guide/index.rst000066400000000000000000000005011476711733500206710ustar00rootroot00000000000000.. _user-guide: User guide ========== .. toctree:: :maxdepth: 1 installation arrays groups attributes storage config v3_migration Advanced Topics --------------- .. toctree:: :maxdepth: 1 performance consolidated_metadata extending gpu .. Coming soon async zarr-python-3.0.6/docs/user-guide/installation.rst000066400000000000000000000033631476711733500222740ustar00rootroot00000000000000Installation ============ Required dependencies --------------------- Required dependencies include: - `Python `_ (3.11 or later) - `packaging `_ (22.0 or later) - `numpy `_ (1.25 or later) - `numcodecs[crc32c] `_ (0.14 or later) - `typing_extensions `_ (4.9 or later) - `donfig `_ (0.8 or later) pip --- Zarr is available on `PyPI `_. Install it using ``pip``: .. code-block:: console $ pip install zarr There are a number of optional dependency groups you can install for extra functionality. These can be installed using ``pip install "zarr[<extra>]"``, e.g. ``pip install "zarr[gpu]"`` - ``gpu``: support for GPUs - ``remote``: support for reading/writing to remote data stores Additional optional dependencies include ``rich``, ``universal_pathlib``. These must be installed separately. conda ----- Zarr is also published to `conda-forge `_. Install it using ``conda``: .. code-block:: console $ conda install -c conda-forge zarr Conda does not support optional dependencies, so you will have to manually install any packages needed to enable extra functionality. Dependency support ------------------ Zarr has endorsed `Scientific-Python SPEC 0 `_ and now follows the version support window as outlined below: - Python: 36 months after initial release - Core package dependencies (e.g. NumPy): 24 months after initial release Development ----------- To install the latest development version of Zarr, see the :ref:`contributing guide `. zarr-python-3.0.6/docs/user-guide/performance.rst000066400000000000000000000247451476711733500221010ustar00rootroot00000000000000..
only:: doctest >>> import shutil >>> shutil.rmtree('data', ignore_errors=True) .. _user-guide-performance: Optimizing performance ====================== .. _user-guide-chunks: Chunk optimizations ------------------- .. _user-guide-chunks-shape: Chunk size and shape ~~~~~~~~~~~~~~~~~~~~ In general, chunks of at least 1 megabyte (1M) uncompressed size seem to provide better performance, at least when using the Blosc compression library. The optimal chunk shape will depend on how you want to access the data. E.g., for a 2-dimensional array, if you only ever take slices along the first dimension, then chunk across the second dimension. If you know you want to chunk across an entire dimension you can use the full size of that dimension within the ``chunks`` argument, e.g.:: >>> import zarr >>> z1 = zarr.create_array(store={}, shape=(10000, 10000), chunks=(100, 10000), dtype='int32') >>> z1.chunks (100, 10000) Alternatively, if you only ever take slices along the second dimension, then chunk across the first dimension, e.g.:: >>> z2 = zarr.create_array(store={}, shape=(10000, 10000), chunks=(10000, 100), dtype='int32') >>> z2.chunks (10000, 100) If you require reasonable performance for both access patterns then you need to find a compromise, e.g.:: >>> z3 = zarr.create_array(store={}, shape=(10000, 10000), chunks=(1000, 1000), dtype='int32') >>> z3.chunks (1000, 1000) If you are feeling lazy, you can let Zarr guess a chunk shape for your data by providing ``chunks='auto'``, although please note that the algorithm for guessing a chunk shape is based on simple heuristics and may be far from optimal. E.g.:: >>> z4 = zarr.create_array(store={}, shape=(10000, 10000), chunks='auto', dtype='int32') >>> z4.chunks (625, 625) If you know you are always going to be loading the entire array into memory, you can turn off chunks by providing ``chunks`` equal to ``shape``, in which case there will be one single chunk for the array:: >>> z5 = zarr.create_array(store={}, shape=(10000, 10000), chunks=(10000, 10000), dtype='int32') >>> z5.chunks (10000, 10000) Sharding ~~~~~~~~ If you have large arrays but need small chunks to efficiently access the data, you can use sharding. Sharding provides a mechanism to store multiple chunks in a single storage object or file. This can be useful because traditional file systems and object storage systems may have performance issues storing and accessing many files. Additionally, small files can be inefficient to store if they are smaller than the block size of the file system. Picking a good combination of chunk shape and shard shape is important for performance. The chunk shape determines what unit of your data can be read independently, while the shard shape determines what unit of your data can be written efficiently. For example, suppose you have a 100 GB array and need to read small chunks of 1 MB. Without sharding, each chunk would be one file, resulting in 100,000 files. That can already cause performance issues on some file systems. With sharding, you could use a shard size of 1 GB. This would result in 1000 chunks per file and 100 files in total, which seems manageable for most storage systems. You would still be able to read each 1 MB chunk independently, but you would need to write your data in 1 GB increments. To use sharding, you need to specify the ``shards`` parameter when creating the array.
>>> z6 = zarr.create_array(store={}, shape=(10000, 10000, 1000), shards=(1000, 1000, 1000), chunks=(100, 100, 100), dtype='uint8') >>> z6.info Type : Array Zarr format : 3 Data type : DataType.uint8 Shape : (10000, 10000, 1000) Shard shape : (1000, 1000, 1000) Chunk shape : (100, 100, 100) Order : C Read-only : False Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : (ZstdCodec(level=0, checksum=False),) No. bytes : 100000000000 (93.1G) .. _user-guide-chunks-order: Chunk memory layout ~~~~~~~~~~~~~~~~~~~ The order of bytes **within each chunk** of an array can be changed via the ``order`` config option, to use either C or Fortran layout. For multi-dimensional arrays, these two layouts may provide different compression ratios, depending on the correlation structure within the data. E.g.:: >>> import numpy as np >>> >>> a = np.arange(100000000, dtype='int32').reshape(10000, 10000).T >>> c = zarr.create_array(store={}, shape=a.shape, chunks=(1000, 1000), dtype=a.dtype, config={'order': 'C'}) >>> c[:] = a >>> c.info_complete() Type : Array Zarr format : 3 Data type : DataType.int32 Shape : (10000, 10000) Chunk shape : (1000, 1000) Order : C Read-only : False Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : (ZstdCodec(level=0, checksum=False),) No. bytes : 400000000 (381.5M) No. bytes stored : 342588911 Storage ratio : 1.2 Chunks Initialized : 100 >>> with zarr.config.set({'array.order': 'F'}): ... f = zarr.create_array(store={}, shape=a.shape, chunks=(1000, 1000), dtype=a.dtype) ... f[:] = a >>> f.info_complete() Type : Array Zarr format : 3 Data type : DataType.int32 Shape : (10000, 10000) Chunk shape : (1000, 1000) Order : F Read-only : False Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : (ZstdCodec(level=0, checksum=False),) No. bytes : 400000000 (381.5M) No. bytes stored : 342588911 Storage ratio : 1.2 Chunks Initialized : 100 In the above example, the two layouts happen to compress equally well, as the identical storage figures show. This is an artificial example but illustrates the general point that changing the order of bytes within chunks of an array may improve the compression ratio, depending on the structure of the data, the compression algorithm used, and which compression filters (e.g., byte-shuffle) have been applied. .. _user-guide-chunks-empty-chunks: Empty chunks ~~~~~~~~~~~~ It is possible to configure how Zarr handles the storage of chunks that are "empty" (i.e., every element in the chunk is equal to the array's fill value). When creating an array with ``write_empty_chunks=False``, Zarr will check whether a chunk is empty before compression and storage. If a chunk is empty, then Zarr does not store it, and instead deletes the chunk from storage if the chunk had been previously stored. This optimization prevents storing redundant objects and can speed up reads, but the cost is added computation during array writes, since the contents of each chunk must be compared to the fill value, and these advantages are contingent on the content of the array. If you know that your data will form chunks that are almost always non-empty, then there is no advantage to the optimization described above. In this case, creating an array with ``write_empty_chunks=True`` (the default) will instruct Zarr to write every chunk without checking for emptiness.
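As with the ``order`` example above, the flag can also be applied through Zarr's runtime configuration rather than per array. The following is a sketch only, assuming the ``array.write_empty_chunks`` configuration key:

.. code-block:: python

    import zarr

    # Applies to arrays created while the configuration is active.
    with zarr.config.set({'array.write_empty_chunks': False}):
        arr = zarr.create_array(store={}, shape=(100,), chunks=(10,), dtype='int32')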
The following example illustrates the effect of the ``write_empty_chunks`` flag on the time required to write an array with different values.:: >>> import zarr >>> import numpy as np >>> import time >>> >>> def timed_write(write_empty_chunks): ... """ ... Measure the time required and number of objects created when writing ... to a Zarr array with random ints or fill value. ... """ ... chunks = (8192,) ... shape = (chunks[0] * 1024,) ... data = np.random.randint(0, 255, shape) ... dtype = 'uint8' ... arr = zarr.create_array( ... f'data/example-{write_empty_chunks}.zarr', ... shape=shape, ... chunks=chunks, ... dtype=dtype, ... fill_value=0, ... config={'write_empty_chunks': write_empty_chunks} ... ) ... # initialize all chunks ... arr[:] = 100 ... result = [] ... for value in (data, arr.fill_value): ... start = time.time() ... arr[:] = value ... elapsed = time.time() - start ... result.append((elapsed, arr.nchunks_initialized)) ... return result ... # log results >>> for write_empty_chunks in (True, False): ... full, empty = timed_write(write_empty_chunks) ... print(f'\nwrite_empty_chunks={write_empty_chunks}:\n\tRandom Data: {full[0]:.4f}s, {full[1]} objects stored\n\t Empty Data: {empty[0]:.4f}s, {empty[1]} objects stored\n') write_empty_chunks=True: Random Data: ...s, 1024 objects stored Empty Data: ...s, 1024 objects stored write_empty_chunks=False: Random Data: ...s, 1024 objects stored Empty Data: ...s, 0 objects stored In this example, writing random data is slightly slower with ``write_empty_chunks=True``, but writing empty data is substantially faster and generates far fewer objects in storage. .. _user-guide-rechunking: Changing chunk shapes (rechunking) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Coming soon. .. _user-guide-sync: Parallel computing and synchronization -------------------------------------- Coming soon. .. _user-guide-pickle: Pickle support -------------- Zarr arrays and groups can be pickled, as long as the underlying store object can be pickled. With the exception of the :class:`zarr.storage.MemoryStore`, any of the storage classes provided in the :mod:`zarr.storage` module can be pickled. If an array or group is backed by a persistent store such as the :class:`zarr.storage.LocalStore`, :class:`zarr.storage.ZipStore` or :class:`zarr.storage.FsspecStore`, then the store data **are not** pickled. The only thing that is pickled is the set of parameters needed to allow the store to re-open any underlying files or databases upon being unpickled. E.g., pickle/unpickle a local store array:: >>> import pickle >>> data = np.arange(100000) >>> z1 = zarr.create_array(store='data/example-2.zarr', shape=data.shape, chunks=data.shape, dtype=data.dtype) >>> z1[:] = data >>> s = pickle.dumps(z1) >>> z2 = pickle.loads(s) >>> z1 == z2 True >>> np.all(z1[:] == z2[:]) np.True_ .. _user-guide-tips-blosc: Configuring Blosc ----------------- Coming soon. zarr-python-3.0.6/docs/user-guide/storage.rst000066400000000000000000000077711476711733500212440ustar00rootroot00000000000000.. only:: doctest >>> import shutil >>> shutil.rmtree('data', ignore_errors=True) .. _user-guide-storage: Storage guide ============= Zarr-Python supports multiple storage backends, including: local file systems, Zip files, remote stores via fsspec_ (S3, HTTP, etc.), and in-memory stores. In Zarr-Python 3, stores must implement the abstract store API from :class:`zarr.abc.store.Store`.
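For a flavour of that interface, the fragment below sketches how a custom store might begin. The class name is invented and the body is elided; :class:`zarr.abc.store.Store` defines the full set of abstract methods that a complete implementation must provide.

.. code-block:: python

    from zarr.abc.store import Store

    class MyStore(Store):
        # Reads are served by an asynchronous method; writes, listing
        # and deletion follow the same async pattern.
        async def get(self, key, prototype, byte_range=None):
            ...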
.. note:: Unlike Zarr-Python 2 where the store interface was built around a generic ``MutableMapping`` API, Zarr-Python 3 uses a custom store API built on Python's AsyncIO library. Implicit Store Creation ----------------------- In most cases, it is not required to create a ``Store`` object explicitly. Passing a string to Zarr's top level API will result in the store being created automatically:: >>> import zarr >>> >>> # Implicitly create a writable LocalStore >>> zarr.create_group(store='data/foo/bar') >>> >>> # Implicitly create a read-only FsspecStore >>> zarr.open_group( ... store='s3://noaa-nwm-retro-v2-zarr-pds', ... mode='r', ... storage_options={'anon': True} ... ) >>> >>> # Implicitly creates a MemoryStore >>> data = {} >>> zarr.create_group(store=data) Explicit Store Creation ----------------------- In some cases, it may be helpful to create a store instance directly. Zarr-Python offers four built-in stores: :class:`zarr.storage.LocalStore`, :class:`zarr.storage.FsspecStore`, :class:`zarr.storage.ZipStore`, and :class:`zarr.storage.MemoryStore`. Local Store ~~~~~~~~~~~ The :class:`zarr.storage.LocalStore` stores data in a nested set of directories on a local filesystem:: >>> store = zarr.storage.LocalStore('data/foo/bar', read_only=True) >>> zarr.open_group(store=store, mode='r') Zip Store ~~~~~~~~~ The :class:`zarr.storage.ZipStore` stores the contents of a Zarr hierarchy in a single Zip file. The `Zip Store specification`_ is currently in draft form:: >>> store = zarr.storage.ZipStore('data.zip', mode='w') >>> zarr.create_array(store=store, shape=(2,), dtype='float64') Remote Store ~~~~~~~~~~~~ The :class:`zarr.storage.FsspecStore` stores the contents of a Zarr hierarchy following the same logical layout as the ``LocalStore``, except the store is assumed to be on a remote storage system such as cloud object storage (e.g. AWS S3, Google Cloud Storage, Azure Blob Store). The :class:`zarr.storage.FsspecStore` is backed by `fsspec`_ and can support any backend that implements the `AbstractFileSystem `_ API. ``storage_options`` can be used to configure the fsspec backend:: >>> store = zarr.storage.FsspecStore.from_url( ... 's3://noaa-nwm-retro-v2-zarr-pds', ... read_only=True, ... storage_options={'anon': True} ... ) >>> zarr.open_group(store=store, mode='r') Memory Store ~~~~~~~~~~~~ The :class:`zarr.storage.MemoryStore` is an in-memory store that allows for serialization of Zarr data (metadata and chunks) to a dictionary:: >>> data = {} >>> store = zarr.storage.MemoryStore(data) >>> # TODO: replace with create_array after #2463 >>> zarr.create_array(store=store, shape=(2,), dtype='float64') .. _user-guide-custom-stores: Developing custom stores ------------------------ The Zarr-Python :class:`zarr.abc.store.Store` API is meant to be extended. The Store Abstract Base Class includes all of the methods needed to be a fully operational store in Zarr Python. Zarr also provides a test harness for custom stores: :class:`zarr.testing.store.StoreTests`. .. _Zip Store Specification: https://github.com/zarr-developers/zarr-specs/pull/311 .. _fsspec: https://filesystem-spec.readthedocs.io zarr-python-3.0.6/docs/user-guide/v3_migration.rst000066400000000000000000000237361476711733500222000ustar00rootroot00000000000000.. _v3 migration guide: 3.0 Migration Guide =================== Zarr-Python 3 represents a major refactor of the Zarr-Python codebase.
Some of the goals motivating this refactor included: * adding support for the Zarr format 3 specification (along with the Zarr format 2 specification) * cleaning up internal and user facing APIs * improving performance (particularly in high latency storage environments like cloud object stores) To accommodate this, Zarr-Python 3 introduces a number of changes to the API, including a number of significant breaking changes and deprecations. This page provides a guide explaining breaking changes and deprecations to help you migrate your code from version 2 to version 3. If we have missed anything, please open a `GitHub issue `_ so we can improve this guide. Compatibility target -------------------- The goals described above necessitated some breaking changes to the API (hence the major version update), but where possible we have maintained backwards compatibility in the most widely used parts of the API. This includes the :class:`zarr.Array` and :class:`zarr.Group` classes and the "top-level API" (e.g. :func:`zarr.open_array` and :func:`zarr.open_group`). Getting ready for 3.0 --------------------- Before migrating to Zarr-Python 3, we suggest projects that depend on Zarr-Python take the following actions in order: 1. Pin the supported Zarr-Python version to ``zarr>=2,<3``. This is a best practice and will protect your users from any incompatibilities that may arise during the release of Zarr-Python 3. This pin can be removed after migrating to Zarr-Python 3. 2. Limit your imports from the Zarr-Python package. Most of the primary API ``zarr.*`` will be compatible in Zarr-Python 3. However, the following breaking API changes are planned: - ``numcodecs.*`` will no longer be available in ``zarr.*``. To migrate, import codecs directly from ``numcodecs``: .. code-block:: python from numcodecs import Blosc # instead of: # from zarr import Blosc - The ``zarr.v3_api_available`` feature flag is being removed. In Zarr-Python 3 the v3 API is always available, so you shouldn't need to use this flag. - The following internal modules are being removed or significantly changed. If your application relies on imports from any of the below modules, you will need to either a) modify your application to no longer rely on these imports or b) vendor the parts of the specific modules that you need. * ``zarr.attrs`` has gone, with no replacement * ``zarr.codecs`` has gone, use ``numcodecs`` instead * ``zarr.context`` has gone, with no replacement * ``zarr.core`` remains but should be considered private API * ``zarr.hierarchy`` has gone, with no replacement (use ``zarr.Group`` in place of ``zarr.hierarchy.Group``) * ``zarr.indexing`` has gone, with no replacement * ``zarr.meta`` has gone, with no replacement * ``zarr.meta_v1`` has gone, with no replacement * ``zarr.sync`` has gone, with no replacement * ``zarr.types`` has gone, with no replacement * ``zarr.util`` has gone, with no replacement * ``zarr.n5`` has gone, see below for an alternative N5 option 3. Test that your package works with version 3. 4. Update the pin to include ``zarr>=3,<4``. Zarr-Python 2 support window ---------------------------- Zarr-Python 2.x is still available, though we recommend migrating to Zarr-Python 3 for its performance improvements and new features. Security and bug fixes will be made to the 2.x series for at least six months following the first Zarr-Python 3 release. If you need to use the latest Zarr-Python 2 release, you can install it with: .. code-block:: console $ pip install "zarr==2.*"
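For projects that declare their dependencies in ``pyproject.toml``, the pin from step 1 above might look like the following sketch:

.. code-block:: toml

    [project]
    dependencies = [
        "zarr>=2,<3",  # relax to zarr>=3,<4 once the migration is complete
    ]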
.. note:: Development and maintenance of the 2.x release series has moved to the `support/v2 `_ branch. Issues and pull requests related to this branch are tagged with the `V2 `_ label. Migrating to Zarr-Python 3 -------------------------- The following sections provide details on breaking changes in Zarr-Python 3. The Array class ~~~~~~~~~~~~~~~ 1. Disallow direct construction - the signature for initializing the ``Array`` class has changed significantly. Please use :func:`zarr.create_array` or :func:`zarr.open_array` instead of directly constructing the :class:`zarr.Array` class. 2. Defaulting to ``zarr_format=3`` - newly created arrays will use version 3 of the Zarr specification. To continue using version 2, set ``zarr_format=2`` when creating arrays or set ``default_zarr_version=2`` in Zarr's :ref:`runtime configuration `. The Group class ~~~~~~~~~~~~~~~ 1. Disallow direct construction - use :func:`zarr.open_group` or :func:`zarr.create_group` instead of directly constructing the :class:`zarr.Group` class. 2. Most of the h5py compatibility methods are deprecated and will issue warnings if used. The following functions are drop-in replacements that have the same signature and functionality: - Use :func:`zarr.Group.create_array` in place of :func:`zarr.Group.create_dataset` - Use :func:`zarr.Group.require_array` in place of :func:`zarr.Group.require_dataset` The Store class ~~~~~~~~~~~~~~~ The Store API has changed significantly in Zarr-Python 3. The most notable changes to the Store API are: Store Import Paths ^^^^^^^^^^^^^^^^^^ Several store implementations have moved from the top-level module to ``zarr.storage``: .. code-block:: diff :caption: Store import changes from v2 to v3 # Before (v2) - from zarr import MemoryStore, DirectoryStore + from zarr.storage import MemoryStore, LocalStore # LocalStore replaces DirectoryStore Common replacements: +-------------------------+------------------------------------+ | v2 Import | v3 Import | +=========================+====================================+ | ``zarr.MemoryStore`` | ``zarr.storage.MemoryStore`` | +-------------------------+------------------------------------+ | ``zarr.DirectoryStore`` | ``zarr.storage.LocalStore`` | +-------------------------+------------------------------------+ | ``zarr.TempStore`` | Use ``tempfile.TemporaryDirectory``| | | with ``LocalStore`` | +-------------------------+------------------------------------+ 1. Replaced the ``MutableMapping`` base class with a custom abstract base class (:class:`zarr.abc.store.Store`). 2. Switched to an asynchronous interface for all store methods that result in IO. This change ensures that all store methods are non-blocking and are as performant as possible. Beyond the changes to the store interface, a number of deprecated stores were also removed in Zarr-Python 3. See :issue:`1274` for more details on the removal of these stores. - ``N5Store`` - see https://github.com/zarr-developers/n5py for an alternative interface to N5 formatted data. - ``ABSStore`` - use the :class:`zarr.storage.FsspecStore` instead along with fsspec's `adlfs backend `_. The following stores have been removed altogether. Users who need these stores will have to implement their own version in zarr-python v3. - ``DBMStore`` - ``LMDBStore`` - ``SQLiteStore`` - ``MongoDBStore`` - ``RedisStore`` At present, the latter five stores in this list do not have an equivalent in Zarr-Python 3.
If you are interested in developing a custom store that targets these backends, see :ref:`developing custom stores ` or open an `issue `_ to discuss your use case. Dependencies ~~~~~~~~~~~~ When installing using ``pip``: - The new ``remote`` dependency group can be used to install a supported version of ``fsspec``, required for remote data access. - The new ``gpu`` dependency group can be used to install a supported version of ``cuda``, required for GPU functionality. - The ``jupyter`` optional dependency group has been removed, since v3 contains no jupyter specific functionality. Miscellaneous ~~~~~~~~~~~~~ - The keyword argument ``zarr_version`` available in most creation functions in :mod:`zarr` (e.g. :func:`zarr.create`, :func:`zarr.open`, :func:`zarr.group`, :func:`zarr.array`) has been deprecated in favor of ``zarr_format``. 🚧 Work in Progress 🚧 ---------------------- Zarr-Python 3 is still under active development, and is not yet fully complete. The following list summarizes areas of the codebase that we expect to build out after the 3.0.0 release. If features listed below are important to your use case of Zarr-Python, please open (or comment on) a `GitHub issue `_. - The following functions / methods have not been ported to Zarr-Python 3 yet: * :func:`zarr.copy` (:issue:`2407`) * :func:`zarr.copy_all` (:issue:`2407`) * :func:`zarr.copy_store` (:issue:`2407`) * :func:`zarr.Group.move` (:issue:`2108`) - The following features (corresponding to function arguments to functions in :mod:`zarr`) have not been ported to Zarr-Python 3 yet. Using these features will raise a warning or a ``NotImplementedError``: * ``cache_attrs`` * ``cache_metadata`` * ``chunk_store`` (:issue:`2495`) * ``meta_array`` * ``object_codec`` (:issue:`2617`) * ``synchronizer`` (:issue:`1596`) * ``dimension_separator`` - The following features that were supported by Zarr-Python 2 have not been ported to Zarr-Python 3 yet: * Structured arrays / dtypes (:issue:`2134`) * Fixed-length string dtypes (:issue:`2347`) * Datetime and timedelta dtypes (:issue:`2616`) * Object dtypes (:issue:`2617`) * Ragged arrays (:issue:`2618`) * Groups and Arrays do not implement ``__enter__`` and ``__exit__`` protocols (:issue:`2619`) * Big Endian dtypes (:issue:`2324`) * Default filters for object dtypes for Zarr format 2 arrays (:issue:`2627`) zarr-python-3.0.6/notebooks/000077500000000000000000000000001476711733500160365ustar00rootroot00000000000000zarr-python-3.0.6/notebooks/advanced_indexing.ipynb000066400000000000000000002213001476711733500225310ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Advanced indexing" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'2.1.5.dev144'" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import sys\n", "sys.path.insert(0, '..')\n", "import zarr\n", "import numpy as np\n", "np.random.seed(42)\n", "import cProfile\n", "zarr.__version__" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Functionality and API" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Indexing a 1D array with a Boolean (mask) array\n", "\n", "Supported via ``get/set_mask_selection()`` and ``.vindex[]``. Also supported via ``get/set_orthogonal_selection()`` and ``.oindex[]``." 
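] }, { "cell_type": "markdown", "metadata": {}, "source": [ "*(Editor's note, added for illustration: ``za`` and ``ix`` are defined in the next cell. For Boolean selections like these, the two APIs return the same result, which can be checked with:)*\n", "\n", "```python\n", "assert np.array_equal(za.oindex[ix], za.vindex[ix])\n", "```"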
] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "a = np.arange(10)\n", "za = zarr.array(a, chunks=2)\n", "ix = [False, True, False, True, False, True, False, True, False, True]" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za.vindex[ix]" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za.oindex[ix]" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 10, 2, 30, 4, 50, 6, 70, 8, 90])" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.vindex[ix] = a[ix] * 10\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 100, 2, 300, 4, 500, 6, 700, 8, 900])" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.oindex[ix] = a[ix] * 100\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# if using .oindex, indexing array can be any array-like, e.g., Zarr array\n", "zix = zarr.array(ix, chunks=2)\n", "za = zarr.array(a, chunks=2)\n", "za.oindex[zix] # will not load all zix into memory" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Indexing a 1D array with a 1D integer (coordinate) array\n", "\n", "Supported via ``get/set_coordinate_selection()`` and ``.vindex[]``. Also supported via ``get/set_orthogonal_selection()`` and ``.oindex[]``." 
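] }, { "cell_type": "markdown", "metadata": {}, "source": [ "*(Editor's note, added for illustration: with ``a``, ``za`` and ``ix`` as defined in the next cell, coordinate selection matches numpy fancy indexing on the in-memory array:)*\n", "\n", "```python\n", "assert np.array_equal(za.vindex[ix], a[ix])\n", "```"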
] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "a = np.arange(10)\n", "za = zarr.array(a, chunks=2)\n", "ix = [1, 3, 5, 7, 9]" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za.vindex[ix]" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za.oindex[ix]" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 10, 2, 30, 4, 50, 6, 70, 8, 90])" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.vindex[ix] = a[ix] * 10\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 100, 2, 300, 4, 500, 6, 700, 8, 900])" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.oindex[ix] = a[ix] * 100\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Indexing a 1D array with a multi-dimensional integer (coordinate) array\n", "\n", "Supported via ``get/set_coordinate_selection()`` and ``.vindex[]``." ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "a = np.arange(10)\n", "za = zarr.array(a, chunks=2)\n", "ix = np.array([[1, 3, 5], [2, 4, 6]])" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[1, 3, 5],\n", " [2, 4, 6]])" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za.vindex[ix]" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 10, 20, 30, 40, 50, 60, 7, 8, 9])" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.vindex[ix] = a[ix] * 10\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Slicing a 1D array with step > 1\n", "\n", "Slices with step > 1 are supported via ``get/set_basic_selection()``, ``get/set_orthogonal_selection()``, ``__getitem__`` and ``.oindex[]``. Negative steps are not supported." ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "a = np.arange(10)\n", "za = zarr.array(a, chunks=2)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([1, 3, 5, 7, 9])" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "za[1::2]" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0, 10, 2, 30, 4, 50, 6, 70, 8, 90])" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.oindex[1::2] = a[1::2] * 10\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Orthogonal (outer) indexing of multi-dimensional arrays\n", "\n", "Orthogonal (a.k.a. outer) indexing is supported with either Boolean or integer arrays, in combination with integers and slices. 
This functionality is provided via the ``get/set_orthogonal_selection()`` methods. For convenience, this functionality is also available via the ``.oindex[]`` property." ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [ 3, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 11],\n", " [12, 13, 14]])" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "a = np.arange(15).reshape(5, 3)\n", "za = zarr.array(a, chunks=(3, 2))\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 3, 5],\n", " [ 9, 11]])" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# orthogonal indexing with Boolean arrays\n", "ix0 = [False, True, False, True, False]\n", "ix1 = [True, False, True]\n", "za.get_orthogonal_selection((ix0, ix1))" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 3, 5],\n", " [ 9, 11]])" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.oindex[ix0, ix1]" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 3, 5],\n", " [ 9, 11]])" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# orthogonal indexing with integer arrays\n", "ix0 = [1, 3]\n", "ix1 = [0, 2]\n", "za.get_orthogonal_selection((ix0, ix1))" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 3, 5],\n", " [ 9, 11]])" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.oindex[ix0, ix1]" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 3, 4, 5],\n", " [ 9, 10, 11]])" ] }, "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# combine with slice\n", "za.oindex[[1, 3], :]" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 2],\n", " [ 3, 5],\n", " [ 6, 8],\n", " [ 9, 11],\n", " [12, 14]])" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# combine with slice\n", "za.oindex[:, [0, 2]]" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [42, 4, 42],\n", " [ 6, 7, 8],\n", " [42, 10, 42],\n", " [12, 13, 14]])" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items via Boolean selection\n", "ix0 = [False, True, False, True, False]\n", "ix1 = [True, False, True]\n", "selection = ix0, ix1\n", "value = 42\n", "za.set_orthogonal_selection(selection, value)\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [44, 4, 44],\n", " [ 6, 7, 8],\n", " [44, 10, 44],\n", " [12, 13, 14]])" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.oindex[ix0, ix1] = 44\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [46, 4, 46],\n", " [ 6, 7, 8],\n", " [46, 
10, 46],\n", " [12, 13, 14]])" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items via integer selection\n", "ix0 = [1, 3]\n", "ix1 = [0, 2]\n", "selection = ix0, ix1\n", "value = 46\n", "za.set_orthogonal_selection(selection, value)\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [48, 4, 48],\n", " [ 6, 7, 8],\n", " [48, 10, 48],\n", " [12, 13, 14]])" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.oindex[ix0, ix1] = 48\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Coordinate indexing of multi-dimensional arrays\n", "\n", "Selecting arbitrary points from a multi-dimensional array by indexing with integer (coordinate) arrays is supported. This functionality is provided via the ``get/set_coordinate_selection()`` methods. For convenience, this functionality is also available via the ``.vindex[]`` property." ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [ 3, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 11],\n", " [12, 13, 14]])" ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "a = np.arange(15).reshape(5, 3)\n", "za = zarr.array(a, chunks=(3, 2))\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 3, 11])" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# get items\n", "ix0 = [1, 3]\n", "ix1 = [0, 2]\n", "za.get_coordinate_selection((ix0, ix1))" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 3, 11])" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.vindex[ix0, ix1]" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [42, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 42],\n", " [12, 13, 14]])" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# set items\n", "za.set_coordinate_selection((ix0, ix1), 42)\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [44, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 44],\n", " [12, 13, 14]])" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# alternative API\n", "za.vindex[ix0, ix1] = 44\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Mask indexing of multi-dimensional arrays\n", "\n", "Selecting arbitrary points from a multi-dimensional array by a Boolean array is supported. This functionality is provided via the ``get/set_mask_selection()`` methods. For convenience, this functionality is also available via the ``.vindex[]`` property." 
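] }, { "cell_type": "markdown", "metadata": {}, "source": [ "*(Editor's note, added for illustration: a mask is any Boolean array with the same shape as the indexed array, so it can be built from a condition. A sketch, using the ``a`` and ``za`` defined in the next cell:)*\n", "\n", "```python\n", "mask = (a % 2 == 0)  # Boolean array, same shape as a\n", "za.vindex[mask]      # elements of za where mask is True\n", "```"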
] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [ 3, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 11],\n", " [12, 13, 14]])" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "a = np.arange(15).reshape(5, 3)\n", "za = zarr.array(a, chunks=(3, 2))\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 3, 11])" ] }, "execution_count": 36, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ix = np.zeros_like(a, dtype=bool)\n", "ix[1, 0] = True\n", "ix[3, 2] = True\n", "za.get_mask_selection(ix)" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 3, 11])" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "za.vindex[ix]" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [42, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 42],\n", " [12, 13, 14]])" ] }, "execution_count": 38, "metadata": {}, "output_type": "execute_result" } ], "source": [ "za.set_mask_selection(ix, 42)\n", "za[:]" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 1, 2],\n", " [44, 4, 5],\n", " [ 6, 7, 8],\n", " [ 9, 10, 44],\n", " [12, 13, 14]])" ] }, "execution_count": 39, "metadata": {}, "output_type": "execute_result" } ], "source": [ "za.vindex[ix] = 44\n", "za[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Selecting fields from arrays with a structured dtype\n", "\n", "All ``get/set_selection_...()`` methods support a ``fields`` argument which allows retrieving/replacing data for a specific field or fields. Also h5py-like API is supported where fields can be provided within ``__getitem__``, ``.oindex[]`` and ``.vindex[]``." 
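] }, { "cell_type": "markdown", "metadata": {}, "source": [ "*(Editor's note, added for illustration: with the structured array ``za`` defined below, one or more fields can be combined with a selection. A sketch:)*\n", "\n", "```python\n", "za.get_basic_selection(slice(None), fields='foo')           # one field\n", "za.get_basic_selection(slice(None), fields=['foo', 'baz'])  # two fields\n", "```"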
] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([(b'aaa', 1, 4.2), (b'bbb', 2, 8.4), (b'ccc', 3, 12.6)],\n", " dtype=[('foo', 'S3'), ('bar', '\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0ma\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'foo'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'baz'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mIndexError\u001b[0m: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices" ] } ], "source": [ "a['foo', 'baz']" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([(b'aaa', 4.2), (b'bbb', 8.4), (b'ccc', 12.6)],\n", " dtype=[('foo', 'S3'), ('baz', '", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mza\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'foo'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'baz'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, selection)\u001b[0m\n\u001b[1;32m 537\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 538\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpop_fields\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 539\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 540\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 541\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mEllipsis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36mget_basic_selection\u001b[0;34m(self, selection, out, fields)\u001b[0m\n\u001b[1;32m 661\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_basic_selection_zd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 662\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 663\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_basic_selection_nd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 664\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 665\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_basic_selection_zd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_get_basic_selection_nd\u001b[0;34m(self, selection, out, fields)\u001b[0m\n\u001b[1;32m 701\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[0;31m# setup indexer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 703\u001b[0;31m \u001b[0mindexer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBasicIndexer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 704\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 705\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindexer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mindexer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/indexing.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, selection, array)\u001b[0m\n\u001b[1;32m 275\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 276\u001b[0m raise IndexError('unsupported selection item for basic indexing; expected integer '\n\u001b[0;32m--> 277\u001b[0;31m 'or slice, got {!r}'.format(type(dim_sel)))\n\u001b[0m\u001b[1;32m 278\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 279\u001b[0m \u001b[0mdim_indexers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdim_indexer\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mIndexError\u001b[0m: unsupported selection item for basic indexing; expected integer or slice, got " ] } ], "source": [ "za[['foo', 'baz']]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1D Benchmarking" ] }, { "cell_type": "code", "execution_count": 53, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "800000000" ] }, "execution_count": 53, "metadata": {}, "output_type": "execute_result" } ], "source": [ "c = np.arange(100000000)\n", "c.nbytes" ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 480 ms, sys: 16 ms, total: 496 ms\n", "Wall time: 141 ms\n" ] }, { "data": { "text/html": [ "
<tr><th>Type</th><td>zarr.core.Array</td></tr>
<tr><th>Data type</th><td>int64</td></tr>
<tr><th>Shape</th><td>(100000000,)</td></tr>
<tr><th>Chunk shape</th><td>(97657,)</td></tr>
<tr><th>Order</th><td>C</td></tr>
<tr><th>Read-only</th><td>False</td></tr>
<tr><th>Compressor</th><td>Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)</td></tr>
<tr><th>Store type</th><td>builtins.dict</td></tr>
<tr><th>No. bytes</th><td>800000000 (762.9M)</td></tr>
<tr><th>No. bytes stored</th><td>11854081 (11.3M)</td></tr>
<tr><th>Storage ratio</th><td>67.5</td></tr>
<tr><th>Chunks initialized</th><td>1024/1024</td></tr>
" ], "text/plain": [ "Type : zarr.core.Array\n", "Data type : int64\n", "Shape : (100000000,)\n", "Chunk shape : (97657,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 800000000 (762.9M)\n", "No. bytes stored : 11854081 (11.3M)\n", "Storage ratio : 67.5\n", "Chunks initialized : 1024/1024" ] }, "execution_count": 54, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time zc = zarr.array(c)\n", "zc.info" ] }, { "cell_type": "code", "execution_count": 55, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "121 ms ± 1.49 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit c.copy()" ] }, { "cell_type": "code", "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "254 ms ± 942 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc[:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### bool dense selection" ] }, { "cell_type": "code", "execution_count": 57, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "9997476" ] }, "execution_count": 57, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# relatively dense selection - 10%\n", "ix_dense_bool = np.random.binomial(1, 0.1, size=c.shape[0]).astype(bool)\n", "np.count_nonzero(ix_dense_bool)" ] }, { "cell_type": "code", "execution_count": 58, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "243 ms ± 5.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit c[ix_dense_bool]" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "433 ms ± 6.49 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_dense_bool]" ] }, { "cell_type": "code", "execution_count": 60, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "548 ms ± 5.2 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_dense_bool]" ] }, { "cell_type": "code", "execution_count": 61, "metadata": {}, "outputs": [], "source": [ "import tempfile\n", "import cProfile\n", "import pstats\n", "\n", "def profile(statement, sort='time', restrictions=(7,)):\n", " with tempfile.NamedTemporaryFile() as f:\n", " cProfile.run(statement, filename=f.name)\n", " pstats.Stats(f.name).sort_stats(sort).print_stats(*restrictions)\n" ] }, { "cell_type": "code", "execution_count": 62, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:17:48 2017 /tmp/tmpruua2rs_\n", "\n", " 98386 function calls in 0.483 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 83 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1025 0.197 0.000 0.197 0.000 {method 'nonzero' of 'numpy.ndarray' objects}\n", " 1024 0.149 0.000 0.159 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1024 0.044 0.000 0.231 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1024 0.009 0.000 0.009 0.000 {built-in method numpy.core.multiarray.count_nonzero}\n", " 1025 0.007 0.000 0.238 0.000 ../zarr/indexing.py:541(__iter__)\n", " 1024 0.006 0.000 0.207 0.000 /home/aliman/pyenv/zarr_20171023/lib/python3.6/site-packages/numpy/lib/index_tricks.py:26(ix_)\n", " 2048 0.005 0.000 0.005 0.000 ../zarr/core.py:337()\n", "\n", "\n" ] } ], "source": [ "profile('zc.oindex[ix_dense_bool]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Method ``nonzero`` is being called internally within numpy to convert bool to int selections, no way to avoid." ] }, { "cell_type": "code", "execution_count": 63, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:18:06 2017 /tmp/tmp7_bautep\n", "\n", " 52382 function calls in 0.592 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 88 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 2 0.219 0.110 0.219 0.110 {method 'nonzero' of 'numpy.ndarray' objects}\n", " 1024 0.096 0.000 0.101 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 2 0.094 0.047 0.094 0.047 ../zarr/indexing.py:630()\n", " 1024 0.044 0.000 0.167 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1 0.029 0.029 0.029 0.029 {built-in method numpy.core.multiarray.ravel_multi_index}\n", " 1 0.023 0.023 0.023 0.023 {built-in method numpy.core.multiarray.bincount}\n", " 1 0.021 0.021 0.181 0.181 ../zarr/indexing.py:603(__init__)\n", "\n", "\n" ] } ], "source": [ "profile('zc.vindex[ix_dense_bool]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "``.vindex[]`` is a bit slower, possibly because internally it converts to a coordinate array first." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### int dense selection" ] }, { "cell_type": "code", "execution_count": 64, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "10000000" ] }, "execution_count": 64, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ix_dense_int = np.random.choice(c.shape[0], size=c.shape[0]//10, replace=True)\n", "ix_dense_int_sorted = ix_dense_int.copy()\n", "ix_dense_int_sorted.sort()\n", "len(ix_dense_int)" ] }, { "cell_type": "code", "execution_count": 65, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "62.2 ms ± 2.36 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit c[ix_dense_int_sorted]" ] }, { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "355 ms ± 3.53 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_dense_int_sorted]" ] }, { "cell_type": "code", "execution_count": 67, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "351 ms ± 3.51 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_dense_int_sorted]" ] }, { "cell_type": "code", "execution_count": 68, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "128 ms ± 137 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit c[ix_dense_int]" ] }, { "cell_type": "code", "execution_count": 69, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.71 s ± 5.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_dense_int]" ] }, { "cell_type": "code", "execution_count": 70, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.68 s ± 3.87 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_dense_int]" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:19:09 2017 /tmp/tmpgmu5btr_\n", "\n", " 95338 function calls in 0.424 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 89 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 0.141 0.141 0.184 0.184 ../zarr/indexing.py:369(__init__)\n", " 1024 0.099 0.000 0.106 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1024 0.046 0.000 0.175 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1025 0.027 0.000 0.027 0.000 ../zarr/indexing.py:424(__iter__)\n", " 1 0.023 0.023 0.023 0.023 {built-in method numpy.core.multiarray.bincount}\n", " 1 0.010 0.010 0.010 0.010 /home/aliman/pyenv/zarr_20171023/lib/python3.6/site-packages/numpy/lib/function_base.py:1848(diff)\n", " 1025 0.006 0.000 0.059 0.000 ../zarr/indexing.py:541(__iter__)\n", "\n", "\n" ] } ], "source": [ "profile('zc.oindex[ix_dense_int_sorted]')" ] }, { "cell_type": "code", "execution_count": 72, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:19:13 2017 /tmp/tmpay1gvnx8\n", "\n", " 52362 function calls in 0.398 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 85 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 2 0.107 0.054 0.107 0.054 ../zarr/indexing.py:630()\n", " 1024 0.091 0.000 0.096 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1024 0.041 0.000 0.160 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1 0.040 0.040 0.213 0.213 ../zarr/indexing.py:603(__init__)\n", " 1 0.029 0.029 0.029 0.029 {built-in method numpy.core.multiarray.ravel_multi_index}\n", " 1 0.023 0.023 0.023 0.023 {built-in method numpy.core.multiarray.bincount}\n", " 2048 0.011 0.000 0.011 0.000 ../zarr/indexing.py:695()\n", "\n", "\n" ] } ], "source": [ "profile('zc.vindex[ix_dense_int_sorted]')" ] }, { "cell_type": "code", "execution_count": 73, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:19:20 2017 
/tmp/tmpngsf6zpp\n", "\n", " 120946 function calls in 1.793 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 92 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 1.128 1.128 1.128 1.128 {method 'argsort' of 'numpy.ndarray' objects}\n", " 1024 0.139 0.000 0.285 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1 0.132 0.132 1.422 1.422 ../zarr/indexing.py:369(__init__)\n", " 1 0.120 0.120 0.120 0.120 {method 'take' of 'numpy.ndarray' objects}\n", " 1024 0.116 0.000 0.123 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1025 0.034 0.000 0.034 0.000 ../zarr/indexing.py:424(__iter__)\n", " 1 0.023 0.023 0.023 0.023 {built-in method numpy.core.multiarray.bincount}\n", "\n", "\n" ] } ], "source": [ "profile('zc.oindex[ix_dense_int]')" ] }, { "cell_type": "code", "execution_count": 74, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:19:22 2017 /tmp/tmpbskhj8de\n", "\n", " 50320 function calls in 1.730 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 86 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 1.116 1.116 1.116 1.116 {method 'argsort' of 'numpy.ndarray' objects}\n", " 1024 0.133 0.000 0.275 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 2 0.121 0.060 0.121 0.060 ../zarr/indexing.py:654()\n", " 1024 0.113 0.000 0.119 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 2 0.100 0.050 0.100 0.050 ../zarr/indexing.py:630()\n", " 1 0.030 0.030 0.030 0.030 {built-in method numpy.core.multiarray.ravel_multi_index}\n", " 1 0.024 0.024 1.427 1.427 ../zarr/indexing.py:603(__init__)\n", "\n", "\n" ] } ], "source": [ "profile('zc.vindex[ix_dense_int]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "When indices are not sorted, zarr needs to partially sort them so they occur in chunk order, so that each chunk only has to be visited once. This sorting dominates the processing time and is unavoidable as far as I know." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### bool sparse selection" ] }, { "cell_type": "code", "execution_count": 75, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "9932" ] }, "execution_count": 75, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# relatively sparse selection\n", "ix_sparse_bool = np.random.binomial(1, 0.0001, size=c.shape[0]).astype(bool)\n", "np.count_nonzero(ix_sparse_bool)" ] }, { "cell_type": "code", "execution_count": 76, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "15.7 ms ± 38.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] } ], "source": [ "%timeit c[ix_sparse_bool]" ] }, { "cell_type": "code", "execution_count": 77, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "156 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_sparse_bool]" ] }, { "cell_type": "code", "execution_count": 78, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "133 ms ± 2.76 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_sparse_bool]" ] }, { "cell_type": "code", "execution_count": 79, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:20:09 2017 /tmp/tmpb7nqc9ax\n", "\n", " 98386 function calls in 0.191 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 83 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1024 0.093 0.000 0.098 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1025 0.017 0.000 0.017 0.000 {method 'nonzero' of 'numpy.ndarray' objects}\n", " 1024 0.007 0.000 0.007 0.000 {built-in method numpy.core.multiarray.count_nonzero}\n", " 1024 0.007 0.000 0.129 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1025 0.005 0.000 0.052 0.000 ../zarr/indexing.py:541(__iter__)\n", " 1024 0.005 0.000 0.025 0.000 /home/aliman/pyenv/zarr_20171023/lib/python3.6/site-packages/numpy/lib/index_tricks.py:26(ix_)\n", " 2048 0.004 0.000 0.004 0.000 ../zarr/core.py:337()\n", "\n", "\n" ] } ], "source": [ "profile('zc.oindex[ix_sparse_bool]')" ] }, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:20:09 2017 /tmp/tmphsko8nvh\n", "\n", " 52382 function calls in 0.160 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 88 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1024 0.093 0.000 0.098 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 2 0.017 0.008 0.017 0.008 {method 'nonzero' of 'numpy.ndarray' objects}\n", " 1025 0.008 0.000 0.014 0.000 ../zarr/indexing.py:674(__iter__)\n", " 1024 0.006 0.000 0.127 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 2048 0.004 0.000 0.004 0.000 ../zarr/indexing.py:695()\n", " 2054 0.003 0.000 0.003 0.000 ../zarr/core.py:337()\n", " 1024 0.002 0.000 0.005 0.000 /home/aliman/pyenv/zarr_20171023/lib/python3.6/site-packages/numpy/core/arrayprint.py:381(wrapper)\n", "\n", "\n" ] } ], "source": [ "profile('zc.vindex[ix_sparse_bool]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### int sparse selection" ] }, { "cell_type": "code", "execution_count": 81, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "10000" ] }, "execution_count": 81, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ix_sparse_int = np.random.choice(c.shape[0], size=c.shape[0]//10000, replace=True)\n", "ix_sparse_int_sorted = ix_sparse_int.copy()\n", "ix_sparse_int_sorted.sort()\n", "len(ix_sparse_int)" ] }, { "cell_type": "code", "execution_count": 82, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "18.9 µs ± 392 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n" ] } ], "source": [ "%timeit c[ix_sparse_int_sorted]" ] }, { "cell_type": "code", "execution_count": 83, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "20.3 µs ± 155 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n" ] } ], "source": [ "%timeit c[ix_sparse_int]" ] }, { "cell_type": "code", "execution_count": 84, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "125 ms ± 296 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_sparse_int_sorted]" ] }, { "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "109 ms ± 428 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_sparse_int_sorted]" ] }, { "cell_type": "code", "execution_count": 86, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "132 ms ± 489 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.oindex[ix_sparse_int]" ] }, { "cell_type": "code", "execution_count": 87, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "108 ms ± 579 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc.vindex[ix_sparse_int]" ] }, { "cell_type": "code", "execution_count": 88, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:21:12 2017 /tmp/tmp0b0o2quo\n", "\n", " 120946 function calls in 0.196 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 92 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1024 0.105 0.000 0.111 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 2048 0.006 0.000 0.013 0.000 /home/aliman/pyenv/zarr_20171023/lib/python3.6/site-packages/numpy/lib/index_tricks.py:26(ix_)\n", " 1025 0.006 0.000 0.051 0.000 ../zarr/indexing.py:541(__iter__)\n", " 1024 0.006 0.000 0.141 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 2048 0.005 0.000 0.005 0.000 ../zarr/core.py:337()\n", " 15373 0.004 0.000 0.010 0.000 {built-in method builtins.isinstance}\n", " 1025 0.004 0.000 0.005 0.000 ../zarr/indexing.py:424(__iter__)\n", "\n", "\n" ] } ], "source": [ "profile('zc.oindex[ix_sparse_int]')" ] }, { "cell_type": "code", "execution_count": 89, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:21:19 2017 /tmp/tmpdwju98kn\n", "\n", " 50320 function calls in 0.167 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 86 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1024 0.105 0.000 0.111 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1025 0.009 0.000 0.017 0.000 ../zarr/indexing.py:674(__iter__)\n", " 1024 0.006 0.000 0.142 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 2048 0.005 0.000 0.005 0.000 ../zarr/indexing.py:695()\n", " 2054 0.004 0.000 0.004 0.000 ../zarr/core.py:337()\n", " 1 0.003 0.003 0.162 0.162 ../zarr/core.py:591(_get_selection)\n", " 1027 0.003 0.000 0.003 0.000 {method 'reshape' of 'numpy.ndarray' objects}\n", "\n", "\n" ] } ], "source": [ "profile('zc.vindex[ix_sparse_int]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For sparse selections, processing time is dominated by decompression, so we can't do any better." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### sparse bool selection as zarr array" ] }, { "cell_type": "code", "execution_count": 90, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Typezarr.core.Array
Data typebool
Shape(100000000,)
Chunk shape(390625,)
OrderC
Read-onlyFalse
CompressorBlosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store typebuiltins.dict
No. bytes100000000 (95.4M)
No. bytes stored507131 (495.2K)
Storage ratio197.2
Chunks initialized256/256
" ], "text/plain": [ "Type : zarr.core.Array\n", "Data type : bool\n", "Shape : (100000000,)\n", "Chunk shape : (390625,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 100000000 (95.4M)\n", "No. bytes stored : 507131 (495.2K)\n", "Storage ratio : 197.2\n", "Chunks initialized : 256/256" ] }, "execution_count": 90, "metadata": {}, "output_type": "execute_result" } ], "source": [ "zix_sparse_bool = zarr.array(ix_sparse_bool)\n", "zix_sparse_bool.info" ] }, { "cell_type": "code", "execution_count": 91, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "387 ms ± 5.47 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zc.oindex[zix_sparse_bool]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### slice with step" ] }, { "cell_type": "code", "execution_count": 92, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "80.3 ms ± 377 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit np.array(c[::2])" ] }, { "cell_type": "code", "execution_count": 93, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "168 ms ± 837 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc[::2]" ] }, { "cell_type": "code", "execution_count": 94, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "136 ms ± 1.56 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc[::10]" ] }, { "cell_type": "code", "execution_count": 95, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "104 ms ± 1.86 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc[::100]" ] }, { "cell_type": "code", "execution_count": 96, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "100 ms ± 1.47 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit zc[::1000]" ] }, { "cell_type": "code", "execution_count": 97, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:22:44 2017 /tmp/tmpg9dxqcpg\n", "\n", " 49193 function calls in 0.211 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 55 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1024 0.104 0.000 0.110 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1024 0.067 0.000 0.195 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1025 0.005 0.000 0.013 0.000 ../zarr/indexing.py:278(__iter__)\n", " 2048 0.004 0.000 0.004 0.000 ../zarr/core.py:337()\n", " 2050 0.003 0.000 0.003 0.000 ../zarr/indexing.py:90(ceildiv)\n", " 1025 0.003 0.000 0.006 0.000 ../zarr/indexing.py:109(__iter__)\n", " 1024 0.003 0.000 0.003 0.000 {method 'reshape' of 'numpy.ndarray' objects}\n", "\n", "\n" ] } ], "source": [ "profile('zc[::2]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2D Benchmarking" ] }, { "cell_type": "code", "execution_count": 99, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(100000000,)" ] }, "execution_count": 99, "metadata": {}, "output_type": "execute_result" } ], "source": [ "c.shape" ] }, { "cell_type": "code", "execution_count": 100, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(100000, 1000)" ] }, "execution_count": 100, "metadata": {}, "output_type": "execute_result" } ], "source": [ "d = c.reshape(-1, 1000)\n", "d.shape" ] }, { "cell_type": "code", "execution_count": 101, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Typezarr.core.Array
Data typeint64
Shape(100000, 1000)
Chunk shape(3125, 32)
OrderC
Read-onlyFalse
CompressorBlosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store typebuiltins.dict
No. bytes800000000 (762.9M)
No. bytes stored39228864 (37.4M)
Storage ratio20.4
Chunks initialized1024/1024
" ], "text/plain": [ "Type : zarr.core.Array\n", "Data type : int64\n", "Shape : (100000, 1000)\n", "Chunk shape : (3125, 32)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 800000000 (762.9M)\n", "No. bytes stored : 39228864 (37.4M)\n", "Storage ratio : 20.4\n", "Chunks initialized : 1024/1024" ] }, "execution_count": 101, "metadata": {}, "output_type": "execute_result" } ], "source": [ "zd = zarr.array(d)\n", "zd.info" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### bool orthogonal selection" ] }, { "cell_type": "code", "execution_count": 102, "metadata": {}, "outputs": [], "source": [ "ix0 = np.random.binomial(1, 0.5, size=d.shape[0]).astype(bool)\n", "ix1 = np.random.binomial(1, 0.5, size=d.shape[1]).astype(bool)" ] }, { "cell_type": "code", "execution_count": 103, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "101 ms ± 577 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit d[np.ix_(ix0, ix1)]" ] }, { "cell_type": "code", "execution_count": 104, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "373 ms ± 5.45 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zd.oindex[ix0, ix1]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### int orthogonal selection" ] }, { "cell_type": "code", "execution_count": 105, "metadata": {}, "outputs": [], "source": [ "ix0 = np.random.choice(d.shape[0], size=int(d.shape[0] * .5), replace=True)\n", "ix1 = np.random.choice(d.shape[1], size=int(d.shape[1] * .5), replace=True)" ] }, { "cell_type": "code", "execution_count": 106, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "174 ms ± 4.13 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n" ] } ], "source": [ "%timeit d[np.ix_(ix0, ix1)]" ] }, { "cell_type": "code", "execution_count": 107, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "566 ms ± 12.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zd.oindex[ix0, ix1]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### coordinate (point) selection" ] }, { "cell_type": "code", "execution_count": 108, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "10000000" ] }, "execution_count": 108, "metadata": {}, "output_type": "execute_result" } ], "source": [ "n = int(d.size * .1)\n", "ix0 = np.random.choice(d.shape[0], size=n, replace=True)\n", "ix1 = np.random.choice(d.shape[1], size=n, replace=True)\n", "n" ] }, { "cell_type": "code", "execution_count": 109, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "243 ms ± 3.37 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit d[ix0, ix1]" ] }, { "cell_type": "code", "execution_count": 110, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2.03 s ± 17 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit zd.vindex[ix0, ix1]" ] }, { "cell_type": "code", "execution_count": 111, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Wed Nov 8 17:24:31 2017 /tmp/tmp7c68z70p\n", "\n", " 62673 function calls in 2.065 seconds\n", "\n", " Ordered by: internal time\n", " List reduced from 88 to 7 due to restriction <7>\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 1.112 1.112 1.112 1.112 {method 'argsort' of 'numpy.ndarray' objects}\n", " 3 0.244 0.081 0.244 0.081 ../zarr/indexing.py:654()\n", " 3 0.193 0.064 0.193 0.064 ../zarr/indexing.py:630()\n", " 1024 0.170 0.000 0.350 0.000 ../zarr/core.py:849(_chunk_getitem)\n", " 1024 0.142 0.000 0.151 0.000 ../zarr/core.py:1028(_decode_chunk)\n", " 1 0.044 0.044 0.044 0.044 {built-in method numpy.core.multiarray.ravel_multi_index}\n", " 1 0.043 0.043 1.676 1.676 ../zarr/indexing.py:603(__init__)\n", "\n", "\n" ] } ], "source": [ "profile('zd.vindex[ix0, ix1]')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Points need to be partially sorted so that all points in the same chunk are grouped and processed together. This requires ``argsort``, which dominates the processing time; a sketch of how a caller might amortise this cost is appended at the end of this notebook." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## h5py comparison\n", "\n", "N.B., this is not an entirely fair comparison because h5py is using a slower compressor here, but it is included for interest..." ] }, { "cell_type": "code", "execution_count": 65, "metadata": {}, "outputs": [], "source": [ "import h5py\n", "import tempfile" ] }, { "cell_type": "code", "execution_count": 78, "metadata": {}, "outputs": [], "source": [ "h5f = h5py.File(tempfile.mktemp(), driver='core', backing_store=False)" ] }, { "cell_type": "code", "execution_count": 79, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 79, "metadata": {}, "output_type": "execute_result" } ], "source": [ "hc = h5f.create_dataset('c', data=c, compression='gzip', compression_opts=1, chunks=zc.chunks, shuffle=True)\n", "hc" ] }, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 1.16 s, sys: 172 ms, total: 1.33 s\n", "Wall time: 1.32 s\n" ] }, { "data": { "text/plain": [ "array([ 0, 1, 2, ..., 99999997, 99999998, 99999999])" ] }, "execution_count": 80, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time hc[:]" ] }, { "cell_type": "code", "execution_count": 81, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 1.11 s, sys: 0 ns, total: 1.11 s\n", "Wall time: 1.11 s\n" ] }, { "data": { "text/plain": [ "array([ 1063, 28396, 37229, ..., 99955875, 99979354, 99995791])" ] }, "execution_count": 81, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time hc[ix_sparse_bool]" ] }, { "cell_type": "code", "execution_count": 82, "metadata": {}, "outputs": [], "source": [ "# # this is pathological, takes minutes \n", "# %time hc[ix_dense_bool]" ] }, { "cell_type": "code", "execution_count": 83, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 38.3 s, sys: 136 ms, total: 38.4 s\n", "Wall time: 38.1 s\n" ] }, { "data": { "text/plain": [ "array([ 0, 1000, 2000, ..., 99997000, 99998000, 99999000])" ] }, "execution_count": 83, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# this is pretty slow\n", "%time hc[::1000]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, 
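"outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Appendix (editorial sketch, not part of the original benchmark run): when the same unsorted dense int selection is applied repeatedly, the ``argsort`` cost seen in the profiles above can be amortised by the caller, by sorting the indices once and restoring the requested order afterwards. The names below refer to the arrays defined earlier in this notebook.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Hedged sketch: sort the indices once, select in sorted (chunk) order,\n", "# then restore the originally requested order with a cheap fancy-index.\n", "order = np.argsort(ix_dense_int)\n", "inverse = np.empty_like(order)\n", "inverse[order] = np.arange(order.size)\n", "result = zc.oindex[ix_dense_int[order]][inverse]\n", "assert np.array_equal(result, zc.oindex[ix_dense_int])\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, 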
"outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.1" } }, "nbformat": 4, "nbformat_minor": 2 } zarr-python-3.0.6/notebooks/blosc_microbench.ipynb000066400000000000000000000107211476711733500223750ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "'2.0.1'" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import numpy as np\n", "import zarr\n", "zarr.__version__" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10 loops, best of 3: 110 ms per loop\n", "1 loop, best of 3: 235 ms per loop\n", "Array((100000000,), int64, chunks=(200000,), order=C)\n", " nbytes: 762.9M; nbytes_stored: 11.2M; ratio: 67.8; initialized: 500/500\n", " compressor: Blosc(cname='lz4', clevel=5, shuffle=1)\n", " store: dict\n" ] } ], "source": [ "z = zarr.empty(shape=100000000, chunks=200000, dtype='i8')\n", "data = np.arange(100000000, dtype='i8')\n", "%timeit z[:] = data\n", "%timeit z[:]\n", "print(z)\n", "assert np.all(z[:] == data)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1 loop, best of 3: 331 ms per loop\n", "1 loop, best of 3: 246 ms per loop\n", "Array((100000000,), float64, chunks=(200000,), order=C)\n", " nbytes: 762.9M; nbytes_stored: 724.8M; ratio: 1.1; initialized: 500/500\n", " compressor: Blosc(cname='lz4', clevel=5, shuffle=1)\n", " store: dict\n" ] } ], "source": [ "z = zarr.empty(shape=100000000, chunks=200000, dtype='f8')\n", "data = np.random.normal(size=100000000)\n", "%timeit z[:] = data\n", "%timeit z[:]\n", "print(z)\n", "assert np.all(z[:] == data)" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "'2.0.2.dev0+dirty'" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import numpy as np\n", "import sys\n", "sys.path.insert(0, '..')\n", "import zarr\n", "zarr.__version__" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10 loops, best of 3: 92.7 ms per loop\n", "1 loop, best of 3: 230 ms per loop\n", "Array((100000000,), int64, chunks=(200000,), order=C)\n", " nbytes: 762.9M; nbytes_stored: 11.2M; ratio: 67.8; initialized: 500/500\n", " compressor: Blosc(cname='lz4', clevel=5, shuffle=1)\n", " store: dict\n" ] } ], "source": [ "z = zarr.empty(shape=100000000, chunks=200000, dtype='i8')\n", "data = np.arange(100000000, dtype='i8')\n", "%timeit z[:] = data\n", "%timeit z[:]\n", "print(z)\n", "assert np.all(z[:] == data)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1 loop, best of 3: 338 ms per loop\n", "1 loop, best of 3: 253 ms per loop\n", "Array((100000000,), float64, chunks=(200000,), order=C)\n", " nbytes: 762.9M; nbytes_stored: 724.8M; ratio: 1.1; initialized: 500/500\n", " compressor: 
Blosc(cname='lz4', clevel=5, shuffle=1)\n", " store: dict\n" ] } ], "source": [ "z = zarr.empty(shape=100000000, chunks=200000, dtype='f8')\n", "data = np.random.normal(size=100000000)\n", "%timeit z[:] = data\n", "%timeit z[:]\n", "print(z)\n", "assert np.all(z[:] == data)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 1 } zarr-python-3.0.6/notebooks/dask_2d_subset.ipynb000066400000000000000000001124671476711733500220100ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook has some profiling of Dask used to make a selection along both first and second axes of a large-ish multidimensional array. The use case is making selections of genotype data, e.g., as required for making a web-browser for genotype data as in www.malariagen.net/apps/ag1000g." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "zarr 2.1.1\n", "dask 0.11.0\n" ] } ], "source": [ "import zarr; print('zarr', zarr.__version__)\n", "import dask; print('dask', dask.__version__)\n", "import dask.array as da\n", "import numpy as np" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Real data" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "Group(/, 8)\n", " arrays: 1; samples\n", " groups: 7; 2L, 2R, 3L, 3R, UNKN, X, Y_unplaced\n", " store: DirectoryStore" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# here's the real data\n", "callset = zarr.open_group('/kwiat/2/coluzzi/ag1000g/data/phase1/release/AR3.1/variation/main/zarr2/zstd/ag1000g.phase1.ar3',\n", " mode='r')\n", "callset" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "Array(/3R/calldata/genotype, (22632425, 765, 2), int8, chunks=(13107, 40, 2), order=C)\n", " nbytes: 32.2G; nbytes_stored: 1.0G; ratio: 31.8; initialized: 34540/34540\n", " compressor: Blosc(cname='zstd', clevel=1, shuffle=2)\n", " store: DirectoryStore" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# here's the array we're going to work with\n", "g = callset['3R/calldata/genotype']\n", "g" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 4 ms, sys: 0 ns, total: 4 ms\n", "Wall time: 5.13 ms\n" ] }, { "data": { "text/plain": [ "dask.array" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# wrap as dask array with very simple chunking of first dim only\n", "%time gd = da.from_array(g, chunks=(g.chunks[0], None, None))\n", "gd" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "((22632425,), dtype('bool'), 13167162)" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# load 
condition used to make selection on first axis\n", "dim0_condition = callset['3R/variants/FILTER_PASS'][:]\n", "dim0_condition.shape, dim0_condition.dtype, np.count_nonzero(dim0_condition)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# invent a random selection for second axis\n", "dim1_indices = sorted(np.random.choice(765, size=100, replace=False))" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.3 s, sys: 256 ms, total: 15.5 s\n", "Wall time: 15.5 s\n" ] }, { "data": { "text/plain": [ "dask.array" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# setup the 2D selection - this is the slow bit\n", "%time gd_sel = gd[dim0_condition][:, dim1_indices]\n", "gd_sel" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 1.21 s, sys: 152 ms, total: 1.36 s\n", "Wall time: 316 ms\n" ] }, { "data": { "text/plain": [ "array([[[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 0],\n", " [0, 0],\n", " [0, 0]],\n", "\n", " [[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 0],\n", " [0, 0],\n", " [0, 0]],\n", "\n", " [[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 0],\n", " [0, 0],\n", " [0, 0]],\n", "\n", " ..., \n", " [[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 1],\n", " [0, 0],\n", " [0, 0]],\n", "\n", " [[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 0],\n", " [0, 0],\n", " [0, 0]],\n", "\n", " [[0, 0],\n", " [0, 0],\n", " [0, 0],\n", " ..., \n", " [0, 0],\n", " [0, 0],\n", " [0, 0]]], dtype=int8)" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# now load a slice from this new selection - quick!\n", "%time gd_sel[1000000:1100000].compute(optimize_graph=False)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 105406881 function calls (79072145 primitive calls) in 26.182 seconds\n", "\n", " Ordered by: internal time\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", "13167268/6 6.807 0.000 9.038 1.506 slicing.py:623(check_index)\n", " 2 4.713 2.356 5.831 2.916 slicing.py:398(partition_by_size)\n", "13167270/2 4.470 0.000 8.763 4.382 slicing.py:540(posify_index)\n", " 52669338 4.118 0.000 4.119 0.000 {built-in method builtins.isinstance}\n", " 2 2.406 1.203 8.763 4.382 slicing.py:563()\n", " 1 0.875 0.875 0.875 0.875 slicing.py:44()\n", " 13182474 0.600 0.000 0.600 0.000 {built-in method builtins.len}\n", " 2 0.527 0.264 0.527 0.264 slicing.py:420(issorted)\n", " 13189168 0.520 0.000 0.520 0.000 {method 'append' of 'list' objects}\n", " 2 0.271 0.136 0.271 0.136 slicing.py:479()\n", " 2 0.220 0.110 0.220 0.110 {built-in method builtins.sorted}\n", " 1 0.162 0.162 0.162 0.162 {method 'tolist' of 'numpy.ndarray' objects}\n", " 2 0.113 0.056 26.071 13.035 core.py:1024(__getitem__)\n", " 2 0.112 0.056 6.435 3.217 slicing.py:441(take_sorted)\n", " 1 0.111 0.111 26.182 26.182 :1()\n", " 2 0.060 0.030 24.843 12.422 slicing.py:142(slice_with_newaxes)\n", " 106/3 0.039 0.000 1.077 0.359 slicing.py:15(sanitize_index)\n", " 3 0.037 0.012 0.037 0.012 {built-in method _hashlib.openssl_md5}\n", " 6726 0.012 0.000 0.017 0.000 
slicing.py:567(insert_many)\n", " 3364 0.004 0.000 0.021 0.000 slicing.py:156()\n", " 20178 0.003 0.000 0.003 0.000 {method 'pop' of 'list' objects}\n", " 8 0.000 0.000 0.000 0.000 {method 'update' of 'dict' objects}\n", " 2 0.000 0.000 25.920 12.960 slicing.py:60(slice_array)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:162()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:464()\n", " 106/4 0.000 0.000 0.037 0.009 utils.py:502(__call__)\n", " 100 0.000 0.000 0.000 0.000 arrayprint.py:340(array2string)\n", " 2 0.000 0.000 0.037 0.019 base.py:343(tokenize)\n", " 100 0.000 0.000 0.000 0.000 {built-in method builtins.repr}\n", " 2 0.000 0.000 24.763 12.381 slicing.py:170(slice_wrap_lists)\n", " 108 0.000 0.000 0.000 0.000 abc.py:178(__instancecheck__)\n", " 2 0.000 0.000 6.962 3.481 slicing.py:487(take)\n", " 1 0.000 0.000 26.182 26.182 {built-in method builtins.exec}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:465()\n", " 1 0.000 0.000 0.037 0.037 base.py:314(normalize_array)\n", " 2/1 0.000 0.000 0.000 0.000 base.py:270(normalize_seq)\n", " 116 0.000 0.000 0.000 0.000 _weakrefset.py:70(__contains__)\n", " 100 0.000 0.000 0.000 0.000 numeric.py:1835(array_str)\n", " 1 0.000 0.000 0.000 0.000 slicing.py:47()\n", " 6 0.000 0.000 0.000 0.000 {built-in method builtins.sum}\n", " 2 0.000 0.000 0.000 0.000 exceptions.py:15(merge)\n", " 100 0.000 0.000 0.000 0.000 inspect.py:441(getmro)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:475()\n", " 4 0.000 0.000 0.000 0.000 dicttoolz.py:19(merge)\n", " 4 0.000 0.000 0.000 0.000 functoolz.py:217(__call__)\n", " 2 0.000 0.000 0.000 0.000 core.py:1455(normalize_chunks)\n", " 4 0.000 0.000 0.000 0.000 dicttoolz.py:11(_get_factory)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:467()\n", " 100 0.000 0.000 0.000 0.000 {method 'item' of 'numpy.ndarray' objects}\n", " 2 0.000 0.000 0.000 0.000 core.py:794(__init__)\n", " 8 0.000 0.000 0.000 0.000 {built-in method builtins.all}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:197()\n", " 8 0.000 0.000 0.000 0.000 slicing.py:183()\n", " 5 0.000 0.000 0.000 0.000 core.py:1043()\n", " 7 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n", " 5 0.000 0.000 0.000 0.000 slicing.py:125()\n", " 1 0.000 0.000 0.000 0.000 {method 'view' of 'numpy.ndarray' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:192()\n", " 3 0.000 0.000 0.000 0.000 {method 'hexdigest' of '_hashlib.HASH' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:606(replace_ellipsis)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:613()\n", " 1 0.000 0.000 0.000 0.000 {method 'ravel' of 'numpy.ndarray' objects}\n", " 4 0.000 0.000 0.000 0.000 {method 'items' of 'dict' objects}\n", " 2 0.000 0.000 0.000 0.000 {method 'encode' of 'str' objects}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:207()\n", " 2 0.000 0.000 0.000 0.000 core.py:826(_get_chunks)\n", " 2 0.000 0.000 0.000 0.000 core.py:1452()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:149()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:150()\n", " 1 0.000 0.000 0.000 0.000 functoolz.py:11(identity)\n", " 4 0.000 0.000 0.000 0.000 {method 'pop' of 'dict' objects}\n", " 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n", " 2 0.000 0.000 0.000 0.000 {method 'count' of 'tuple' objects}\n", "\n", "\n" ] } ], "source": [ "# what's taking so long?\n", "import cProfile\n", "cProfile.run('gd[dim0_condition][:, dim1_indices]', sort='time')" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 105406881 
function calls (79072145 primitive calls) in 25.630 seconds\n", "\n", " Ordered by: cumulative time\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 0.000 0.000 25.630 25.630 {built-in method builtins.exec}\n", " 1 0.107 0.107 25.630 25.630 :1()\n", " 2 0.102 0.051 25.523 12.761 core.py:1024(__getitem__)\n", " 2 0.001 0.000 25.381 12.691 slicing.py:60(slice_array)\n", " 2 0.049 0.024 24.214 12.107 slicing.py:142(slice_with_newaxes)\n", " 2 0.000 0.000 24.147 12.073 slicing.py:170(slice_wrap_lists)\n", "13167268/6 6.664 0.000 8.855 1.476 slicing.py:623(check_index)\n", "13167270/2 4.354 0.000 8.466 4.233 slicing.py:540(posify_index)\n", " 2 2.277 1.139 8.465 4.233 slicing.py:563()\n", " 2 0.000 0.000 6.826 3.413 slicing.py:487(take)\n", " 2 0.111 0.056 6.331 3.165 slicing.py:441(take_sorted)\n", " 2 4.628 2.314 5.704 2.852 slicing.py:398(partition_by_size)\n", " 52669338 4.026 0.000 4.026 0.000 {built-in method builtins.isinstance}\n", " 106/3 0.071 0.001 1.167 0.389 slicing.py:15(sanitize_index)\n", " 1 0.943 0.943 0.943 0.943 slicing.py:44()\n", " 13182474 0.581 0.000 0.581 0.000 {built-in method builtins.len}\n", " 13189168 0.497 0.000 0.497 0.000 {method 'append' of 'list' objects}\n", " 2 0.495 0.248 0.495 0.248 slicing.py:420(issorted)\n", " 2 0.281 0.141 0.281 0.141 slicing.py:479()\n", " 2 0.234 0.117 0.234 0.117 {built-in method builtins.sorted}\n", " 1 0.152 0.152 0.152 0.152 {method 'tolist' of 'numpy.ndarray' objects}\n", " 2 0.000 0.000 0.039 0.020 base.py:343(tokenize)\n", " 106/4 0.000 0.000 0.039 0.010 utils.py:502(__call__)\n", " 1 0.000 0.000 0.039 0.039 base.py:314(normalize_array)\n", " 3 0.039 0.013 0.039 0.013 {built-in method _hashlib.openssl_md5}\n", " 3364 0.003 0.000 0.019 0.000 slicing.py:156()\n", " 6726 0.012 0.000 0.016 0.000 slicing.py:567(insert_many)\n", " 20178 0.003 0.000 0.003 0.000 {method 'pop' of 'list' objects}\n", " 4 0.000 0.000 0.000 0.000 dicttoolz.py:19(merge)\n", " 8 0.000 0.000 0.000 0.000 {method 'update' of 'dict' objects}\n", " 4 0.000 0.000 0.000 0.000 functoolz.py:217(__call__)\n", " 2 0.000 0.000 0.000 0.000 exceptions.py:15(merge)\n", " 2/1 0.000 0.000 0.000 0.000 base.py:270(normalize_seq)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:162()\n", " 100 0.000 0.000 0.000 0.000 {built-in method builtins.repr}\n", " 1 0.000 0.000 0.000 0.000 slicing.py:47()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:464()\n", " 100 0.000 0.000 0.000 0.000 numeric.py:1835(array_str)\n", " 100 0.000 0.000 0.000 0.000 arrayprint.py:340(array2string)\n", " 108 0.000 0.000 0.000 0.000 abc.py:178(__instancecheck__)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:465()\n", " 8 0.000 0.000 0.000 0.000 {built-in method builtins.all}\n", " 2 0.000 0.000 0.000 0.000 core.py:794(__init__)\n", " 116 0.000 0.000 0.000 0.000 _weakrefset.py:70(__contains__)\n", " 2 0.000 0.000 0.000 0.000 core.py:1455(normalize_chunks)\n", " 6 0.000 0.000 0.000 0.000 {built-in method builtins.sum}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:183()\n", " 100 0.000 0.000 0.000 0.000 {method 'item' of 'numpy.ndarray' objects}\n", " 100 0.000 0.000 0.000 0.000 inspect.py:441(getmro)\n", " 2 0.000 0.000 0.000 0.000 {method 'encode' of 'str' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:606(replace_ellipsis)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:475()\n", " 5 0.000 0.000 0.000 0.000 slicing.py:125()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:467()\n", " 3 0.000 0.000 0.000 0.000 {method 'hexdigest' of '_hashlib.HASH' objects}\n", " 1 0.000 0.000 0.000 0.000 
{method 'view' of 'numpy.ndarray' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:192()\n", " 4 0.000 0.000 0.000 0.000 dicttoolz.py:11(_get_factory)\n", " 5 0.000 0.000 0.000 0.000 core.py:1043()\n", " 7 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:207()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:613()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:149()\n", " 1 0.000 0.000 0.000 0.000 {method 'ravel' of 'numpy.ndarray' objects}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:197()\n", " 2 0.000 0.000 0.000 0.000 core.py:826(_get_chunks)\n", " 2 0.000 0.000 0.000 0.000 core.py:1452()\n", " 4 0.000 0.000 0.000 0.000 {method 'pop' of 'dict' objects}\n", " 4 0.000 0.000 0.000 0.000 {method 'items' of 'dict' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:150()\n", " 2 0.000 0.000 0.000 0.000 {method 'count' of 'tuple' objects}\n", " 1 0.000 0.000 0.000 0.000 functoolz.py:11(identity)\n", " 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n", "\n", "\n" ] } ], "source": [ "cProfile.run('gd[dim0_condition][:, dim1_indices]', sort='cumtime')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Synthetic data" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "Array((20000000, 200, 2), int8, chunks=(10000, 100, 2), order=C)\n", " nbytes: 7.5G; nbytes_stored: 2.7G; ratio: 2.8; initialized: 4000/4000\n", " compressor: Blosc(cname='zstd', clevel=1, shuffle=2)\n", " store: dict" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# create a synthetic dataset for profiling\n", "a = zarr.array(np.random.randint(-1, 4, size=(20000000, 200, 2), dtype='i1'),\n", " chunks=(10000, 100, 2), compressor=zarr.Blosc(cname='zstd', clevel=1, shuffle=2))\n", "a" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# create a synthetic selection for first axis\n", "c = np.random.randint(0, 2, size=a.shape[0], dtype=bool)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# create a synthetic selection for second axis\n", "s = sorted(np.random.choice(a.shape[1], size=100, replace=False))" ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 208 ms, sys: 0 ns, total: 208 ms\n", "Wall time: 206 ms\n" ] }, { "data": { "text/plain": [ "dask.array" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time d = da.from_array(a, chunks=(a.chunks[0], None, None))\n", "d" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 12 s, sys: 200 ms, total: 12.2 s\n", "Wall time: 12.2 s\n" ] } ], "source": [ "%time ds = d[c][:, s]" ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 80095589 function calls (60091843 primitive calls) in 19.467 seconds\n", "\n", " Ordered by: internal time\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", "10001773/6 4.872 0.000 6.456 1.076 slicing.py:623(check_index)\n", " 2 3.517 1.758 4.357 2.179 slicing.py:398(partition_by_size)\n", "10001775/2 3.354 0.000 
6.484 3.242 slicing.py:540(posify_index)\n", " 40007358 2.965 0.000 2.965 0.000 {built-in method builtins.isinstance}\n", " 2 1.749 0.875 6.484 3.242 slicing.py:563()\n", " 1 0.878 0.878 0.878 0.878 slicing.py:44()\n", " 10019804 0.451 0.000 0.451 0.000 {built-in method builtins.len}\n", " 10027774 0.392 0.000 0.392 0.000 {method 'append' of 'list' objects}\n", " 2 0.363 0.181 0.363 0.181 slicing.py:420(issorted)\n", " 2 0.270 0.135 4.786 2.393 slicing.py:441(take_sorted)\n", " 1 0.207 0.207 0.207 0.207 {method 'tolist' of 'numpy.ndarray' objects}\n", " 2 0.158 0.079 0.158 0.079 {built-in method builtins.sorted}\n", " 1 0.094 0.094 19.467 19.467 :1()\n", " 2 0.079 0.040 19.373 9.686 core.py:1024(__getitem__)\n", " 2 0.035 0.017 18.147 9.074 slicing.py:142(slice_with_newaxes)\n", " 3 0.033 0.011 0.033 0.011 {built-in method _hashlib.openssl_md5}\n", " 106/3 0.028 0.000 1.112 0.371 slicing.py:15(sanitize_index)\n", " 8002 0.015 0.000 0.020 0.000 slicing.py:567(insert_many)\n", " 4002 0.004 0.000 0.023 0.000 slicing.py:156()\n", " 24006 0.003 0.000 0.003 0.000 {method 'pop' of 'list' objects}\n", " 8 0.001 0.000 0.001 0.000 {method 'update' of 'dict' objects}\n", " 2 0.001 0.000 0.001 0.000 slicing.py:479()\n", " 2 0.000 0.000 19.259 9.630 slicing.py:60(slice_array)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:162()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:464()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:465()\n", " 106/4 0.000 0.000 0.034 0.008 utils.py:502(__call__)\n", " 2 0.000 0.000 18.089 9.044 slicing.py:170(slice_wrap_lists)\n", " 100 0.000 0.000 0.000 0.000 arrayprint.py:340(array2string)\n", " 100 0.000 0.000 0.000 0.000 {built-in method builtins.repr}\n", " 108 0.000 0.000 0.000 0.000 abc.py:178(__instancecheck__)\n", " 2 0.000 0.000 5.149 2.574 slicing.py:487(take)\n", " 2 0.000 0.000 0.034 0.017 base.py:343(tokenize)\n", " 1 0.000 0.000 0.033 0.033 base.py:314(normalize_array)\n", " 116 0.000 0.000 0.000 0.000 _weakrefset.py:70(__contains__)\n", " 2/1 0.000 0.000 0.000 0.000 base.py:270(normalize_seq)\n", " 6 0.000 0.000 0.000 0.000 {built-in method builtins.sum}\n", " 100 0.000 0.000 0.000 0.000 numeric.py:1835(array_str)\n", " 1 0.000 0.000 0.000 0.000 slicing.py:47()\n", " 1 0.000 0.000 19.467 19.467 {built-in method builtins.exec}\n", " 100 0.000 0.000 0.000 0.000 inspect.py:441(getmro)\n", " 8 0.000 0.000 0.000 0.000 {built-in method builtins.all}\n", " 4 0.000 0.000 0.001 0.000 dicttoolz.py:19(merge)\n", " 2 0.000 0.000 0.000 0.000 core.py:1455(normalize_chunks)\n", " 100 0.000 0.000 0.000 0.000 {method 'item' of 'numpy.ndarray' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:475()\n", " 2 0.000 0.000 0.000 0.000 core.py:794(__init__)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:467()\n", " 3 0.000 0.000 0.000 0.000 {method 'hexdigest' of '_hashlib.HASH' objects}\n", " 2 0.000 0.000 0.001 0.000 exceptions.py:15(merge)\n", " 7 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:606(replace_ellipsis)\n", " 4 0.000 0.000 0.001 0.000 functoolz.py:217(__call__)\n", " 8 0.000 0.000 0.000 0.000 slicing.py:183()\n", " 4 0.000 0.000 0.000 0.000 dicttoolz.py:11(_get_factory)\n", " 5 0.000 0.000 0.000 0.000 core.py:1043()\n", " 2 0.000 0.000 0.000 0.000 {method 'encode' of 'str' objects}\n", " 1 0.000 0.000 0.000 0.000 {method 'view' of 'numpy.ndarray' objects}\n", " 8 0.000 0.000 0.000 0.000 slicing.py:197()\n", " 5 0.000 0.000 0.000 0.000 slicing.py:125()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:192()\n", " 8 0.000 0.000 0.000 
0.000 slicing.py:207()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:613()\n", " 2 0.000 0.000 0.000 0.000 {method 'count' of 'tuple' objects}\n", " 1 0.000 0.000 0.000 0.000 {method 'ravel' of 'numpy.ndarray' objects}\n", " 1 0.000 0.000 0.000 0.000 functoolz.py:11(identity)\n", " 4 0.000 0.000 0.000 0.000 {method 'pop' of 'dict' objects}\n", " 2 0.000 0.000 0.000 0.000 slicing.py:150()\n", " 2 0.000 0.000 0.000 0.000 core.py:826(_get_chunks)\n", " 2 0.000 0.000 0.000 0.000 core.py:1452()\n", " 2 0.000 0.000 0.000 0.000 slicing.py:149()\n", " 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n", " 4 0.000 0.000 0.000 0.000 {method 'items' of 'dict' objects}\n", "\n", "\n" ] } ], "source": [ "cProfile.run('d[c][:, s]', sort='time')" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 452 ms, sys: 8 ms, total: 460 ms\n", "Wall time: 148 ms\n" ] }, { "data": { "text/plain": [ "array([[[ 2, -1],\n", " [ 2, 3],\n", " [ 3, 0],\n", " ..., \n", " [ 1, 3],\n", " [-1, -1],\n", " [ 1, 1]],\n", "\n", " [[ 1, -1],\n", " [ 2, 2],\n", " [-1, 2],\n", " ..., \n", " [ 2, -1],\n", " [ 1, 3],\n", " [-1, -1]],\n", "\n", " [[ 1, -1],\n", " [ 2, 0],\n", " [ 0, 3],\n", " ..., \n", " [ 2, 2],\n", " [ 3, 2],\n", " [ 0, 2]],\n", "\n", " ..., \n", " [[ 1, 2],\n", " [ 3, -1],\n", " [ 2, 1],\n", " ..., \n", " [ 1, 2],\n", " [ 1, 0],\n", " [ 2, 0]],\n", "\n", " [[ 1, 2],\n", " [ 1, 0],\n", " [ 2, 3],\n", " ..., \n", " [-1, 2],\n", " [ 3, 3],\n", " [ 1, -1]],\n", "\n", " [[-1, 3],\n", " [ 2, 2],\n", " [ 1, 1],\n", " ..., \n", " [ 3, 3],\n", " [ 0, 0],\n", " [ 0, 2]]], dtype=int8)" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time ds[1000000:1100000].compute(optimize_graph=False)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 80055494 function calls (60052157 primitive calls) in 19.425 seconds\n", "\n", " Ordered by: internal time\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", "10001670/3 5.032 0.000 6.671 2.224 slicing.py:623(check_index)\n", " 1 3.459 3.459 4.272 4.272 slicing.py:398(partition_by_size)\n", "10001671/1 3.287 0.000 6.378 6.378 slicing.py:540(posify_index)\n", " 40006704 2.999 0.000 2.999 0.000 {built-in method builtins.isinstance}\n", " 1 1.731 1.731 6.378 6.378 slicing.py:563()\n", " 1 0.849 0.849 0.849 0.849 slicing.py:44()\n", " 10011685 0.433 0.000 0.433 0.000 {built-in method builtins.len}\n", " 10015670 0.381 0.000 0.381 0.000 {method 'append' of 'list' objects}\n", " 1 0.355 0.355 0.355 0.355 slicing.py:420(issorted)\n", " 1 0.196 0.196 0.196 0.196 {method 'tolist' of 'numpy.ndarray' objects}\n", " 1 0.193 0.193 0.193 0.193 slicing.py:479()\n", " 1 0.157 0.157 0.157 0.157 {built-in method builtins.sorted}\n", " 1 0.085 0.085 4.707 4.707 slicing.py:441(take_sorted)\n", " 1 0.085 0.085 19.425 19.425 :1()\n", " 1 0.079 0.079 19.341 19.341 core.py:1024(__getitem__)\n", " 1 0.034 0.034 18.157 18.157 slicing.py:142(slice_with_newaxes)\n", " 2 0.033 0.017 0.033 0.017 {built-in method _hashlib.openssl_md5}\n", " 1 0.026 0.026 1.071 1.071 slicing.py:15(sanitize_index)\n", " 4001 0.007 0.000 0.009 0.000 slicing.py:567(insert_many)\n", " 2001 0.002 0.000 0.011 0.000 slicing.py:156()\n", " 12003 0.001 0.000 0.001 0.000 {method 'pop' of 'list' objects}\n", " 4 0.000 0.000 0.000 
0.000 {method 'update' of 'dict' objects}\n", " 1 0.000 0.000 19.228 19.228 slicing.py:60(slice_array)\n", " 1 0.000 0.000 0.000 0.000 slicing.py:464()\n", " 1 0.000 0.000 0.000 0.000 slicing.py:162()\n", " 1 0.000 0.000 0.033 0.033 base.py:314(normalize_array)\n", " 1 0.000 0.000 18.111 18.111 slicing.py:170(slice_wrap_lists)\n", " 1 0.000 0.000 0.000 0.000 slicing.py:465()\n", " 1 0.000 0.000 5.062 5.062 slicing.py:487(take)\n", " 1 0.000 0.000 0.033 0.033 base.py:343(tokenize)\n", " 1 0.000 0.000 19.425 19.425 {built-in method builtins.exec}\n", " 2 0.000 0.000 0.000 0.000 functoolz.py:217(__call__)\n", " 3 0.000 0.000 0.000 0.000 {built-in method builtins.sum}\n", " 2 0.000 0.000 0.000 0.000 abc.py:178(__instancecheck__)\n", " 1 0.000 0.000 0.000 0.000 core.py:1455(normalize_chunks)\n", " 2 0.000 0.000 0.000 0.000 dicttoolz.py:19(merge)\n", " 4 0.000 0.000 0.000 0.000 _weakrefset.py:70(__contains__)\n", " 2 0.000 0.000 0.000 0.000 dicttoolz.py:11(_get_factory)\n", " 1 0.000 0.000 0.000 0.000 exceptions.py:15(merge)\n", " 1 0.000 0.000 0.000 0.000 core.py:794(__init__)\n", " 4 0.000 0.000 0.000 0.000 {built-in method builtins.all}\n", " 1 0.000 0.000 0.000 0.000 slicing.py:467()\n", " 1 0.000 0.000 0.000 0.000 {method 'view' of 'numpy.ndarray' objects}\n", " 4 0.000 0.000 0.000 0.000 slicing.py:183()\n", " 2 0.000 0.000 0.000 0.000 {method 'hexdigest' of '_hashlib.HASH' objects}\n", " 1 0.000 0.000 0.000 0.000 slicing.py:606(replace_ellipsis)\n", " 1 0.000 0.000 0.000 0.000 slicing.py:192()\n", " 4 0.000 0.000 0.000 0.000 slicing.py:207()\n", " 1 0.000 0.000 0.000 0.000 slicing.py:475()\n", " 2 0.000 0.000 0.033 0.017 utils.py:502(__call__)\n", " 2 0.000 0.000 0.000 0.000 slicing.py:125()\n", " 2 0.000 0.000 0.000 0.000 core.py:1043()\n", " 4 0.000 0.000 0.000 0.000 slicing.py:197()\n", " 1 0.000 0.000 0.000 0.000 core.py:826(_get_chunks)\n", " 2 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n", " 1 0.000 0.000 0.000 0.000 {method 'ravel' of 'numpy.ndarray' objects}\n", " 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n", " 1 0.000 0.000 0.000 0.000 {method 'encode' of 'str' objects}\n", " 1 0.000 0.000 0.000 0.000 slicing.py:613()\n", " 1 0.000 0.000 0.000 0.000 core.py:1452()\n", " 1 0.000 0.000 0.000 0.000 slicing.py:149()\n", " 2 0.000 0.000 0.000 0.000 {method 'pop' of 'dict' objects}\n", " 2 0.000 0.000 0.000 0.000 {method 'items' of 'dict' objects}\n", " 1 0.000 0.000 0.000 0.000 slicing.py:150()\n", " 1 0.000 0.000 0.000 0.000 {method 'count' of 'tuple' objects}\n", "\n", "\n" ] } ], "source": [ "# problem is in fact just the dim0 selection\n", "cProfile.run('d[c]', sort='time')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.2" } }, "nbformat": 4, "nbformat_minor": 1 } zarr-python-3.0.6/notebooks/dask_copy.ipynb000066400000000000000000014737751476711733500211050ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Profile array copy via dask threaded scheduler" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook profiles a very simple array copy operation, using synthetic data." 
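] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Editorial sketch (assumed, not taken from the original profiling run): the core pattern profiled below is to wrap the source array with ``da.from_array`` and then write it into the destination with ``da.store``.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# minimal, self-contained sketch of the copy pattern (hypothetical cell)\n", "import numpy as np\n", "import dask.array as da\n", "src = np.arange(1000000, dtype='u2')\n", "dst = np.empty_like(src)\n", "da.store(da.from_array(src, chunks=100000), dst)\n", "assert np.array_equal(src, dst)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The cells below set up the actual benchmark environment and data."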
] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "zarr 1.0.1.dev18+dirty\n" ] }, { "data": { "text/html": [ "\n", "
\n", " \n", " Loading BokehJS ...\n", "
" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/javascript": [ "\n", "(function(global) {\n", " function now() {\n", " return new Date();\n", " }\n", "\n", " if (typeof (window._bokeh_onload_callbacks) === \"undefined\") {\n", " window._bokeh_onload_callbacks = [];\n", " }\n", "\n", " function run_callbacks() {\n", " window._bokeh_onload_callbacks.forEach(function(callback) { callback() });\n", " delete window._bokeh_onload_callbacks\n", " console.info(\"Bokeh: all callbacks have finished\");\n", " }\n", "\n", " function load_libs(js_urls, callback) {\n", " window._bokeh_onload_callbacks.push(callback);\n", " if (window._bokeh_is_loading > 0) {\n", " console.log(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n", " return null;\n", " }\n", " if (js_urls == null || js_urls.length === 0) {\n", " run_callbacks();\n", " return null;\n", " }\n", " console.log(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n", " window._bokeh_is_loading = js_urls.length;\n", " for (var i = 0; i < js_urls.length; i++) {\n", " var url = js_urls[i];\n", " var s = document.createElement('script');\n", " s.src = url;\n", " s.async = false;\n", " s.onreadystatechange = s.onload = function() {\n", " window._bokeh_is_loading--;\n", " if (window._bokeh_is_loading === 0) {\n", " console.log(\"Bokeh: all BokehJS libraries loaded\");\n", " run_callbacks()\n", " }\n", " };\n", " s.onerror = function() {\n", " console.warn(\"failed to load library \" + url);\n", " };\n", " console.log(\"Bokeh: injecting script tag for BokehJS library: \", url);\n", " document.getElementsByTagName(\"head\")[0].appendChild(s);\n", " }\n", " };\n", "\n", " var js_urls = ['https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-compiler-0.12.0.min.js'];\n", "\n", " var inline_js = [\n", " function(Bokeh) {\n", " Bokeh.set_log_level(\"info\");\n", " },\n", " \n", " function(Bokeh) {\n", " Bokeh.$(\"#d4821cb3-378c-411d-a941-d0708c0c532b\").text(\"BokehJS successfully loaded\");\n", " },\n", " function(Bokeh) {\n", " console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.css\");\n", " Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.css\");\n", " console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.css\");\n", " Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.css\");\n", " }\n", " ];\n", "\n", " function run_inline_js() {\n", " for (var i = 0; i < inline_js.length; i++) {\n", " inline_js[i](window.Bokeh);\n", " }\n", " }\n", "\n", " if (window._bokeh_is_loading === 0) {\n", " console.log(\"Bokeh: BokehJS loaded, going straight to plotting\");\n", " run_inline_js();\n", " } else {\n", " load_libs(js_urls, function() {\n", " console.log(\"Bokeh: BokehJS plotting callback run at\", now());\n", " run_inline_js();\n", " });\n", " }\n", "}(this));" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import sys\n", "sys.path.insert(0, '..')\n", "import zarr\n", "print('zarr', zarr.__version__)\n", "from zarr import blosc\n", "import numpy as np\n", "import h5py\n", "import bcolz\n", "# don't let bcolz use multiple threads internally, we want to \n", "# see whether dask can make good use of multiple CPUs\n", "bcolz.set_nthreads(1)\n", "import multiprocessing\n", "import dask\n", "import dask.array as 
da\n", "from dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler\n", "from dask.diagnostics.profile_visualize import visualize\n", "from cachey import nbytes\n", "import bokeh\n", "from bokeh.io import output_notebook\n", "output_notebook()" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import tempfile\n", "import operator\n", "from functools import reduce\n", "from zarr.util import human_readable_size\n", "\n", "\n", "def h5fmem(**kwargs):\n", " \"\"\"Convenience function to create an in-memory HDF5 file.\"\"\"\n", "\n", " # need a file name even tho nothing is ever written\n", " fn = tempfile.mktemp()\n", "\n", " # file creation args\n", " kwargs['mode'] = 'w'\n", " kwargs['driver'] = 'core'\n", " kwargs['backing_store'] = False\n", "\n", " # open HDF5 file\n", " h5f = h5py.File(fn, **kwargs)\n", "\n", " return h5f\n", "\n", "\n", "def h5d_diagnostics(d):\n", " \"\"\"Print some diagnostics on an HDF5 dataset.\"\"\"\n", " \n", " print(d)\n", " nbytes = reduce(operator.mul, d.shape) * d.dtype.itemsize\n", " cbytes = d._id.get_storage_size()\n", " if cbytes > 0:\n", " ratio = nbytes / cbytes\n", " else:\n", " ratio = np.inf\n", " r = ' compression: %s' % d.compression\n", " r += '; compression_opts: %s' % d.compression_opts\n", " r += '; shuffle: %s' % d.shuffle\n", " r += '\\n nbytes: %s' % human_readable_size(nbytes)\n", " r += '; nbytes_stored: %s' % human_readable_size(cbytes)\n", " r += '; ratio: %.1f' % ratio\n", " r += '; chunks: %s' % str(d.chunks)\n", " print(r)\n", " " ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [], "source": [ "def profile_dask_copy(src, dst, chunks, num_workers=multiprocessing.cpu_count(), dt=0.1, lock=True):\n", " dsrc = da.from_array(src, chunks=chunks)\n", " with Profiler() as prof, ResourceProfiler(dt=dt) as rprof:\n", " da.store(dsrc, dst, num_workers=num_workers, lock=lock)\n", " visualize([prof, rprof], min_border_top=60, min_border_bottom=60)\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## NumPy arrays" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "array([1314, 2727, 2905, ..., 1958, 1325, 1971], dtype=uint16)" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# a1 = np.arange(400000000, dtype='i4')\n", "a1 = np.random.normal(2000, 1000, size=200000000).astype('u2')\n", "a1" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "'381.5M'" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "human_readable_size(a1.nbytes)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [], "source": [ "a2 = np.empty_like(a1)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": true }, "outputs": [], "source": [ "chunks = 2**20, # 4M" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 56 ms, sys: 36 ms, total: 92 ms\n", "Wall time: 91.7 ms\n" ] } ], "source": [ "%time a2[:] = a1" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(a1, a2, chunks, lock=True, dt=.01)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(a1, a2, chunks, lock=False, dt=.01)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Zarr arrays (in-memory)" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "zarr.core.Array((200000000,), uint16, chunks=(1048576,), order=C)\n", " compression: blosc; compression_opts: {'clevel': 1, 'cname': 'lz4', 'shuffle': 2}\n", " nbytes: 381.5M; nbytes_stored: 318.2M; ratio: 1.2; initialized: 191/191\n", " store: builtins.dict" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z1 = zarr.array(a1, chunks=chunks, compression='blosc', \n", " compression_opts=dict(cname='lz4', clevel=1, shuffle=2))\n", "z1" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "zarr.core.Array((200000000,), uint16, chunks=(1048576,), order=C)\n", " compression: blosc; compression_opts: {'clevel': 1, 'cname': 'lz4', 'shuffle': 2}\n", " nbytes: 381.5M; nbytes_stored: 294; ratio: 1360544.2; initialized: 0/191\n", " store: builtins.dict" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z2 = zarr.empty_like(z1)\n", "z2" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(z1, z2, chunks, lock=True, dt=.02)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(z1, z2, chunks, lock=False, dt=0.02)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3 loops, best of 5: 251 ms per loop\n" ] } ], "source": [ "# for comparison, using blosc internal threads\n", "%timeit -n3 -r5 z2[:] = z1" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " " ] } ], "source": [ "%prun z2[:] = z1" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Without the dask lock, we get better CPU utilisation. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## HDF5 datasets (in-memory)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "h5f = h5fmem()\n", "h5f" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", " compression: lzf; compression_opts: None; shuffle: True\n", " nbytes: 381.5M; nbytes_stored: 357.4M; ratio: 1.1; chunks: (1048576,)\n" ] } ], "source": [ "h1 = h5f.create_dataset('h1', data=a1, chunks=chunks, compression='lzf', shuffle=True)\n", "h5d_diagnostics(h1)" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", " compression: lzf; compression_opts: None; shuffle: True\n", " nbytes: 762.9M; nbytes_stored: 0; ratio: inf; chunks: (1048576,)\n" ] } ], "source": [ "h2 = h5f.create_dataset('h2', shape=h1.shape, chunks=h1.chunks, \n", " compression=h1.compression, compression_opts=h1.compression_opts, \n", " shuffle=h1.shuffle)\n", "h5d_diagnostics(h2)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(h1, h2, chunks, lock=True, dt=0.1)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(h1, h2, chunks, lock=False, dt=0.1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Bcolz carrays (in-memory)" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "carray((200000000,), uint16)\n", " nbytes := 381.47 MB; cbytes := 318.98 MB; ratio: 1.20\n", " cparams := cparams(clevel=1, shuffle=2, cname='lz4', quantize=0)\n", " chunklen := 1048576; chunksize: 2097152; blocksize: 16384\n", "[1314 2727 2905 ..., 1958 1325 1971]" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "c1 = bcolz.carray(a1, chunklen=chunks[0],\n", " cparams=bcolz.cparams(cname='lz4', clevel=1, shuffle=2))\n", "c1" ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "carray((200000000,), uint16)\n", " nbytes := 381.47 MB; cbytes := 2.00 MB; ratio: 190.73\n", " cparams := cparams(clevel=1, shuffle=2, cname='lz4', quantize=0)\n", " chunklen := 1048576; chunksize: 2097152; blocksize: 4096\n", "[0 0 0 ..., 0 0 0]" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "c2 = bcolz.zeros(a1.shape, chunklen=chunks[0], dtype=a1.dtype, \n", " cparams=bcolz.cparams(cname='lz4', clevel=1, shuffle=2))\n", "c2" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "profile_dask_copy(c1, c2, chunks, lock=True, dt=0.05)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# not sure it's safe to use bcolz without a lock, but what the heck...\n", "profile_dask_copy(c1, c2, chunks, lock=False, dt=0.05)" ] }, { "cell_type": "code", "execution_count": 31, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3 loops, best of 5: 649 ms per loop\n" ] } ], "source": [ "# for comparison\n", "%timeit -n3 -r5 c2[:] = c1" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3 loops, best of 5: 557 ms per loop\n" ] } ], "source": [ "# for comparison\n", "%timeit -n3 -r5 c1.copy()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 0 } zarr-python-3.0.6/notebooks/dask_count_alleles.ipynb000066400000000000000000126045021476711733500227460ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Profile allele count from genotype data via dask.array" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "zarr 1.0.1.dev18+dirty\n" ] }, { "data": { "text/html": [ "\n", "
\n", " \n", " Loading BokehJS ...\n", "
" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/javascript": [ "\n", "(function(global) {\n", " function now() {\n", " return new Date();\n", " }\n", "\n", " if (typeof (window._bokeh_onload_callbacks) === \"undefined\") {\n", " window._bokeh_onload_callbacks = [];\n", " }\n", "\n", " function run_callbacks() {\n", " window._bokeh_onload_callbacks.forEach(function(callback) { callback() });\n", " delete window._bokeh_onload_callbacks\n", " console.info(\"Bokeh: all callbacks have finished\");\n", " }\n", "\n", " function load_libs(js_urls, callback) {\n", " window._bokeh_onload_callbacks.push(callback);\n", " if (window._bokeh_is_loading > 0) {\n", " console.log(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n", " return null;\n", " }\n", " if (js_urls == null || js_urls.length === 0) {\n", " run_callbacks();\n", " return null;\n", " }\n", " console.log(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n", " window._bokeh_is_loading = js_urls.length;\n", " for (var i = 0; i < js_urls.length; i++) {\n", " var url = js_urls[i];\n", " var s = document.createElement('script');\n", " s.src = url;\n", " s.async = false;\n", " s.onreadystatechange = s.onload = function() {\n", " window._bokeh_is_loading--;\n", " if (window._bokeh_is_loading === 0) {\n", " console.log(\"Bokeh: all BokehJS libraries loaded\");\n", " run_callbacks()\n", " }\n", " };\n", " s.onerror = function() {\n", " console.warn(\"failed to load library \" + url);\n", " };\n", " console.log(\"Bokeh: injecting script tag for BokehJS library: \", url);\n", " document.getElementsByTagName(\"head\")[0].appendChild(s);\n", " }\n", " };\n", "\n", " var js_urls = ['https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.js', 'https://cdn.pydata.org/bokeh/release/bokeh-compiler-0.12.0.min.js'];\n", "\n", " var inline_js = [\n", " function(Bokeh) {\n", " Bokeh.set_log_level(\"info\");\n", " },\n", " \n", " function(Bokeh) {\n", " Bokeh.$(\"#b153ad5f-436a-4afb-945c-87790add89c8\").text(\"BokehJS successfully loaded\");\n", " },\n", " function(Bokeh) {\n", " console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.css\");\n", " Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-0.12.0.min.css\");\n", " console.log(\"Bokeh: injecting CSS: https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.css\");\n", " Bokeh.embed.inject_css(\"https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.0.min.css\");\n", " }\n", " ];\n", "\n", " function run_inline_js() {\n", " for (var i = 0; i < inline_js.length; i++) {\n", " inline_js[i](window.Bokeh);\n", " }\n", " }\n", "\n", " if (window._bokeh_is_loading === 0) {\n", " console.log(\"Bokeh: BokehJS loaded, going straight to plotting\");\n", " run_inline_js();\n", " } else {\n", " load_libs(js_urls, function() {\n", " console.log(\"Bokeh: BokehJS plotting callback run at\", now());\n", " run_inline_js();\n", " });\n", " }\n", "}(this));" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import sys\n", "sys.path.insert(0, '..')\n", "import zarr\n", "print('zarr', zarr.__version__)\n", "from zarr import blosc\n", "import numpy as np\n", "import h5py\n", "import multiprocessing\n", "import dask\n", "import dask.array as da\n", "from dask.diagnostics import Profiler, ResourceProfiler, CacheProfiler\n", "from dask.diagnostics.profile_visualize import visualize\n", "from cachey import nbytes\n", 
"import bokeh\n", "from bokeh.io import output_notebook\n", "output_notebook()\n", "from functools import reduce\n", "import operator\n", "import allel" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "callset = h5py.File('/data/coluzzi/ag1000g/data/phase1/release/AR3/variation/main/hdf5/ag1000g.phase1.ar3.pass.h5',\n", " mode='r')\n", "genotype = callset['3R/calldata/genotype']\n", "genotype" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "zarr.core.Array((13167162, 765, 2), int8, chunks=(6553, 200, 2), order=C)\n", " compression: blosc; compression_opts: {'clevel': 1, 'cname': 'lz4', 'shuffle': 2}\n", " nbytes: 18.8G; nbytes_stored: 683.2M; ratio: 28.1; initialized: 8040/8040\n", " store: builtins.dict" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# copy into a zarr array\n", "# N.B., chunks in HDF5 are too small really, use something bigger\n", "chunks = (genotype.chunks[0], genotype.chunks[1] * 20, genotype.chunks[2])\n", "genotype_zarr = zarr.array(genotype, chunks=chunks, compression='blosc',\n", " compression_opts=dict(cname='lz4', clevel=1, shuffle=2))\n", "genotype_zarr" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We want to perform an allele count. Compare serial and parallel implementations, and compare working direct from HDF5 versus from Zarr." ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 1min 50s, sys: 512 ms, total: 1min 51s\n", "Wall time: 1min 50s\n" ] }, { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "
[allele counts, first rows]\n", "      0  1  2  3\n", "0  1523  5  0  0\n", "1  1527  1  0  0\n", "2  1527  1  0  0\n", "3  1527  1  0  0\n", "4  1527  1  0  0\n", "...\n" ], "text/plain": [ "AlleleCountsChunkedArray((13167162, 4), int32, chunks=(65536, 4))\n", "  nbytes: 200.9M; cbytes: 38.3M; cratio: 5.2;\n", "  compression: blosc; compression_opts: cparams(clevel=5, shuffle=1, cname='lz4', quantize=0);\n", "  data: bcolz.carray_ext.carray" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%%time\n", "# linear implementation from HDF5 on disk\n", "allel.GenotypeChunkedArray(genotype).count_alleles()" ] },
{ "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 2min 27s, sys: 2.14 s, total: 2min 29s\n", "Wall time: 1min 23s\n" ] }, { "data": { "text/html": [ "[allele counts, first rows]\n", "      0  1  2  3\n", "0  1523  5  0  0\n", "1  1527  1  0  0\n", "2  1527  1  0  0\n", "3  1527  1  0  0\n", "4  1527  1  0  0\n", "...
" ], "text/plain": [ "AlleleCountsChunkedArray((13167162, 4), int32, chunks=(65536, 4))\n", " nbytes: 200.9M; cbytes: 38.3M; cratio: 5.2;\n", " compression: blosc; compression_opts: cparams(clevel=5, shuffle=1, cname='lz4', quantize=0);\n", " data: bcolz.carray_ext.carray" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%%time\n", "# linear implementation from zarr in memory\n", "# (although blosc can use multiple threads internally)\n", "allel.GenotypeChunkedArray(genotype_zarr).count_alleles()" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# multi-threaded implementation from HDF5 on disk\n", "gd = allel.model.dask.GenotypeDaskArray.from_array(genotype, chunks=chunks)\n", "ac = gd.count_alleles(max_allele=3)\n", "with Profiler() as prof, ResourceProfiler(dt=1) as rprof:\n", " ac.compute(num_workers=8)\n", "visualize([prof, rprof], min_border_bottom=60, min_border_top=60);" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
\n", "
\n", "
\n", "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "# multi-threaded implementation from zarr in memory\n", "gdz = allel.model.dask.GenotypeDaskArray.from_array(genotype_zarr, chunks=chunks)\n", "acz = gdz.count_alleles(max_allele=3)\n", "with Profiler() as prof, ResourceProfiler(dt=1) as rprof:\n", " acz.compute(num_workers=8)\n", "visualize([prof, rprof], min_border_bottom=60, min_border_top=60);" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 0 } zarr-python-3.0.6/notebooks/genotype_benchmark_compressors.ipynb000066400000000000000000013037731476711733500254220ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "zarr 1.1.1.dev7+dirty\n", "blosc ('1.10.0.dev', '$Date:: 2016-07-20 #$')\n" ] } ], "source": [ "import sys\n", "sys.path.insert(0, '..')\n", "import functools\n", "import timeit\n", "import zarr\n", "print('zarr', zarr.__version__)\n", "from zarr import blosc\n", "print('blosc', blosc.version())\n", "import numpy as np\n", "import h5py\n", "%matplotlib inline\n", "import matplotlib.pyplot as plt" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "callset = h5py.File('/data/coluzzi/ag1000g/data/phase1/release/AR3/variation/main/hdf5/ag1000g.phase1.ar3.pass.h5',\n", " mode='r')\n", "genotype = callset['3R/calldata/genotype']\n", "genotype" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": true }, "outputs": [], "source": [ "n_variants = 500000" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "(500000, 765, 2)" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "genotype_sample = genotype[1000000:1000000+n_variants, ...]\n", "genotype_sample.shape" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "765000000" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "nbytes = genotype_sample.nbytes\n", "nbytes" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "(685, 765, 2)" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# 1M chunks of first dimension\n", "chunks = (int(2**20 / (genotype_sample.shape[1] * genotype_sample.shape[2])), \n", " genotype_sample.shape[1], \n", " genotype_sample.shape[2])\n", "chunks" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "8" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "blosc.get_nthreads()" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": 
[ { "data": { "text/plain": [ "zarr.core.Array((500000, 765, 2), int8, chunks=(685, 765, 2), order=C)\n", " compression: blosc; compression_opts: {'cname': 'lz4', 'clevel': 1, 'shuffle': 2}\n", " nbytes: 729.6M; nbytes_stored: 23.0M; ratio: 31.7; initialized: 730/730\n", " store: builtins.dict" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "zarr.array(genotype_sample, chunks=chunks, compression_opts=dict(cname='lz4', clevel=1, shuffle=2))" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "zarr.core.Array((500000, 765, 2), int8, chunks=(685, 765, 2), order=C)\n", " compression: blosc; compression_opts: {'cname': 'zstd', 'clevel': 1, 'shuffle': 2}\n", " nbytes: 729.6M; nbytes_stored: 12.0M; ratio: 60.7; initialized: 730/730\n", " store: builtins.dict" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "zarr.array(genotype_sample, chunks=chunks, compression_opts=dict(cname='zstd', clevel=1, shuffle=2))" ] }, { "cell_type": "code", "execution_count": 38, "metadata": { "collapsed": false }, "outputs": [], "source": [ "compression_configs = (\n", " (None, None),\n", " ('zlib', 1),\n", " ('bz2', 1),\n", " ('lzma', dict(preset=1)),\n", " ('blosc', dict(cname='snappy', clevel=0, shuffle=0)),\n", " ('blosc', dict(cname='snappy', clevel=0, shuffle=2)),\n", " ('blosc', dict(cname='snappy', clevel=9, shuffle=0)),\n", " ('blosc', dict(cname='snappy', clevel=9, shuffle=2)),\n", " ('blosc', dict(cname='blosclz', clevel=1, shuffle=0)),\n", " ('blosc', dict(cname='blosclz', clevel=1, shuffle=2)),\n", " ('blosc', dict(cname='blosclz', clevel=5, shuffle=0)),\n", " ('blosc', dict(cname='blosclz', clevel=5, shuffle=2)),\n", " ('blosc', dict(cname='blosclz', clevel=9, shuffle=0)),\n", " ('blosc', dict(cname='blosclz', clevel=9, shuffle=2)),\n", " ('blosc', dict(cname='lz4', clevel=1, shuffle=0)),\n", " ('blosc', dict(cname='lz4', clevel=1, shuffle=2)),\n", " ('blosc', dict(cname='lz4', clevel=5, shuffle=0)),\n", " ('blosc', dict(cname='lz4', clevel=5, shuffle=2)),\n", " ('blosc', dict(cname='lz4', clevel=9, shuffle=0)),\n", " ('blosc', dict(cname='lz4', clevel=9, shuffle=2)),\n", " ('blosc', dict(cname='lz4hc', clevel=1, shuffle=0)),\n", " ('blosc', dict(cname='lz4hc', clevel=1, shuffle=2)),\n", " ('blosc', dict(cname='lz4hc', clevel=3, shuffle=0)),\n", " ('blosc', dict(cname='lz4hc', clevel=3, shuffle=2)),\n", " ('blosc', dict(cname='zstd', clevel=1, shuffle=0)),\n", " ('blosc', dict(cname='zstd', clevel=1, shuffle=2)),\n", " ('blosc', dict(cname='zstd', clevel=3, shuffle=0)),\n", " ('blosc', dict(cname='zstd', clevel=3, shuffle=2)),\n", " ('blosc', dict(cname='zstd', clevel=5, shuffle=0)),\n", " ('blosc', dict(cname='zstd', clevel=5, shuffle=2)),\n", " ('blosc', dict(cname='zlib', clevel=1, shuffle=0)),\n", " ('blosc', dict(cname='zlib', clevel=1, shuffle=2)),\n", " ('blosc', dict(cname='zlib', clevel=3, shuffle=0)),\n", " ('blosc', dict(cname='zlib', clevel=3, shuffle=2)),\n", " ('blosc', dict(cname='zlib', clevel=5, shuffle=0)),\n", " ('blosc', dict(cname='zlib', clevel=5, shuffle=2)),\n", ")" ] }, { "cell_type": "code", "execution_count": 39, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def log(*msg):\n", " print(*msg, file=sys.stdout)\n", " sys.stdout.flush()" ] }, { "cell_type": "code", "execution_count": 40, "metadata": { "collapsed": false }, "outputs": [], "source": [ "@functools.lru_cache(maxsize=None)\n", "def 
compression_ratios():\n", "    x = list()\n", "    for compression, compression_opts in compression_configs:\n", "        z = zarr.array(genotype_sample, chunks=chunks, compression=compression, \n", "                       compression_opts=compression_opts)\n", "        ratio = z.nbytes / z.nbytes_stored\n", "        x.append(ratio)\n", "        log(compression, compression_opts, ratio)\n", "    return x\n" ] }, { "cell_type": "code", "execution_count": 43, "metadata": { "collapsed": false }, "outputs": [ { "data": { "image/png": "[base64-encoded PNG figure stripped]" }, "metadata": {}, "output_type": "display_data" } ] },
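{ "cell_type": "markdown", "metadata": {}, "source": [ "A sketch of how the cached ratios might be tabulated against their configurations; this uses only `compression_configs` and `compression_ratios` defined above, and the output formatting is illustrative:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# sketch: pair each config with its measured compression ratio\n", "for (compression, opts), ratio in zip(compression_configs, compression_ratios()):\n", "    print(compression, opts, round(ratio, 1))" ] }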
tlfRseP9nSQsljQrncWaB/ZOc6zmSFoXX85KOiG0b\nL2mJpP7FxO2cc865urV69eqSPxHPX7lfxTZ+6/shFw3x647BwDNmdnpsXUOMM0lMhcoYgJm9Z2ZD\nE+xXG3U2FM+ZWXl43VKiGEYCF5nZAEkdgaPN7Egzu7sW61gB9DOznsAtwP3pDWZ2NXAT8INarG83\nlip1AI1EqtQBNBKpUgfQSKRKHUAjkSp1AI1CKpUqdQjuS6q+G1jNJD0cvsWfLKlFZgFJwyUtDq/b\nw7om4dv7xeGb+VFhfQ9J00MvwDxJ3WsQUxvgg4x1G2LxnB/qXCDpwbBuoqS7Jb0g6a10z4OkVpJm\nhFgWSRoU1pdJWhb2Wy5pkqRTwv7LJR0dyrWUNEHSHEmvSDojhPEZsDXBuWwIxxkb4p0vaa2kCenT\nicUT703sGnpklku6MVseCtWZK1dxkg6QNE3Sy5JmSzpIUmtJq2JlWkpaI6lptvJZ6t9ClJ9Cih0w\nvhHYkevaC4ZKeknS65L6hPir9AhKekJSP0k3AH2BCZJ+DjwNdAmfUd8qgUrlklLhvKdJ6pD0XM1s\njpmlr5U5QJeMIuuJrnnnnHPOOVcH6vshFwcDF5rZnHDTfxlwV3qjpE7A7UAvopvJ6aGRshboYmZH\nhHKtwy6TgHFmNlVSc2rWYGwKVMZXmNlxoZ5DgWuB481ss6T4jWlHM+sj6RBgKjAF+BQYbGbbJLUn\nusGdGsr3AL5vZkslzQOGhf0HhTrOBK4DnjWzkZL2AeZKmmFmLwIvhpiOAn5oZpdknkg6bjMbDYwO\nx3gOSN/wx3ub4svHAIeF+F+W9KSZzU8fL5+EuUq7P8T+tqRjgftCb84CSf3NbDYwEPi7me2QVK08\nMCCj/ivTy5LGAi+b2ZNZ6j5e0kJgHfBTM1ta4LyGhGOWk/3aA2hqZsdJOh0YA5yS3j3L8W6WdBJw\nlZktkHQP8ISZlYfjjgx/9yD6vAaZ2UZJQ4FxwMgizjXtImBaxrpKomu+gDGx5Qp8Unk2FaUOoJGo\nKHUAjURFqQNoJCpKHUAjUVHqABqFioqKUofgGplUKpWo57O+G1hrzGxOWH4Y+DGxBhbRjf4sM9sE\nIGkS0I9oqFN3SXcDfwOekbQX0NnMpgKYWZJejCokCegZYsnmJOBRM9sc6tgS2/Z4WLdM0tfShwRu\nUzRPphLoHNu2MnZTvwSYEZZfBbqF5VOBMyT9NLxvDnQFlqcrNbNXgGqNqxweBu4ys4UFyk1Pn5uk\nKUQ9LfMT1pGWL1dIagWcADwa8g7QLPydDJwNzAaGAfcUKJ9VaFhm8wrQ1cw+CY2hx4FsvWHZrCDj\n2ottmxI7flnC4xXqSTsYOJzoywURfWnwbmahPOcaVSKdCFxI9FnGrQMOkrSnmW3PfYQxBcJ0zjnn\nnNu9VFRUVGmYjx07Nmu5Us/ByjZ/p9oNaLhZ70k0qPhS4Pe5ylY5kHRZbKhcx4xtTYCVwCHAU4mi\nryp+c5qOYwSwL9DLzHoRDT1skaV8Zex9JV80dEXUy9UrvLqb2XJqQNIYogZttaF6WST5XHZVE2Bz\nmAOVPr/Dw7apwGmS2gLlwMwC5YtiZtvM7JOwPI1oqGq7hPvmuvbgi89wB198hp9T9b+rasNgCxDw\nWuy8e2bMDyx8gOjBFvcT9YJtjm8zsxXAMmC1pMOKjM1VkSp1AI1EqtQBNBKpUgfQSKRKHUAjkSp1\nAI1CQ5+D1bFjNyTV2atjx26J4ujevTszZ87Muu3555/nkEMOqZXzzVdPIZ9++ilnnHEGbdq04eyz\nzwbg+uuv56tf/SqdO3dm9erVNGnShMrKygJHqh313cAqk5QednYO8I+M7XOBfpLaSWoKDAdmh+F2\nTc3sMeB6oNzMtgHvSPoegKTmkr4SP5iZ3RtuUsvNbH3Gtkoz6wbMI+o9yWYmcFb6Zjw0ALJJN7D2\nAT4ws8rQg1CWpUw+TwOX79xBqtEPDyiau3UyMCpzU45dTpHUJuRvMPBClmMuK1Bt3lyZ2UfASklD\nYsc8Imz7mOhzuBt40iI5yxcrNoeJMNRQsV7SGWFoaq59q117uYqGv6uAIxXZHzg2X2hZ1i0Hviqp\nd6h/jzD8MhFFTz78K3Cemb2dZfsRQHei3t8lSY/rnHPOufr1/vurib7zrptXdPxd07dvX5Yt++IW\ncVcaSbviv/7rv9iwYQObN2/mkUce4Z133uGuu+7i9ddf5913o4FAXwyIqnv13cB6HfiRpKVEE+1/\nG9ann263HriG6KuXBURzTJ4gmqifkrQA+GMoA3A+cLmkRUSNgp030kV4A8jamxGG9N1K1MhbAIyP\nxxsvGv5OAo4J8ZxL1FOQWSbb/mk3E/WuLFb0EIqbMgtIOirMTcrnSqAz0Xyq+aE3K1+9c4mGuy0k\nGuZXZXhgaGTklSdXcecCIxU9lOQ1YFBs2yNEPYB/ia0bkad8NYoe7jEwy6Yhkl4Lcf2SaBhieoho\nD2BTnsPmuvayXgNm9gJRI2tJqOuVzDI53qf3/xcwBLhD0ZyxBcDxRZzrDUTX872h93Zuxva2wCoz\nq5+vcL7UKkodQCNRUeoAGomKUgfQSFSUOoBGoqLUATQKPgfry2P16tUcdNBBOxtRq1evZt9996V9\n+4K3sHUj3zPfd4cX8FPg9lLH0ZBfwHeB/1XqOOrgvA4D7ix1HPV8zkOBPxcoY2D+8pe//OUvf+V4\nYa52Zctp3f//ONnn2K1bN7vtttvs0EMPtXbt2tkPfvAD2759u5mZpVIp22+//czM7LzzzrMmTZpY\ny5Ytbe+997Zf/OIX1Y714Ycf2sCBA61NmzbWrl0769evX5V67rzzTjviiCOsTZs2NmzYsJ31PPDA\nA9a3b98qx5Jkb7/9to0ePdqaN29uzZo1s7333tt+97vf2Ve+8hVr2rSp7b333nbhhRfaqlWrrEmT\nJrZjxw4zM9u6dauNHDnSOnXqZPvtt59df/31VllZWdTnE1tP5qu+H3LREE0BHpA0zYqc67K7MLOa\nzFFr8CwaIveTUsdRXySNB74F/GeC0nUdjnPOuUaqQ4eyUodQK1KplPdiJfSnP/2J6dOn07JlSwYO\nHMgtt9zCTTdFA63SvUYPPfQQ//jHP/jDH/7AiSeemPU448ePZ//992fjxo2YGXPmzKmy/dFHH+WZ\nZ55hzz335IQTTuCBBx7gkksuqVJPWvr9mDFjkMTbb7/NQw89BMDBBx/Meeedx5o1awCq/VDwBRdc\nQKdOnVixYgXbtm1j4MCBdO3alYsvvnhX0rTTbt/AsmieyrdKHYdzdc2iHxpOWrYuQ/lS8P8xJ+N5\nSsbzlIznKRnPk6ttP/7xj+ncuTMA1113HZdffvnOBlamfPcQzZo147333mPlypX06NGDPn36VNk+\natQoOnSIZvycccYZLFyY+0HYNb1Xef/995k2bRpbt25lzz33pEWLFlxxxRXcf//9tdbAqu85WM45\n96
XgNy/JeJ6S8Twl43lKxvOUjOcpuf3222/ncllZ2c4HRxTrZz/7GT169ODUU0/l61//OnfccUeV\n7enGFUDLli3Ztm1bzQLOY82aNfzrX/+iU6dOtGvXjrZt23LppZfy4Ycf1lodu30PlnPOOeeccy63\nd955Z+fy6tWrd/ZmZSr0pL5WrVpx5513cuedd7J06VJOPPFEjj322JxDCuP7ffLJJzvfr1+/vsZP\nBdx///1p0aIFGzdurLMnC3oPlnPO1UBD//2UhsLzlIznKRnPUzKep2Q8T8ndc889rFu3jk2bNjFu\n3DiGDRuWtVzHjh1ZsWJFzuM89dRTvP129Csye++9N3vssQdNmzYtWH/Pnj1ZsmQJixcvZvv27Tl/\n4Def9JDCjh07cuqpp3LllVfy0UcfYWasWLGC5557ruhj5uINLOecc8455xqY6GEiqrNX0oeVSOKc\nc87ZOazvwAMP5Lrrrsta9pprruHmm2+mXbt23HXXXdW2v/nmm5x88snsvffe9OnThx/96Ef069dv\nZz25HHjggdx4440MGDCAgw46iG99q/jHJ8SP/9BDD/HZZ59x6KGH0q5dO8466yzWr1+fZ+8i6/LJ\n7M65OEnm/y4455xz9UeSP2CqAcv1+YT11VqG3oPlnHPOOeecc7XEH3LhnKumriZ9Oufc7q5Dlw6s\nX1t7Q5Gy8ce0J+N5cnXFG1jOuerGlDqARmAl0L3UQTQCnqdkPE/JfAny9P6Y90sdgnOujvkcLOdc\nFZLMG1jOOVdHxviPubvqfA5Ww9Zg52BJKpP0ao5tsySV11csGXV3lTRf0rTYupWliCUXSf0lTUxQ\nLm/ckj4KfztJmhyWL5D065ocL2GdoyVdVeg4xYgfU9JESf0KlO8vaUv4nOdLuj5BHbMkdS2wvahr\nVtIQSUslPRve/1nSQkmjwnmcWWD/gucayv1K0pvh2EfG1o+XtERS/2Lids4555xzydX3Qy4aYtN8\nMPCMmZ0eW9cQ40wSU6EyBmBm75nZ0AT71UadDcVzZlYeXreUKIaRwEVmNkBSR+BoMzvSzO6urQok\nnQ70MLMDgR8Cv01vM7OrgZuAH9RWfbu1BvU1TAPmeUrG85SM5ykR/32nZDxPrq7UdwOrmaSHw7f4\nkyW1yCwgabikxeF1e1jXJHx7v1jSIkmjwvoekqaHb+rnSarJyOw2wAcZ6zbE4jk/1LlA0oNh3URJ\nd0t6QdJb6Z4HSa0kzQixLJI0KKwvk7Qs7Ldc0iRJp4T9l0s6OpRrKWmCpDmSXpF0RgjjM2BrgnPZ\nEI4zNsQ7X9JaSRPSpxOLJ96b2DX0yCyXdGO2PBSqM1eu4iQdIGmapJclzZZ0kKTWklbFyrSUtEZS\n02zls9S/hSg/hRT71IaNwI5c114wVNJLkl6X1CfEX6VHUNITkvpJugHoC0yQ9HPgaaBL+Iz6VglU\nKpeUCuc9TVKHIs71e8BDAGb2ErBPbH+A9UTXvHPOOeecqwP1/ZCLg4ELzWxOuOm/DNj5K2SSOgG3\nA72Ibianh0bKWqCLmR0RyrUOu0wCxpnZVEnNqVmDsSlQGV9hZseFeg4FrgWON7PNkuI3ph3NrI+k\nQ4CpwBTgU2CwmW2T1B6YE7YB9AC+b2ZLJc0DhoX9B4U6zgSuA541s5GS9gHmSpphZi8CL4aYjgJ+\naGaXZJ5IOm4zGw2MDsd4Dkjf8Md7m+LLxwCHhfhflvSkmc1PHy+fhLlKuz/E/rakY4H7Qm/OAkn9\nzWw2MBD4u5ntkFStPDAgo/4r08uSxgIvm9mTWeo+XtJCYB3wUzNbWuC8hoRjlpP92gNoambHhV6j\nMcAp6d2zHO9mSScBV5nZAkn3AE+YWXk47sjwdw+iz2uQmW2UNBQYB4xMeK5dgHdi79eFdelZ1ZVE\n13x+s2LL3Wj0k8rrhOckGc9TMp6nZDxPifiT8ZLxPLlipVKpRD2f9d2DtcbM5oTlh4m+0Y87Bphl\nZpvMrJKoAdUPWAF0D71G3wY+krQX0NnMpgKY2Wdm9mkxwUgS0JOoAZfNScCjZrY51LEltu3xsG4Z\n8LX0IYHbJC0CZgCdJaW3rYzd1C8J2wFeJbqFBTgVuEbSAiAFNAeqzAMys1eyNa5yeBi4y8wWFig3\n3cy2hPxNofrnkkS+XCGpFXAC8Gg4v98B6Z6VycDZYXkY8EiB8lmZ2egcjatXgK5mdiTwG8Jnl1C1\nay+2bUrs+Ml+Dr1wT9rBwOFEXy4sIGp0d84slOdcC1kHHCRpz7ylToy9/IbGOeecq3cd9+uIpDp7\nddyvY6lPcacTTzyRP/zhDzXe/8ILL6Rdu3b07t0bgPvuu4+OHTvSunVrNm3aRJMmTVixYsUux1lR\nUcGYMWN2vnKp7x6szG/2s83fqXYDamZbJPUEvg1cCpwFXJGtbJUDSZcBF4d6vmNm62PbmhDdPG8H\nniriHNK2Z4l5BLAv0MvMKhU9AKJFlvKVsfeVfPE5iKiX680axFOFpDFEDdpqQ/WySPK57KomwOZ0\nj02GqcCtktoC5cBMYK885YtiZttiy9Mk3SupnZltSrBvtmvvorA5/Rnu4IvP8HOqfnFRbRhsAQJe\nM7M+Re6Xtg7YP/Z+v7AOADNbIWkZsFrSADNbUsN63JfgcdH1wvOUjOcpGc9TIv77Tsk09Dy9v+79\nOv3ZlC/LTwY8//zzPPvss7z77ru0aNGCzz//nKuvvpq5c+dy+OGHA/X/+5713YNVJik97Owc4B8Z\n2+cC/SS1k9QUGA7MDsPtmprZY8D1QHm4aX5H0vcAJDWX9JX4wczsXjPrFR5ssD5jW6WZdQPm8UXv\nSaaZwFmS2oU62uYol/7U9gE+CI2rE6naq5Hkk30auHznDrEnwBVD0dytk4FRmZty7HKKpDYhf4OB\nF7Icc1mBavPmysw+AlZKGhI75hFh28dEn8PdwJMWyVm+WPE5SGGoodKNK0Vz5jrl2bfatZeraPi7\nCjhSkf2BY/OFlmXdcuCrknqH+vcIwy+TmgqcH/btDWwxs53/goYcdifq/fXGlXPOOecatVWrVtGt\nWzdatIi+016/fj3bt2/nkEMO2Vmmvh+BX98NrNeBH0laSjTRPv2Es/TT7dYD1xANj1tANMfkCaI5\nJKkwZOqPoQxEN5KXhyF5L1BgCFkObwDtsm0IQ/puJWrkLQDGx+ONFw1/JwHHhHjOBZZlKZNt/7Sb\niR4EsljRQyhuyiwg6agwNymfK4mGlb2s6CEKYwrUO5douNtComF+8zPqbF+gvny5ijsXGKnooSSv\nAYNi2x4h6gH8S2zdiDzlq1H0cI+BWTYNkfRaiOuXRMMQ00NEewD5erJyXXtZrwEze4GokbUk1PVK\nZpkc79P7/wsYAtyhaM7YAuD4pOdqZn8japi+RTSs8rKMIm2BVWEIrtsV/i16Mp6nZDxPyXieEmnI\nvTINiecpme7duzN+/Hh69uxJ27ZtGT58OJ999sUzt37/+99
z4IEHsu+++zJ48GDee++9rMfZvn07\n5513Hvvuuy9t27bluOOOY8OGL56ntmrVKvr27Uvr1q057bTT2LQpuj2bPXs2+++/f5Vjde/enZkz\nZ/KHP/yBiy++mBdffJHWrVszYsQIvvGNbwDQtm1bTj755GpxfPbZZ/zkJz+hrKyMTp06cdlll7F9\n+/Zq5XbFbv9Dw5J+CrQ3s2sKFt5NSfou0N3MflPqWGqTpMOIHrryk1LHUl8UPTTj38xseJ4y/kPD\nzjlXV8b4Dw276pTlh2wl1ekQwaTXYvfu3enQoQP//d//zZ577skJJ5zAFVdcwSWXXMLMmTM5++yz\nmTFjBoceeihXX301ixYtYvbs2dWOc//99/PUU08xefJkmjdvzsKFCznwwAPZa6+9OPHEE1m7di1/\n//vf2W+//TjttNM4/vjjGTduHLNnz+a8885jzZo1VWKaMGECJ510Eg8++CATJkzgueeeA2D16tUc\ncMABfP755zuHBjZp0oS33nqLAw44gCuvvJKVK1fy4IMPsscee3DOOedw+OGHc+utt+bMQbbPJ7a+\n2oik+p6D1RBNAR6QNC3jt7BcYGY1maPW4IUhcrtT42o88C3gPwsWHlPX0Tjn3O6pQ5eaDLYpTkOf\nW9RQeJ6SGzVqFB06RNfuGWecwcKF0fPT/vSnPzFy5Eh69uwJwG233Ubbtm1Zs2YNXbtWeU4bzZo1\nY+PGjbzxxht885vfpFevXlW2X3jhhfTo0QOAoUOH8sQTT+xSzGaWde7V73//e1599VX22WcfAK65\n5hpGjBiRt4FVrN2+gWVmbxPddDr3pRZ+aDhp2boM5UvB/8ecjOcpGc9TMp4n50oj3bgCaNmy5c5h\ngO+++y5HHXXUzm2tWrWiffv2rFu3rloD6/zzz2ft2rUMGzaMrVu3MmLECMaNG0fTptGvx3Ts2LFK\nHdu2baO2bdiwgU8++aRKzJWVlbV+31Pfc7Ccc+5LwW/ykvE8JeN5SsbzlIznKRnP067r3Lkzq1ev\n3vn+448/ZuPGjXTp0qVa2aZNm3LDDTewZMkS/vnPf/Lkk0/y0EMPFayjVatWfPLJJzvf79ixo8rc\nrWLsu+87K3VnAAAgAElEQVS+tGzZkiVLlrBp0yY2bdrEli1b2Lp1a42Ol4s3sJxzzjnnnHNFGz58\nOBMnTmTx4sVs376da6+9lt69e1frvYKoB/q1116jsrKSvfbai2bNmu3svcrnoIMO4tNPP2XatGl8\n/vnn3HLLLVUespFNrh4pSVx88cVcccUVOxtp69at45lnnklwtsnt9kMEnXOuJnyoUjKep2Q8T8l4\nnpLxPCXT0PPUoUuHOv2tqqTzAfP9htSAAQO4+eabOfPMM9myZQsnnHACf/nLX7KWXb9+PZdeeinr\n1q1jr732YtiwYZx77rkF62jdujX33nsvI0eOpLKykp/97Gfst99+RcUcf3/HHXcwduxYevfuvbO3\n7T/+4z849dRT8x6zGLv9UwSdc1VJMv93obCG/j/mhsLzlIznKRnPUzKep2QaUp5yPaXONQzFPkXQ\nG1jOuSq8geWcc87VL29gNWzFNrB8DpZzzjnnnHPO1RKfg+WcqybfWGjnnGvsOnQoY/36VaUOo840\npKFvDZnnydUVb2A557LwYQqFpYCKEsfQGKTwPCWRwvOURIrayNP77/uXSM65uuNzsJxzVUgyb2A5\n577cfL6La1h8DlbD1mDnYEkqk/Rqjm2zJJXXVywZdXeVNF/StNi6laWIJRdJ/SVNTFAub9ySPgp/\nO0maHJYvkPTrmhwvYZ2jJV1V6DjFiB9T0kRJ/QqUHyRpkaQFkuZJOilBHbMkVf8Rh6rbi7pmJQ2R\ntFTSs+H9nyUtlDQqnMeZBfZPcq7nhHNdJOl5SUfEto2XtERS/2Lids4551zdKisrQ5K/GuirrKys\nqM+zvh9y0RCb5oOBZ8zs9Ni6hhhnkpgKlTEAM3vPzIYm2K826mwIZphZTzPrBVwI3F+iOEYCF5nZ\nAEkdgaPN7Egzu7sW61gB9DOznsAtxM7VzK4GbgJ+UIv17cZSpQ6gkUiVOoBGIlXqABqJVKkDaBRS\nqVSpQ2gUGlKeVq1ahZk1yNesWbNKHkOpX6tWrSrq86zvBlYzSQ+Hb/EnS2qRWUDScEmLw+v2sK5J\n+PZ+cfhmflRY30PSdEW9APMkda9BTG2ADzLWbYjFc76+6P14MKybKOluSS9Iekuh50FSK0kzQiyL\nJA0K68skLQv7LZc0SdIpYf/lko4O5VpKmiBpjqRXJJ0RwvgM2JrgXDaE44wN8c6XtFbShPTpxOKJ\n9yZ2VdQjs1zSjdnyUKjOXLmKk3SApGmSXpY0W9JBklpLWhUr01LSGklNs5XPUv8WovzkZGafxN7u\nBXyY4Lw2AjtyXXvBUEkvSXpdUp8Qf5UeQUlPSOon6QagLzBB0s+Bp4Eu4TPqm5GnckmpcN7TJKV/\nCTDJuc4xs/S1MgfoklFkPdE175xzzjnn6kJ9tfyAMqAS6B3eTwCuCsuzgHKgE7AaaEfU+HsWGBS2\nPRM7Vuvwdw4wKCw3B1rUIK6xwBU5th0KvA60De/bhL8TgUfC8iHAm2G5KbBXWG4fW19GdGN8aHg/\nD5gQlgcBU8LyrcA5YXkfYDnwlYyYjgLuT3hu+wCLgCPD+/+JxbM4LF8ArCO66W4BvAqU1yCPuXI1\nOvY5zwB6hOVjgWfD8mNA/7A8NH1+ecrvPGaWz3JgjvgGA8uAzcCxRZxXrmtvFvCLsHw6MD2Wz1/F\nyj9B1KOU3qdX5mcQu6bOJHrwzAtA+1g+JhRzrrEyP8m8VoBvAU8W2M9gdOw1y8D85S9/+etL9MKc\nc65Ys2bNstGjR+98hX9LyHzV91ME15jZnLD8MPBj4K7Y9mOAWWa2CUDSJKAf0VCn7pLuBv4GPCNp\nL6CzmU0FMLO83+xnI0lAzxBLNicBj5rZ5lDHlti2x8O6ZZK+lj4kcJuieTKVQOfYtpVmtjQsLyFq\nPEDUoOkWlk8FzpD00/C+OdCVqKFFqO8V4JKEp/gwcJeZLSxQbnr63CRNIeppmZ+wjrR8uUJSK+AE\n4NGQd4Bm4e9k4GxgNjAMuKdA+azMbHSebY8Dj4feoj8CByc8rxVkXHuxbVPC31eIGkxJFHp01cHA\n4cD0cN5NgHczC+U7VwBJJxINh+ybsWkdcJCkPc1se+4jjCkQpnPOOefc7qWioqLKo/3Hjh2btVyp\n52BlvocsN6DhZr0n0eDrS4Hf5ypb5UDSZbGhch0ztjUBVhL1QD2VKPqq4jen6ThGAPsS9VL0Ihp6\n2CJL+crY+0q+eFy+gO+bWa/w6m5my6kBSWOIGrTVhuplkeRz2VVNgM1mVh47v8PDtqnAaZLaEvUY\nzSxQvsbM7HlgD0ntE5bPde3BF5/hDr74DD+n6n9X1YbBFiDgtdh597Sq8wMLHyB6sMX9RL27m+Pb\nzGwFUU/eakmHFR
mbqyJV6gAaiVSpA2gkUqUOoJFIlTqARqEhzS1qyDxPyXieilffDawySceF5XOA\nf2Rsnwv0k9ROUlNgODA73Aw3NbPHgOuJhrBtA96R9D0ASc0lfSV+MDO7N9yklpvZ+oxtlWbWjWi4\n3tk54p0JnCWpXaijbY5y6QbWPsAHZlYZehDKspTJ52ng8p07SEcm2Kd6MNHcrZOBUZmbcuxyiqQ2\nIX+DiYaoZR5zWYFq8+bKzD4CVkoaEjvmEWHbx0Sfw91Ew9csX/liSeoRWy4PdW4M72dI6pRn32rX\nXq6i4e8q4EhF9ica2pjz8FnWLQe+Kql3qH8PSYfmOUZmvF2BvwLnmdnbWbYfAXQn6v1dkvS4zjnn\nnHMumfpuYL0O/EjSUqI5P78N6w0gNIKuIfqKagHwspk9QTRRPyVpAdHwrmvCfucDl0taRNQoSD8M\noBhvEM35qiYM6buVqJG3ABgfjzdeNPydBBwT4jmXqKcgs0y2/dNuJnoQyOLwEIqbMgtIOkpSoafg\nXQl0Bl4OvXdjCtQ7l2i420KiYX5Vhgcm6e3Jk6u4c4GRih5K8hrR/LO0R4h6AP8SWzciT/lqFD3c\nY2CWTd+X9Jqk+USNuGGhvIAewKY8h8117WW9BszsBaJG1hLgl0TDB8m3T8b+/wKGAHdIWkj038Hx\nRZzrDUTX872h93Zuxva2wCozq8yyrytKRakDaCQqSh1AI1FR6gAaiYpSB9AoxIcwudw8T8l4noq3\n2//QcJjv1N7MrilYeDcl6btAdzP7TaljqU1hiNyFZvaTUsdSXyQNBf7NzIbnKWN1M0rUOecaCv9R\nV+fcrlOpf2i4AZsC9FHsh4ZdVWb21JetcQVgZkt2s8bVeKInC/7fUsfy5ZAqdQCNRKrUATQSqVIH\n0EikSh1Ao+BzZpLxPCXjeSpefT9FsMEJ81S+Veo4nKtrFv3QcEJJpgw651zj1KFD0ge/Oudc8Xb7\nIYLOuaokmf+74JxzzjmXnw8RdM4555xzzrk65g0s55yrAR+TnoznKRnPUzKep2Q8T8l4npLxPBXP\nG1jOOeecc845V0t8DpZzrgqfg+Wcc845V5jPwXLOOeecc865OuYNLOecqwEfk56M5ykZz1Mynqdk\nPE/JeJ6S8TwVb7f/HSznXHWS/w6Wc7u7Dl06sH7t+lKH4ZxzjY7PwXLOVSHJGFPqKJxzJTcG/B7B\nOedy8zlYzjnnnHPOOVfH6q2BJalM0qs5ts2SVF5fsWTU3VXSfEnTYutWliKWXCT1lzQxQbm8cUv6\nKPztJGlyWL5A0q9rcryEdY6WdFWh4xQjfkxJEyX1K1B+kKRFkhZImifppAR1zJLUtcD2oq5ZSUMk\nLZX0bHj/Z0kLJY0K53Fmgf0Lnmso9ytJb4ZjHxlbP17SEkn9i4nb5dCg/pVowDxPyXieEvG5IMl4\nnpLxPCXjeSpefc/BaohjDQYDz5jZNbF1DTHOJDEVKmMAZvYeMDTBfrVRZ0Mww8ymAkj6JvAY8PUS\nxDESuMjM/impI3C0mR0Y4irYgE5C0ulADzM7UNJxwG+B3gBmdrWkucAPgNm1UZ9zzjnnnKuqvocI\nNpP0cPgWf7KkFpkFJA2XtDi8bg/rmoRv7xeHnohRYX0PSdPDN/XzJHWvQUxtgA8y1m2IxXN+rPfj\nwbBuoqS7Jb0g6a10z4OkVpJmhFgWSRoU1pdJWhb2Wy5pkqRTwv7LJR0dyrWUNEHSHEmvSDojhPEZ\nsDXBuWwIxxkb4p0vaa2kCenTicUT703sGnpklku6MVseCtWZK1dxkg6QNE3Sy5JmSzpIUmtJq2Jl\nWkpaI6lptvJZ6t9ClJ+czOyT2Nu9gA8TnNdGYEeuay8YKuklSa9L6hPir9IjKOkJSf0k3QD0BSZI\n+jnwNNAlfEZ9M/JULikVznuapA5JzxX4HvBQOO+XgH1i+wOsJ7rm3a6qyb82uyPPUzKep0QqKipK\nHUKj4HlKxvOUjOepePXdg3UwcKGZzQk3/ZcBd6U3SuoE3A70IrqZnB4aKWuBLmZ2RCjXOuwyCRhn\nZlMlNadmDcamQGV8hZkdF+o5FLgWON7MNkuK35h2NLM+kg4BpgJTgE+BwWa2TVJ7YE7YBtAD+L6Z\nLZU0DxgW9h8U6jgTuA541sxGStoHmCtphpm9CLwYYjoK+KGZXZJ5Ium4zWw0MDoc4zkgfcMf722K\nLx8DHBbif1nSk2Y2P328fBLmKu3+EPvbko4F7jOzAaFB1t/MZgMDgb+b2Q5J1coDAzLqvzK9LGks\n8LKZPZlZsaTBwG1AR+DbCc5rSNivnOzXHkBTMztOUa/RGOCU9O5ZjnezoqGJV5nZAkn3AE+YWXk4\n7sjwdw+iz2uQmW2UNBQYB4xMeK5dgHdi79eFde+H95VE13x+s2LL3fCbP+ecc87t9lKpVKIhk/Xd\ng7XGzOaE5YeJvtGPOwaYZWabzKySqAHVD1gBdA+9Rt8GPpK0F9A5PfTLzD4zs0+LCUaSgJ5EDbhs\nTgIeNbPNoY4tsW2Ph3XLgK+lDwncJmkRMAPoLCm9baWZLQ3LS8J2gFeJbmEBTgWukbQASAHNgSrz\ngMzslWyNqxweBu4ys4UFyk03sy0hf1Oo/rkkkS9XSGoFnAA8Gs7vd0C6Z2UycHZYHgY8UqB8VmY2\nOlvjKmx73MwOAc4A/ljEeVW79mLbpoS/rwBlCY9X6PnnBwOHE325sICo0d05s1C+cy1gHXCQpD3z\nljox9vLGVXY+ZyYZz1MynqdEfC5IMp6nZDxPyXievlBRUcGYMWN2vnIp9RysbPN3qt2AmtkWST2J\neh4uBc4CrshWtsqBpMuAi0M93zGz9bFtTYhunrcDTxVxDmnbs8Q8AtgX6GVmlYoeANEiS/nK2PtK\nvvgcRNTL9WYN4qlC0hiiBm21oXpZJPlcdlUTYHO6xybDVOBWSW2BcmAm0VC+XOVrzMyel7SHpPZm\ntjFB+WzX3kVhc/oz3MEXn+HnVP3iotow2AIEvGZmfYrcL20dsH/s/X5hHQBmtkLSMmC1pAFmtqSG\n9TjnnHPOuSzquwerTNHEe4BzgH9kbJ8L9JPUTlJTYDgwOwy3a2pmjwHXA+Vmtg14R9L3ACQ1l/SV\n+MHM7F4z62Vm5fHGVdhWaWbdgHl80XuSaSZwlqR2oY62OcqlG1j7AB+ExtWJVO3VSPLLrU8Dl+/c\nIfYEuGIomrt1MjAqc1OOXU6R1CbkbzDwQpZjLitQbd5cmdlHwEpJQ2LHPCJs+5joc7gbeNIiOcsX\nS1KP2HJ5qHNjeD8jDE3NtW+1ay9X0fB3FXCkIvsDx+YLLcu65cBXJfUO9e8Rhl8mNRU4P+zbG9hi\nZunhgekcdifq/fXG1a7wnr1kPE/JeJ4S8bkgyXiekvE8JeN5Kl59N7BeB34kaSnRRPvfhvXpp9ut\nB64hGh63gGiOyRNEc0hSYcjUH0MZiG4kLw9D8l6gwBCyHN4A2mXbEIb03Ur
UyFsAjI/HGy8a/k4C\njgnxnAssy1Im2/5pNxM9CGSxoodQ3JRZQNJRYW5SPlcSDSt7OTxEYUyBeucSDXdbSDTMb35Gne0L\n1JcvV3HnAiMVPZTkNWBQbNsjRD2Af4mtG5GnfDWKHu4xMMum70t6TdJ8okbcsFBeRHPjNuU5bK5r\nL+s1YGYvEDWylgC/JBo+SL59Mvb/FzAEuEPSQqL/Do5Peq5m9jeihulbRMMqL8so0hZYFYbgOuec\nc865Wqbd/VfaJf0UaJ/xmHYXI+m7QHcz+02pY6lNkg4jeujKT0odS30JD834NzMbnqeMMab+Ymq0\nVuK9Dkl4npJpiHkaAw3tHiGVSvm36Ql4npLxPCXjecpNEmZWbURSfc/BaoimAA9ImmZmp5c6mIbI\nzGoyR63BC0PkdqfG1XjgW8B/Fiw8pq6jcc41dB261GRQiHPOud2+B8s5V5Uk838XnHPOOefyy9WD\nVd9zsJxzzjnnnHPuS8sbWM45VwP+uyDJeJ6S8Twl43lKxvOUjOcpGc9T8byB5ZxzzjnnnHO1xOdg\nOeeq8DlYzjnnnHOF+Rws55xzzjnnnKtj3sByzrka8DHpyXiekvE8JeN5SsbzlIznKRnPU/H8d7Cc\nc9VI1Xq7nXOuQejQoYz161eVOgznnMvJ52A556qQZOD/LjjnGirh9y7OuYbA52A555xzzjnnXB2r\ntwaWpDJJr+bYNktSeX3FklF3V0nzJU2LrVtZilhykdRf0sQE5fLGLemj8LeTpMlh+QJJv67J8RLW\nOVrSVYWOU4z4MSVNlNSvQPmDJf1T0qdJYwnXZNcC24u6ZiUNkbRU0rPh/Z8lLZQ0KpzHmQX2T3Ku\n50haFF7PSzoitm28pCWS+hcTt8slVeoAGolUqQNoJFKlDqCRSJU6gEbB58wk43lKxvNUvPqeg9UQ\n+/QHA8+Y2TWxdQ0xziQxFSpjAGb2HjA0wX61UWdDsBH4MdFnXUojgYvM7J+SOgJHm9mBEDWeaqmO\nFUA/M9sq6TTgfqA3gJldLWku8ANgdi3V55xzzjnnYup7iGAzSQ+Hb/EnS2qRWUDScEmLw+v2sK5J\n+PZ+cfhmflRY30PS9NALME9S9xrE1Ab4IGPdhlg854c6F0h6MKybKOluSS9Ieivd8yCplaQZIZZF\nkgaF9WWSloX9lkuaJOmUsP9ySUeHci0lTZA0R9Irks4IYXwGbE1wLhvCccaGeOdLWitpQvp0YvHE\nexO7hh6Z5ZJuzJaHQnXmylWcpAMkTZP0sqTZkg6S1FrSqliZlpLWSGqarXyW+rcQ5ScnM/vQzF4B\nPk9wPmkbgR25rr1gqKSXJL0uqU+Iv0qPoKQnJPWTdAPQF5gg6efA00CX8Bn1zchTuaRUOO9pkjoU\nca5zzCx9rcwBumQUWU90zbtdVlHqABqJilIH0EhUlDqARqKi1AE0ChUVFaUOoVHwPCXjeSpeffdg\nHQxcaGZzwk3/ZcBd6Y2SOgG3A72Ibianh0bKWqCLmR0RyrUOu0wCxpnZVEnNqVmDsSlQGV9hZseF\neg4FrgWON7PNkuI3ph3NrI+kQ4CpwBTgU2CwmW2T1J7oBndqKN8D+L6ZLZU0DxgW9h8U6jgTuA54\n1sxGStoHmCtphpm9CLwYYjoK+KGZXZJ5Ium4zWw0MDoc4zkgfcMf722KLx8DHBbif1nSk2Y2P328\nfBLmKu3+EPvbko4F7jOzAaFB1t/MZgMDgb+b2Q5J1coDAzLqvzK9LGks8LKZPVko7gTnNSQcs5zs\n1x5AUzM7TtLpwBjglPTuWY53s6STgKvMbIGke4AnzKw8HHdk+LsH0ec1yMw2ShoKjANG1uBcLwKm\nZayrJLrmCxgTW67Ab2qcc845t7tLpVKJhkzWdwNrjZnNCcsPEw3buiu2/RhglpltApA0CegH3AJ0\nl3Q38DfgGUl7AZ3NbCqAmeX9Zj8bSQJ6hliyOQl41Mw2hzq2xLY9HtYtk/S19CGB2xTNk6kEOse2\nrTSzpWF5CTAjLL8KdAvLpwJnSPppeN8c6AosT1caemKqNa5yeBi4y8wWFig3PX1ukqYQ9bTMT1hH\nWr5cIakVcALwaMg7QLPwdzJwNtGwtWHAPQXKZxUalrVtBRnXXmzblPD3FaAs4fEKPf/8YOBwoi8X\nRPSlwbuZhQqdq6QTgQuJPsu4dcBBkvY0s+25jzCmQJgumgtSUeIYGoMUnqckUniekkiVOoBGIZVK\nea9DAp6nZDxPX6ioqKiSi7Fjx2YtV+o5WNnm71S7ATWzLZJ6At8GLgXOAq7IVrbKgaTLgItDPd8x\ns/WxbU2Ibp63A08VcQ5p8ZvTdBwjgH2BXmZWqegBEC2ylK+Mva/ki89BRL1cb9YgniokjSFq0FYb\nqpdFks9lVzUBNqd7bDJMBW6V1BYoB2YCe+UpX29yXHsXhc3pz3AHX3yGn1O1J7XaMNgCBLxmZn1q\nFjEoerDF/cBp6QZvmpmtkLQMWC1pgJktqWk9zjnnnHOuuvqeg1UmKT3s7BzgHxnb5wL9JLWT1BQY\nDswOw+2amtljwPVAuZltA96R9D0ASc0lfSV+MDO718x6mVl5vHEVtlWaWTdgHlHvSTYzgbMktQt1\ntM1RLt3A2gf4IDSuTqRqr0aSX259Grh85w7SkQn2qR5MNHfrZGBU5qYcu5wiqU3I32DghSzHXFag\n2ry5MrOPgJWShsSOeUTY9jHR53A38KRFcpbfRVVyoGjOXKechbNcewWOuwo4UpH9gWOTxhIsB74q\nqXeof48w/DIRRU8+/Ctwnpm9nWX7EUB3ot5fb1ztkopSB9BIVJQ6gEaiotQBNBIVpQ6gUfDehmQ8\nT8l4nopX3w2s14EfSVpKNNH+t2F9+ul264FriMYALCCaY/IE0UT9lKQFwB9DGYDzgcslLSJqFKQf\nBlCMN4B22TaEIX23EjXyFgDj4/HGi4a/k4BjQjznAsuylMm2f9rNRA8CWazoIRQ3ZRaQdFSYm5TP\nlUBnovlU80NvVr565xINd1tINMyvyvDA0MjIK0+u4s4FRip6KMlrwKDYtkeIegD/Els3Ik/5ahQ9\n3GNglvUdJL1DlJfrFD1EY68wBK8HsCnPYXNde1mvATN7gaiRtQT4JdHwQfLtk7H/v4AhwB2SFhL9\nd3B80nMFbiC6nu8Nc9vmZmxvC6wys8rquzrnnHPOuV2l3f3X0MN8p/YZj2l3MZK+C3Q3s9+UOpba\nJOkwooeu/KTUsdSX8NCMfzOz4XnKWON4+n6ppfBv05NI4XlKIoXnKYkUcCK7+71LIT5nJhnPUzKe\np9wkYWbVRiTV9xyshmgK8ICkaWZ2eqmDaYjMrCZz1Bq8MERud2pcjQe+BfxngtJ1HY5zztVIhw5J\nnynknHOlsdv3YDnnqpJk/u+Cc84551x+uXqw6nsOlnPOOeecc859aXkDyznnaiDJDw06z1NSnqdk\nPE/JeJ6S8Twl43kqnjewnHPOOe
ecc66W+Bws51wVPgfLOeecc64wn4PlnHPOOeecc3XMG1jOOVcD\nPiY9Gc9TMp6nZDxPyXiekvE8JeN5Kp7/DpZzrhrJfwfLuYakQ5cOrF+7vtRhOOecS8DnYDnnqpBk\njCl1FM65KsaA///aOecaFp+D5ZxzzjnnnHN1rN4aWJLKJL2aY9ssSeX1FUtG3V0lzZc0LbZuZSli\nyUVSf0kTE5TLG7ekj8LfTpImh+ULJP26JsdLWOdoSVcVOk4x4seUNFFSvwLlD5b0T0mfJo0lXJNd\nC2wv6pqVNETSUknPhvd/lrRQ0qhwHmcW2L/guYZyv5L0Zjj2kbH14yUtkdS/mLhdDg3qX4kGzPOU\njOcpEZ8LkoznKRnPUzKep+LV9xyshji+YTDwjJldE1vXEONMElOhMgZgZu8BQxPsVxt1NgQbgR8T\nfdalNBK4yMz+KakjcLSZHQhR46k2KpB0OtDDzA6UdBzwW6A3gJldLWku8ANgdm3U55xzzjnnqqrv\nIYLNJD0cvsWfLKlFZgFJwyUtDq/bw7om4dv7xZIWSRoV1veQND18Uz9PUvcaxNQG+CBj3YZYPOeH\nOhdIejCsmyjpbkkvSHor3fMgqZWkGSGWRZIGhfVlkpaF/ZZLmiTplLD/cklHh3ItJU2QNEfSK5LO\nCGF8BmxNcC4bwnHGhnjnS1oraUL6dGLxxHsTu4YemeWSbsyWh0J15spVnKQDJE2T9LKk2ZIOktRa\n0qpYmZaS1khqmq18lvq3EOUnJzP70MxeAT5PcD5pG4Edua69YKiklyS9LqlPiL9Kj6CkJyT1k3QD\n0BeYIOnnwNNAl/AZ9c3IU7mkVDjvaZI6JD1X4HvAQ+G8XwL2ie0PsJ7omne7qib/2uyOPE/JeJ4S\nqaioKHUIjYLnKRnPUzKep+LVdw/WwcCFZjYn3PRfBtyV3iipE3A70IvoZnJ6aKSsBbqY2RGhXOuw\nyyRgnJlNldScmjUYmwKV8RVmdlyo51DgWuB4M9ssKX5j2tHM+kg6BJgKTAE+BQab2TZJ7YE5YRtA\nD+D7ZrZU0jxgWNh/UKjjTOA64FkzGylpH2CupBlm9iLwYojpKOCHZnZJ5omk4zaz0cDocIzngPQN\nf7y3Kb58DHBYiP9lSU+a2fz08fJJmKu0+0Psb0s6FrjPzAaEBll/M5sNDAT+bmY7JFUrDwzIqP/K\n9LKkscDLZvZkobgTnNeQcMxysl97AE3N7DhFvUZjgFPSu2c53s2STgKuMrMFku4BnjCz8nDckeHv\nHkSf1yAz2yhpKDAOGJnwXLsA78Terwvr3g/vK4mu+fxmxZa74Td/zjnnnNvtpVKpREMm67uBtcbM\n5oTlh4mGbd0V234MMMvMNgFImgT0A24Buku6G/gb8IykvYDOZjYVwMwKfbNfjSQBPUMs2ZwEPGpm\nm0MdW2LbHg/rlkn6WvqQwG2K5slUAp1j21aa2dKwvASYEZZfJbqFBTiV/8/e/UdJVd353n9/QBlF\nUcEf/DBja1wa442jYvwVvdr6RExMNF6UJGoGV+JosnCpmJi1fDJ50uAvjI5JdObJGI1DouIa5F6S\nQY0iYpejKILQgAqiTtCIT1Bv/MksRq/yff7Y34LT1XW6ThXQRdHf11q1+tQ5+5z9Pd9zGmr33vsU\nnDQBvhkAACAASURBVC7ph/5+ELAPsLJcqffE9Ghc5bgb+JmZLalRbk753CTNJPW0LC5YR1lvuULS\nTsAXgBmed4Dt/ee9wDdIw9a+Cfy/NcpX5Q3Lze2PVNx7mW0z/ecioK3g8Wo9//wzwOdIf1wQ6Y8G\n/19loU0419eBAyX9lZl9mFvqpAaP3p+sIhqeRUSeiok8FVIqleKv6QVEnoqJPBUTedqovb29Wy4m\nT55ctVyz52BVm7/T4wOomb0r6VDgVOB7wDhgYrWy3Q4kTQAu9HpOM7M1mW0DSB+ePwQeqOMcyrIf\nTstxnAfsARxuZuuVHgCxQ5Xy6zPv17PxOojUy/VSA/F0I2kSqUHbY6heFUWuy6YaALxT7rGpMAu4\nVtJQYDTwKLBzL+X7TM6993e+uXwNP2HjNfyY7j2pPYbB1iDgOTM7rrGIeR3468z7T/k6AMzsj5JW\nAK9K+r/M7PkG6wkhhBBCCFX09RysNqWJ9wDnAo9XbF8AnCBpmKSBwDnAYz7cbqCZ/Q74MTDazNYC\nr0n6GoCkQZJ2zB7MzH5pZoeb2ehs48q3rTezfYFnSL0n1TwKjJM0zOsYmlOu3MDaFXjTG1cn0b1X\no8g3t84GLt2wQ+YJcPVQmrv1ReCyyk05u5wiaTfP35nAvCrHXFGj2l5zZWYfAKsknZ055t/4tv8k\nXYebgfstyS2/ibrlQGnO3MjcwlXuvRrHfQU4TMlfA0cVjcWtBPaUdIzXv50PvyxqFjDe9z0GeNfM\nysMDyzncj9T7G42rTRG9DcVEnoqJPBUSf0UvJvJUTOSpmMhT/fq6gfUCcLGk5aSJ9rf6+vLT7dYA\nVwIloIs0x+Q+0hySkqQu4C4vA+mD5KWSlpIaBdnJ/EW9CAyrtsGH9F1LauR1ATdl480W9Z/TgCM9\nnm8BK6qUqbZ/2dWkB4EsU3oIxVWVBSQd4XOTenM5MIo0n2qx92b1Vu8C0nC3JaRhft2GB3ojo1e9\n5CrrW8AFSg8leQ44I7NtOqkH8F8z687rpXwPSg/3+GqV9cMlvUbKy98rPURjZx+Ctz/wdi+Hzbv3\nqt4DZjaP1Mh6HvgFafggve1Tsf//Ac4GfippCen34Nii52pmfyA1TF8GfkWa55g1FHjFzNZX7htC\nCCGEEDad+vs3w/t8p90rHtMeMiR9BdjPzP6p2bFsTpL+G+mhK1c0O5a+4g/N+B9mdk4vZYxJfRdT\ny4o5M8VEnoqpladJ0N//v4aYC1JU5KmYyFMxkad8kjCzHiOS+noO1tZoJvAbSQ+a2ZebHczWyMwa\nmaO21fMhcv2pcXUT8N+B/7tm4UlbOpoQQj2G793IAI0QQgjN0O97sEII3Umy+HchhBBCCKF3eT1Y\nfT0HK4QQQgghhBC2WdHACiGEBhT5osEQeSoq8lRM5KmYyFMxkadiIk/1iwZWCCGEEEIIIWwmMQcr\nhNBNzMEKIYQQQqgt5mCFEEIIIYQQwhYWDawQQmhAjEkvJvJUTOSpmMhTMZGnYiJPxUSe6hffgxVC\n6EHq0dsdQgghhD40fHgba9a80uwwQgNiDlYIoRtJBvHvQgghhNBcIj6nb91iDlYIIYQQQgghbGFb\ntIElqU3SsznbOiWN3pL155G0j6TFkh7MrFvVjFjySDpR0tQC5eqOW9JlknbI2Xa+pFt8uUPS+BrH\nOl9SR40yH9QbYy3lY/o91lmgfKekFyR1+bXfo0b5XvPv2++rM+ZBkuZ4/eMkHS/pOX9/UN7vSmb/\nmucqaUdJ90taIelZSddlth3o9U2vJ+6Qp9TsAFpEqdkBtIhSswNoEaVmB9AiSs0OoEWUmh1
AS4g5\nWPXrix6srbFv80zgYTP7cmbd1hhnkZgaiXsiMLiB/RqNYUvk1nKWe3OOmR1uZqPN7H/XWUcj2yuN\nBszrnwGcB1xnZqOBdQWPV6TMjWb2WeBw4HhJp5IqftHMPgccImm/OmMPIYQQQggF9EUDa3tJd0ta\nLuneaj0nks6RtMxf1/u6AZKm+rqlki7z9ft7L8ASSc80+EFxN+DNinVvZeIZ73V2Sfqtr5sq6WZJ\n8yS9LGmsr99J0iMey1JJZ/j6Nu9FmCpppaRpkk7x/VdK+ryXGyzpDknzJS2SdLqH8RHwXoFzecuP\nMznTO7PajznYezO6PI/jJF0CjAI6Jc31fb/tMc0Hjsscey3pg39v1nk5JO0laaZfmy5Jx5RTmsnt\nFZIWeJkOXzdF0oRMmQ5J388rX+ET4O0CeYL67vcN+ffeqnJuF0naycsMkTTDr/NdmfhXSRrmy0d4\n79mewF3AkX6ci4CvA1dn9/V9Bki6QdLTft4XFj1XM1tnZo/58sfAYuBTFcXeIP0OhE3S3uwAWkR7\nswNoEe3NDqBFtDc7gBbR3uwAWkR7swNoCe3t7c0OofWY2RZ7AW3AeuAYf38H8H1f7iT9RX8k8Cow\njPQBeC5whm97OHOsXfznfOAMXx4E7NBAXJOBiTnbDgZeAIb6+93851Rgui9/FnjJlwcCO/vy7pn1\nbaQP6Qf7+2eAO3z5DGCmL18LnOvLuwIrgR0rYjoCuK3gue0KLCX1XowFfpXZNsR//jFzfiMy+d8O\neAK4pcHr/a/Apb6sTH3v+89TyvH49vuA44HDgFLmOM8De+eV9/cfVKl/JHB/TmydwLOkBseP6zyv\nWcCxvjzY79MTgXe8TgFPAl/I5HdY5to96ssnArMyx50KjM3cL8t8+ULgR5l7fCHQVvRcM2V2A/4D\n2Ldi/Vzg873sZ9CReXUaWLziFa94xSte8erTFxa2Lp2dndbR0bHh5deIyldf9GD9yczm+/LdpA/U\nWUcCnWb2tpmtB6YBJ5A+pO7nvUanAh9I2hkYZWazSGf0kZn9Vz3BSBJwKLA6p8jJwAwze8freDez\n7fe+bgWwV/mQwBRJS4FHgFGSyttWmdlyX37et0P6oL+vL48BrpTURRoMPAjYJxuQmS0ys4sKnuLd\nwE1m1uX1nOI9RMebWXkulNjYq3Q0G/P/MbAp83NOBv7ZY7ZMfWVjPJ7FpIbOZ4ADzGwJsKekEZL+\nBnjbzF7PK59XuZn92cy+mrP5XDM7BPjvwH+X9K06zmse8HPv/Rvq9ynAAq/TgCVsvKab+ozzMcB4\nvyeeJjV+u513jXNF0kDgHuAXZvZKxebVpN+BXkzKvNqLR96vlJodQIsoNTuAFlFqdgAtotTsAFpE\nqdkBtIhSswNoCTEHa6P29nYmTZq04ZWnL74Hy2q8hyofSM3sXUmHAqcC3wPGkeYO9frh1YeaXej1\nnGZmazLbBpAabh8CD9RxDmUfVon5PGAP4HAzW6/00IkdqpRfn3m/no25F3CWmb3UQDzdSJpEatDe\nCWBmLyk9SOQ04BpJj5jZNdV23dS6XbVrW1nPFDO7vcq2GaRrPIKNjbzeyteqq3thsz/7z/+UdA9w\nFKkxWmTfn0q6H/gKME/SGN+Uvb6fsPGafszG4YhVHyZSg4BLzGxOA/uW3QasNLN/rLLtV8BsSUeZ\n2Xc3oY4QQgghhFChL3qw2iQd7cvnAo9XbF8AnCBpmP/V/RzgMUm7AwPN7HfAj4HRZrYWeE3S12DD\nU9l2zB7MzH5pGx9ksKZi23oz25c0XO8bOfE+CozLzKEZmlOu3CjZFXjTG1cnkYZ6VZbpzWzg0g07\nSIcV2KdnMGnu1heByzLrRgLrzOwe4EbSsEuA94FdfPlpUv6HStqe1MipdvyLs/OkcswFJnj5AZKG\nlHf3n7OB75TnMEka5XOTAO4FvgmcRWps5ZXfo+KYNUka6PcTfo5fBZ7z92cq86S9nP0/bWbPm9kN\npOF6B9WochVpaCB+PvWaDUyQtJ3Xf0DlfV4j3mtIQ2ovzylyBXBBNK42VXuzA2gR7c0OoEW0NzuA\nFtHe7ABaRHuzA2gR7c0OoCXEHKz69UUD6wXgYknLSXNCbvX1BuCNoCtJ/bRdwEIzu480B6fkw6Tu\n8jIA44FLfUjePGB4AzG9SBp21YMP6buW1MjrAm7Kxpst6j+nkR5csBT4FrCiSplq+5ddTXoQyDKl\nx3RfVVnAH5RwWy/nA3A56eEVC/0hCpOAQ4AFfh4/Acq9V7cDD0ma6/mfTJrb9jiwvMeRk4OAv9SI\nYSJwkqRlpEbswb6+fK3nkIatPeVlZgA7+7blwBBgtZm90Uv5IdljZkka6T1Nlf6K1GOzhDTUcLXn\nAGB/aj9MZKLSI8+XkubVPVilTDaeq4BbJC0g9Wblybsnfk26Dov9nriVit7mvHOVtDfwI+DgzIM5\nvlNRbCjwci9xhRBCCCGEBilNH+lfJP0Q2N3MrqxZOAAgaRbpgQy9NRhajqQ7gcvNrFbjcZvgcxCX\nAWeb2cqcMlbnCMx+qkT89bOIEpGnIkpEnoooEXkqokTkqYgSW3eexNbwOb1UKkUvVg5JmFmPUVV9\n0YO1NZoJHKfMFw2H3pnZGdta4wrAzMb3o8bVgaRe4i5SL24IIYQQQtjM+mUPVgghX+rBCiGEEEIz\nDR/expo1rzQ7jNCLvB6svniKYAihxcQfXkIIIYQQGtNfhwiGEMImie8FKSbyVEzkqZjIUzGRp2Ii\nT8VEnuoXDawQQgghhBBC2ExiDlYIoRtJFv8uhBBCCCH0Lp4iGEIIIYQQQghbWDSwQgihATEmvZjI\nUzGRp2IiT8VEnoqJPBUTeapfNLBCCCGEEEIIYTOJOVghhG7ie7BCCCFUM3zv4axZvabZYYSw1cib\ngxUNrBBCN5KMSc2OIoQQwlZnUnxPYghZ8ZCLEELYnFY1O4AWEXkqJvJUTOSpmMhTITG3qJjIU/22\naANLUpukZ3O2dUoavSXrzyNpH0mLJT2YWbdV/XMk6URJUwuUqztuSZdJ2iFn2/mSbvHlDknjaxzr\nfEkdNcp8UG+MtZSP6fdYZ4HynZJekNTl136PGuV7zb9vv6/OmAdJmuP1j5N0vKTn/P1Beb8rmf2L\nnutoScskvSjpF5n1B3p90+uJO4QQQgghFNcXPVhbY1/ymcDDZvblzLqtMc4iMTUS90RgcAP7NRrD\nlsit5Sz35hwzO9zMRpvZ/66zjka2VxoNmNc/AzgPuM7MRgPrCh6vSJl/Bi4wswOBAyWdSqr4RTP7\nHHCIpP3qjD1UigwWE3kqJvJUTOSpmMhTIe3t7c0OoSVEnurXFw2s7SXdLWm5pHur9ZxIOsf/4r5M\n0vW+boCkqb5uqaTLfP3+3guwRNIzDX5Q3A14s2LdW5l4xnudXZJ+6+umSrpZ0jxJL0sa6+t3kvSI\nx7JU0hm+vk3SCt9vpaRpkk7x/VdK+ryXGyzpDk
nvI4T3mc\npzzOU32+i6CZmZmZmdl6ltXBkjRC0oN16tolje7dsPJI2knSA5JuL5Wt6ItY6pE0VtLUjHYrSu1v\nrtdG0ra9GFfN85TqG8adXhftTdr0+uujfMyc51vSOEkPSVqTG0uz40panRdtl32+LelBSRdJeqOk\nOZLmS9o/57nNvNZtJM2QtFzSHZKGlOr+W9JcSW9uNXbrzp/m5XGe8jhPeZynPM5THucpj/PUulZG\nsPrjXMLDgRkR8bFSWX+MMyemqLPd6nFa0ex4rcbdF3LO/yBwBDCzF4/bk+v+JLB7RPwb8EFgcUTs\nFRH3Zh4vp83ZwF0RsStwD/DltTtHjAHmAwe1HLmZmZmZZWmlg7WxpGmSlki6TtKm1Q0kjZe0OP1c\nmMoGSZqayhZJ+nwqHynpTkkLJd0vaZcexL818FxV2fOleE5M51wg6epUNlXSZEmzJT0q6chUvoWk\nu1IsiyQdmspHSFqa9lsuabqkD6X9l0vaO7XbXNKU0qjEISmMvwJ/yLiW50vbQyTdImmZpCtK5Wvn\neEo6I42GLC7ldPO034JUflQq3yfFuzDFt0X5xJJuTSOBCyStkvSPmXGvAV5IxxhUGqFZKOmz1Y1T\n3u5LOb42xfsRSdeV2qwdWZP04er2TfJWU0Qsj4hHyvnL8HyKYbikmSk/iyXtty5UnZ+u9T5Jb0qF\nUyuvqfR4dfp9I7AlMF/Sl4CLgMPTcTel63N7vKRfp7rvS2vvONH0WoHDgKvT9tUUH0KUraT4d2Ov\nkr8XJI/zlMd5yuM85XGe8jhPeZyn1rVyF8FdgZMjYo6kKcBngEsrlZK2Ay4E9gRWAXemTspvgR0i\nYvfUbqu0y3TgGxFxk6RN6Nl6sMFAZ7kgIt6TzrMb8BXgvRHxoqTym8rhEbGfpLcDNwE3AH8BDo+I\nlyQNBeakOoCRwCciYomk+4Fj0/6HpnMcCZwD3B0Rp6RpWXMl3RURvwJ+lWLaC/iXiPhU9YVU4k72\nAd4OPAXcIenIiLihUqlimttJqd1g4NeSOlKcz0TEwand30naGPgpcFREPCBpS+DPVec+qHTc/wB+\nERGrK3HXExG/Bcalh58CRlCM0ERVvkk5/SrwgYj4c+pknAF8E/ihpM0i4s/AMcB/pvbn1Gh/fr28\nSboVOCUiXvUt30rHPQ74ZUR8M3V0Kp28LYD7IuKrki6iGJ36Rq1DpeMdJumPEVGZ2vgssFdEnJYe\nV67h/6QcvC8i1kj6HnA8MC3zWt8cEc+mc65U9+mAnRSvmcbKEz93Bnry8YeZmZnZ60hHR0dWh7OV\nDtZTETEnbU8DPkepg0XxZr89IiojGtOBMRRviHeRNBm4DZiR3uRvHxE3AUTEX1uIg3R8AXukWGo5\nELg+Il5M51hVqvtFKltaegMq4JuSxlC8Cd2+VLciIpak7YeBu9L2gxRvPwE+DBwi6az0eBNgJ2B5\n5aQRMZ+iI9LM3Ih4Ml3nNcD+FJ3Aiv2Bn0fEX1KbG4D3A3cAF0v6JnBrRNwr6Z3A7yLigRTDS2mf\nLieU9EbgJ8C41Llq1QeB71e+QKkq3wD7ArsBs9NztzFFB2WNpF9S5O5nFNPXzgLaarVvFEClo9jL\n5gFTUkf1xohYlMpfjojb0vZ8iuuvJXfUrDL97wPAaGBeuu5NgWe7Nc6/1upphc9Q5LaxAzKPvgHz\nnPQ8zlMe5ymP85THecrjPOVxntZpa2vrko9JkybVbNdKB6v6jVqt9SDd3kxGxCpJewAfAT4NHAV8\noVbbLgeSPkMxKhDAx8uf1EsaBDwOvAzc2sI1VLxcI+bjgTcCe0ZEp4obCmxao31n6XEn63IoilGu\nR3oQT7WcXHffKeKRNAr1ceA8SXdTdCab5XoQcA0wMSKW9iDeHKJYL3d8jbprgX8FXgTmRcSfUuei\nXvvXTETMSp3ug4CrJF0SEdOAV0rN1rDudfA30mhsqWPYCgFXR8Q5PQz5WUnDIuJZScPpPoX2BuDr\nkpZExG49PIeZmZmZ1dHKtLwRksrTpmZV1c8FxkjaVtJgYDwwM031GhwRP6eYIjY6jaI8LekwAEmb\nSNqsfLCIuCIi9oyI0dXToCKiMyJ2Bu6nmE5Vyz3AUUp3ZpO0TZ12lc7HEOC51Lk6gGK6W3WbRu4A\nTlu7gzQqY5963qNi7dcgiuurzvUsivU7m6pYT3UEMCtN0/xzRPwncDHFSMhyYHianoikLdPzU3YR\nsCgirq8VjIo1XFfXqiu5E/iXyrFr5HsOsJ+kkal+c0lvTXUzU6yfpJjO2Kz9q1Fe67S9pLsaNpZ2\nonhdTAF+lOLscpwqTwB7p+3D6NrBavQ6qtTdDYwrrenaJsWQ6ybgn9L2ScCNVfUnAre7c/XqeU56\nHucpj/OUx3nK4zzlcZ7yOE+ta6WDtQz4rKQlFIvkf5DKK1PCVlLcwawDWEAxEnEzsAPQIWkBxRS0\ns9N+JwKnSVoEzAaG9SD+3wA1b22dpvRdQNHJWwBcUo633DT9ng7sk+I5AVhao02t/SvOo7gRyGIV\nt7Q/t7qBpL0kXdngeirmApdTTEd8LCJ+UT53RCwArqKYvvYr4Mo0de1dFGu/FgBfB86PiFcoOmmX\nS1oIzADeUHW+M4EPq7jJxQOSDq6q3wn43yYx/wh4Gliczj++Kub/R/HG/5qU4/so1vUREZ3ALcBH\n0++G7anzHKi4WcfwGuWHS3qaYpriLVp3W//t6DoSVUsbsEjSA8DRwHcaxQD8X2BsysG+wJ9KdY1G\nIit5WkrxQcSMdN0zgFrXVPNaKTrLH5K0nGK64YVV9dsAvTHKamZmZmY1KC2ZGZDSeqehEXF208bW\nY+kmDj+JiIf6OpbepOJOh09GxC19HctrJd00Y3FE/LBBm2DiaxdTvzcRBvLfSTMzM1s/JBER3WYo\nDfQO1kiKkZyXqr4Ly8yqSJpJsW7whIh4pkG7gftHYT0YtsMwVv72Vd+Y0szMzF5n6nWwenJr9H4j\nIh6LiPe7c2XWXESMjYgDGnWuSm39k37qda48Jz2P85THecrjPOVxnvI4T3mcp9YN6A6WmZmZmZlZ\nfzKgpwiaWe+TFP67YGZmZtbY63KKoJmZmZmZWX/iDpaZWQ94Tnoe5ymP85THecrjPOVxnvI4T61z\nB8vMzMzMzKyXeA2WmXXhNVhmZmZmzdVbg7VRXwRjZv2b1O1vhfUzw4aNYOXKJ/o6DDMzM6viKYJm\nVkP4p+lPe5+e/9lnn6z/9PUjnrufx3nK4zzlcZ7yOE95nKfWNexgSRoh6cE6de2SRq+fsBqTtJOk\nByTdXipb0Rex1CNprKSpGe36VdxlObE1ayNpgqQzei+qrseUNFXSmCbtt5Z0g6RFkuZI2i3jHO2S\ndmpS39LrX9I4SUsk3Z0eXyNpoaTPp+s4ssn+Odd6XLrORZLulbR7qe4SSQ9LGttK3GZmZmaWL2cE\nqz8uxjgcmBERHyuV9cc4c2Lqj3FXDPT4K74
CLIiIPYCTgO/2URynAKdGxAckDQf2johRETG5F8/x\nODAmXev5wJWViog4EzgX+OdePN8GrK2vAxgQ2tra+jqEAcF5yuM85XGe8jhPeZyn1uV0sDaWNC19\n8n6dpE2rG0gaL2lx+rkwlQ1Kn7gvTp+mfz6Vj5R0Z/rk/n5Ju/Qg7q2B56rKni/Fc2I65wJJV6ey\nqZImS5ot6dHKaIGkLSTdlWJZJOnQVD5C0tK033JJ0yV9KO2/XNLeqd3mkqakkZH5kg5JYfwV+EPG\ntTyfjjNc0sw0MrdY0n6pfLWk81O+7pP0plR+cOmcM0rlEyT9OLVdLunUVD42Hf8WScskXaHCyZL+\nvZS7UyVdUp3TZvHXy3uZpL+XdLukeSmWt0naStITpTabS3pK0uBa7WucfxVFrhvZDbgHICKWAztX\n8tXA74E19V7HydGSfp3yWXm+TpJ0Wel6bpY0RtLXgP2BKZK+BdwB7JCe7/2r8jRaUke67tslDcu9\n1oiYExGV190cYIeqJisp/v2YmZmZ2foQEXV/gBFAJ7BvejwFOCNttwOjge2AJ4FtKTpsdwOHproZ\npWNtlX7PAQ5N25sAmzaKoU5ck4Av1KnbDVgGbJMeb51+TwWuTdtvBx5J24OBLdP20FL5CIo3s7ul\nx/cDU9L2ocANafsC4Li0PQRYDmxWFdNewJVNrukM4MtpW8AWabsT+Hjavgj4SuVcpX1PAb6dticA\nC1JuhwJPAcOBscD/pusSMAM4EtgCeBQYnPafDbyjB89JvbxPKL1m7gJGpu13A3en7Z8DY9P20ZVc\nNWi/9pg1XhcH1yi/ALikdJy/AntmXle913F7KecfA+5M2ycB3y21v5liRKmyz56l19fiUrup6fnY\nKD0HQ0v5mJJ7rVVtvlj9ugPeD9zSZL+A8E/Tn/Y+Pj8xELS3t/d1CAOC85THecrjPOVxnvI4T/Wl\n/4u7vZfKuYvgUxExJ21PAz4HXFqq3wdoj4gXACRNB8ZQTE/aRdJk4DZghqQtge0j4iaKiJqNPHQj\nScAeKZZaDgSuj4gX0zlWlep+kcqWSnpz5ZDAN1WsbekEti/VrYiIJWn7YYo3/AAPAjun7Q8Dh0g6\nKz3eBNiJoqNFOt984FNNLm0exejGxsCNEbEolb8cEbel7fnAB9P2jpKuo+jgbgysKB3rxpTb30u6\nh6JT8QdgbkQ8CcX6H2D/iLhBxZqggyUtAzaKiIebxFpLo7wjaQvgfcD16TkkxQ1wHXAMMBM4Fvhe\nk/Y1RcSEOlUXApMlPUDx3C0A1mRe1+NUvY5LdTek3/MpOkw5mt2eb1fgncCd6boHAb+rbtTgWouT\nSAcAJ1OMmpU9A7xN0hsi4uX6R5hY2m7D0+HMzMxsQ9fR0ZF104+cDlY0eQw13jRGxCpJewAfAT4N\nHAV8oVbbLgeSPgN8Mp3n4xGxslQ3iOIN78vArRmxVyu/oazEcTzwRoqRhU4VN23YtEb7ztLjTtbl\nTsAnIuKRHsSzVkTMSp28g4CrJF0SEdOAV0rN1pTOexlwcUTcquKmBeU33OXnSNR+zsrtplCsU1pG\nMZKyPgwCXoyIWjeGuAm4QNI2FCNG9wBbNmjfkohYTWndUXqOH8/ct9br+NRUXXk9lJ+Xv9F16m23\nKbVNCHgoIvZrcb91ByhubHEl8NFKh7ciIh6XtBR4UtIH6nemJ/b09BuQtr4OYEDw3P08zlMe5ymP\n85THecrjPK3T1tbWJR+TJk2q2S5nDdYISe9J28cBs6rq5wJjJG0raTAwHpgpaSjFtLOfA18FRkfE\nS8DTkg4DkLSJpM3KB4uIKyJiz4gYXe5cpbrOiNiZYrreMXXivQc4StK26Rzb1GlX6WANAZ5LnasD\n6DoSkfNlQHcAp63dQRqVsU/3YIo71j0XEVOAH1F0NBrFsBXrRjZOqqo7LOV2KMXUwHmpfB8Va8sG\nUeTvXoCImAvsSPHcXVMnvqVNLqFh3lMnZ4WkcaVj7p7q/kTxnE6mmL4Wjdq3StKQNDKIpE8CM9Nr\nERXr77ZrsG+313G9pun3E8CotL5tR4rRw7qHr1G2HHiTpH3T+TdSxl0PS/HuBPwM+MeIeKxG/e7A\nLhQjyT0ZqTQzMzOzBnI6WMuAz0paQrE4/gepvFisUXSCzgY6KKZezYuImykW13dIWgD8JLUBOBE4\nTdIiirUmlQX8rfgNxZqvbtKUvgsoOnkLgMoNG+qNxE2n6HgsAk4AltZoU2v/ivMobgSyWMUt7c+t\nbiBpL0lXdt+1izZgUZrGdjTwnSbnnQT8l6R5dL8ZxWKK5+M+4NxSR/V+4HKK6Y6PpU5DxXXA7Fh3\ng4Ry/EObxN4o72UnAKeouGHHQxRr2SqupRhN/Gmp7PgG7buRNEnSwTWq3g48lDqJHwEqN1wRMBJ4\nocFh672Oa76eImI2RSfrYYrncH51mzqPK/u/AowDLpK0kOLf1HtbuNavUfzbuELFzUbmVtVvAzwR\nEZ019rWWdPR1AAOCvz8lj/OUx3nK4zzlcZ7yOE+tU7E+a2BJ652GRsTZTRtvYCRNAFZHxKVV5WOB\nMyOiZidF0s3ApRHRXqPuIGCXiLh8fcTcVyS9Azg5Ir7Y17G8ViQdDRwREeMbtIn6/Xpbp4O+nSYo\nBsLf746ODk8vyeA85XGe8jhPeZynPM5TfZKIiG4zkgZqB2skcBXwUnT9LqwNXqsdLElDKKZ5LoiI\nY1+7SO21puL2+++nuFvl3Q3auYM1IAyMDpaZmdnr1euqg2Vm60/RwbL+btiwEaxc+URfh2FmZrbB\nqtfBylmDZWYbmFrf6eCfrj/t7e19ev6B0rny3P08zlMe5ymP85THecrjPLXOHSwzMzMzM7Ne4imC\nZtaFpPDfBTMzM7PGPEXQzMzMzMxsPXMHy8ysBzwnPY/zlMd5yuM85XGe8jhPeZyn1rmDZWZmZmZm\n1ku8BsvMuvAaLDMzM7Pm6q3B2qgvgjGz/k3q9rfC+sCwHYax8rcr+zoMMzMza4FHsMysC0nBxL6O\nYgBYAeyyns8xsfhOsoGso6ODtra2vg6j33Oe8jhPeZynPM5THuepvh7dRVDSCEkP1qlrlzS6twJs\nhaSdJD0g6fZS2Yq+iKUeSWMlTc1o16/iLsuJrVkbSRMkndF7UXU9pqSpksY0ab+1pBskLZI0R9Ju\nGedol7RTk/qWXv+SxklaIunu9PgaSQslfT5dx5FN9m96randdyU9ko49qlR+iaSHJY1tJW4zMzMz\ny5dzk4v++PHp4cCMiPhYqaw/xpkTU3+Mu2Kgx1/xFWBBROwBnAR8t4/iOAU4NSI+IGk4sHdEjIqI\nyb11AkkfA0ZGxFuBfwF+UKmLiDOBc4F/7q3zbdDW9+jV64Q/9czjPOVxnvI4T3mcpzzOU+tyOlgb\nS5qWPnm/TtKm1Q0kjZe0OP1cmMoGpU/cF6eRg8+n8pGS7kyfrt8vqSdvU7YGnqsqe74Uz4npnAsk\nXZ
3KpkqaLGm2pEcrowWStpB0V4plkaRDU/kISUvTfsslTZf0obT/ckl7p3abS5qSRkbmSzokhfFX\n4A8Z1/J8Os5wSTPTyNxiSful8tWSzk/5uk/Sm1L5waVzziiVT5D049R2uaRTU/nYdPxbJC2TdIUK\nJ0v691LuTpV0SXVOm8VfL+9lkv5e0u2S5qVY3iZpK0lPlNpsLukpSYNrta9x/lUUuW5kN+AegIhY\nDuxcyVcDvwfW1HsdJ0dL+nXKZ+X5OknSZaXruVnSGElfA/YHpkj6FnAHsEN6vvevytNoSR3pum+X\nNKyFaz0M+HG61l8DQ0r7A6yk+PdjZmZmZutBTgdrV+DyiNgNWA18plwpaTvgQqANGAXskzopo4Ad\nImL3NHJQmS43HbgsIkYB7wP+pwdxDwY6ywUR8Z4Uz24UIxZtEbEnUH5DPDwi9gMOAS5KZX8BDo+I\nvYEDgUtK7UcC346IXVMejk37n5XOAXAOcHdE7Jv2v1jSZhHxq4g4PcW0l6Qra11IJW7gOOCXETEa\n2ANYmMq3AO5L+ZoFfDKVz4qIfSNiL+Ba4Eulw76L4vl4H/B1FaMlAPsAnwXeDvwDcARwHXCIpMGp\nzcnAf1TFVldm3iuuBP41IvahyOH3I+KPwAKtm7Z2cMrDmlrta5z/9IiYk2KYJOngGuddBFQ61O8G\ndgLe0uS6xkXEM9R/HQMMTtd/OnRZtdRtVC8izgPuB46LiC8BhwKPRsToiLi30k7SRsBlwCfSdU8F\nvtHCte4APF16/Ewqq+ik+Pdjr1a/ndzbv/j7U/I4T3mcpzzOUx7nKY/z1Lqcuwg+VXlTB0wDPgdc\nWqrfB2iPiBcAJE0HxgDnA7tImgzcBsyQtCWwfUTcBBARzT6N70aSKDog0+o0ORC4PiJeTOdYVar7\nRSpbKunNlUMC31SxtqUT2L5UtyIilqTth4G70vaDwM5p+8MUHZSz0uNNKN7AL6+cNCLmA59qcmnz\nKEY3NgZujIhFqfzliLgtbc8HPpi2d5R0HbAdsDFd3+7dmHL7e0n3AO+mGE2bGxFPQrH+B9g/Im5Q\nsSboYEnLgI0i4uEmsdbSKO9I2oKiw3d9eg5JcUPRyTsGmAkcC3yvSfuaImJCnaoLgcmSHqB47hYA\nazKv63GqXseluhvS7/nAiMzjNbs9367AO4E703UPAn5X3ajBtTbzDPA2SW+IiJfrtmovbe+Mp8OZ\nmZnZBq+joyOrw5nTwar+NL7WmptubxojYpWkPYCPAJ8GjgK+UKttlwNJn6EYpQng4xGxslQ3iOIN\n78vArRmxVyu/oazEcTzwRmDPiOhUcdOGTWu07yw97mRd7kQx2vBID+JZKyJmpU7eQcBVki6JiGnA\nK6Vma0rnvQy4OCJuTaM/5Tfc5edI1F8nVSmfQjH6tIyuIzS9aRDwYhqhq3YTcIGkbYDRFNP5tmzQ\nviURsZrSuqP0HD+euW+t1/Gpqbryeig/L3+j68hwtym1TQh4KI2U9sQzwI6lx29JZQBExOOSlgJP\nSvpA3c70AT08+4bEnc4snrufx3nK4zzlcZ7yOE95nKd12trauuRj0qRJNdvlTBEcIak8jW1WVf1c\nYIykbdM0s/HATElDKaZQ/Rz4KjA6Il4CnpZ0GICkTSRtVj5YRFwREXumqVMrq+o6I2JniqlWx9SJ\n9x7gKEnbpnNsU6ddpYM1BHguda4OoOtIRM6XAd0BnLZ2h9Jd21qh4o51z0XEFOBHFB2NRjFsxbqR\njZOq6g5LuR0KjKUYHYNi+uaI1FE9BrgXICLmUrwpHw9cUye+pU0uoWHeUydnhaRxpWPunur+RPGc\nTgZuiULd9q2SNCSNDCLpk8DM9FpExfq77Rrs2+11XK9p+v0EMEqFHSlGD+sevkbZcuBNkvZN599I\nGXc9LLkJODHtuy+wKiKeLV3P7hRdg+17OFJpZmZmZg3kdLCWAZ+VtIRicXzlrmQBkDpBZwMdFFOv\n5kXEzRTrPjokLQB+ktpA8ebvNEmLgNlAeQF+rt8A9uLOlAAAIABJREFU29aqSFP6LqDo5C1g3Zqq\neiNx0yk6HouAE4ClNdrU2r/iPIobgSxWcUv7c6sbNFqDVdIGLErT2I4GvtPkvJOA/5I0j+43o1hM\n8XzcB5xb6qjeD1xOMd3xsdRpqLgOmB0R3W7MkToZDTXIe9kJwCkqbtjxEMU6pIprKUYTf1oqO75B\n+24arEt6O/BQ6iR+hLQ+LE3BGwm80OCw9V7HNV9PETGbopP1MMVzOL+6TZ3Hlf1fAcYBF0laSPFv\n6r2515qmk66Q9CjwQ6rWTALbAE9ERGf1vtYir8HK4rn7eZynPM5THucpj/OUx3lq3YD8ouG03mlo\nRJzdtPEGRtIEYHVEXFpVPhY4MyJqdlIk3QxcGhHtNeoOAnaJiMvXR8x9RdI7gJMj4ot9HctrRdLR\nwBERMb5BG3/RcA5/0XAWf0FlHucpj/OUx3nK4zzlcZ7qU50vGh6oHayRwFXAS1XfhbXBa7WDJWkI\nxTTPBRFx7GsXqb3WVNx+//3AlyPi7gbt3MHqLyYO/A6WmZnZ69XrqoNlZuuPJP9R6CeG7TCMlb9d\n2byhmZmZvebqdbBy1mCZ2QYmIvzT5Ke9vX29n+P10Lny3P08zlMe5ymP85THecrjPLXOHSwzMzMz\nM7Ne4imCZtaFpPDfBTMzM7PGPEXQzMzMzMxsPXMHy8ysBzwnPY/zlMd5yuM85XGe8jhPeZyn1rmD\nZWZmZmZm1ku8BsvMuvAaLDMzM7PmvAbLzLJJqvkzfPjOfR2amZmZWb/mDpaZ1RA1f5599sk+jao/\n8Zz0PM5THucpj/OUx3nK4zzlcZ5a17CDJWmEpAfr1LVLGr1+wmpM0k6SHpB0e6lsRV/EUo+ksZKm\nZrTrV3GX5cTWrI2kCZLO6L2ouh5T0lRJYzL2+a6kRyQtlDQqo327pJ2a1Lf0+pc0TtISSXenx9ek\neD6fruPIJvs3vVZJx0lalH7ulbR7qe4SSQ9LGttK3GZmZmaWb6OMNv1xMcbhwIyIOLtU1h/jzImp\nP8ZdMdDjB0DSx4CREfFWSe8BfgDs2wehnAKcGhH3SRoO7B0Rb00xNu2MZ3ocGBMRf5D0UeBK0rVG\nxJmS5gL/DMzspfNtsNra2vo6hAHBecrjPOVxnvI4T3mcpzzOU+typghuLGla+uT9OkmbVjeQNF7S\n4vRzYSoblD5xX5w+Tf98Kh8p6c70yf39knbpQdxbA89VlT1fiufEdM4Fkq5OZVMlTZY0W9KjldEC\nSVtIuivFskjSoal8hKSlab/lkqZL+lDaf7mkvVO7zSVNkTRH0nxJh6Qw/gr8IeNank/HGS5pZhqZ\nWyxpv1S+WtL5KV/3SXpTKj+4dM4ZpfIJkn6c2i6XdGoqH5uOf4ukZZKuUOFkSf9eyt2pki6pzmmz\n+OvlvUzS30u6XdK8FMvbJG0l6YlSm80lPSVpcK32Nc6/iiL
XjRwG/BggIn4NDJE0rMk+vwfW1Hsd\nJ0dL+nXKZ+X5OknSZaXruVnSGElfA/YHpkj6FnAHsEN6vvevytNoSR3pum8vxdr0WiNiTkRUXndz\ngB2qmqyk+PdjZmZmZutBTgdrV+DyiNgNWA18plwpaTvgQqANGAXskzopo4AdImL3iNgDqHxCPx24\nLCJGAe8D/qcHcQ8GOssFEfGeFM9uwFeAtojYEyi/IR4eEfsBhwAXpbK/AIdHxN7AgcAlpfYjgW9H\nxK4pD8em/c9K5wA4B7g7IvZN+18sabOI+FVEnJ5i2kvSlbUupBI3cBzwy4gYDewBLEzlWwD3pXzN\nAj6ZymdFxL4RsRdwLfCl0mHfRfF8vA/4uorREoB9gM8Cbwf+ATgCuA44RNLg1OZk4D+qYqsrM+8V\nVwL/GhH7UOTw+xHxR2CB1k1bOzjlYU2t9jXOf3pEzEkxTJJ0cI3z7gA8XXr8DN07HtXHHRcRz1D/\ndQwwOF3/6cDE8u41jncecD9wXER8CTgUeDQiRkfEvZV2kjYCLgM+ka57KvCNFq617FTg9qqyTop/\nP/YqeU56Hucpj/OUx3nK4zzlcZ7yOE+ty5ki+FTlTR0wDfgccGmpfh+gPSJeAJA0HRgDnA/sImky\ncBswQ9KWwPYRcRNARDQbeehGkig6INPqNDkQuD4iXkznWFWq+0UqWyrpzZVDAt9UsbalE9i+VLci\nIpak7YeBu9L2g8DOafvDFB2Us9LjTYCdgOWVk0bEfOBTTS5tHsXoxsbAjRGxKJW/HBG3pe35wAfT\n9o6SrgO2AzYGymuhbky5/b2ke4B3U4ymzY2IJ6FY/wPsHxE3qFgTdLCkZcBGEfFwk1hraZR3JG1B\n0eG7Pj2HpLih6OQdQzFt7Vjge03a1xQRE3oQdzOPU/U6LtXdkH7PB0ZkHq/brTyr7Aq8E7gzXfcg\n4HfVjZpdq6QDKDrL+1dVPQO8TdIbIuLl+keYWNpuSz9mZmZmG66Ojo6sDmdP1mDVWnPT7U1jRKyS\ntAfwEeDTwFHAF2q17XIg6TMUozQBfDwiVpbqBlG84X0ZuDUj9mrlN5SVOI4H3gjsGRGdKm7asGmN\n9p2lx52sy50oRhse6UE8a0XErNTJOwi4StIlETENeKXUbE3pvJcBF0fErWn0p/yGu/wcifrrpCrl\nUyhGn5bRdYSmNw0CXkwjdNVuAi6QtA0wGrgH2LJB+1Y9A+xYevyWVNZUndfxqam68nooPy9/o+vI\ncLcptU0IeCiNlPaIihtbXAl8tNLhrYiIxyUtBZ6U9IH6nemJPT39BsNz0vM4T3mcpzzOUx7nKY/z\nlMd5Wqetra1LPiZNmlSzXc4UwREqbgwAxTS2WVX1c4ExkrZN08zGAzMlDaWYQvVz4KvA6Ih4CXha\n0mEAkjaRtFn5YBFxRUTsmaZOrayq64yInSmmWh1TJ957gKMkbZvOsU2ddpUO1hDgudS5OoCuIxHN\nRhugWEtz2todMu5QVzOY4o51z0XEFOBHFB2NRjFsxbqRjZOq6g5LuR0KjKUYHYNi+uaI1FE9BrgX\nICLmUnRAxgPX1IlvaZNLaJj3iFgNrJA0rnTM3VPdnyie08nALVGo274HbgJOTMfYF1gVEc+mx3el\naa411Xod12uafj8BjFJhR4rRw7qHr1G2HHhTihNJG6Xpl1nS6+hnwD9GxGM16ncHdqEYSe7JSKWZ\nmZmZNZDTwVoGfFbSEorF8T9I5QGQOkFnAx3AAmBeRNxMscalQ9IC4CepDRRvdE+TtAiYDTS72UAt\nvwG2rVWRpvRdQNHJW8C6NVX1RuKmU3Q8FgEnAEtrtKm1f8V5FDcCWazilvbnVjdotAarpA1YJOkB\n4GjgO03OOwn4L0nz6H4zisUUz8d9wLmljur9wOUU0x0fS52GiuuA2aUbJJTjH9ok9kZ5LzsBOEXF\nDTseoliHVHEtxWjiT0tlxzdo3029dUlpiuUKSY8CPyStI0xT8EYCLzQ4bL3Xcc3XU0TMpuhkPUzx\nHM6vblPncWX/V4BxwEWSFlL8m3pv7rUCX6P4t3GFipuNzK2q3wZ4IiI6u+9qrfCc9DzOUx7nKY/z\nlMd5yuM85XGeWqeIfn+X7W7SeqehVbdpN4q7CAKrI+LSqvKxwJkRUbOTIulm4NKIaK9RdxCwS0Rc\nvj5i7iuS3gGcHBFf7OtYXiuSjgaOiIjxDdpE/X69GIh/M9aHjo4OT5vI4DzlcZ7yOE95nKc8zlMe\n56k+SUREtxlJA7WDNRK4CngpIj7Wx+H0K612sCQNoZjmuSAijn3tIrXXmorb778f+HJE3N2gnTtY\nZmZmZk28rjpYZrb+uINlZmZm1ly9DlbOGiwz2+Co5s+wYbl3o3/985z0PM5THucpj/OUx3nK4zzl\ncZ5al3ObdjPbwHiUyszMzKxnPEXQzLqQFP67YGZmZtaYpwiamZmZmZmtZ+5gmZn1gOek53Ge8jhP\neZynPM5THucpj/PUOnewzMzMzMzMeonXYJlZF16DZWZmZtac12CZmZmZmZmtZ+5gmVk3kmr+DH/L\n8L4Ord/wnPQ8zlMe5ymP85THecrjPOVxnlrn78Eys+4m1i5+duKzr2kYZmZmZgNNwxEsSSMkPVin\nrl3S6PUTVmOSdpL0gKTbS2Ur+iKWeiSNlTQ1o12/irssJ7ZmbSRNkHRG70XV9ZiSpkoak7HPdyU9\nImmhpFEZ7dsl7dSkvqXXv6RxkpZIujs9vibF8/l0HUc22f9VXaukSyQ9LGlsK3FbbW1tbX0dwoDg\nPOVxnvI4T3mcpzzOUx7nqXU5UwT742r3w4EZEfGxUll/jDMnpv4Yd8VAjx8ASR8DRkbEW4F/AX7Q\nR6GcApwaER+QNBzYOyJGRcTk3jpBo2uNiDOBc4F/7q3zmZmZmVlXOR2sjSVNS5+8Xydp0+oGksZL\nWpx+Lkxlg9In7oslLZL0+VQ+UtKd6dP1+yXt0oO4twaeqyp7vhTPiemcCyRdncqmSposabakRyuj\nBZK2kHRXimWRpENT+QhJS9N+yyVNl/ShtP9ySXundptLmiJpjqT5kg5JYfwV+EPGtTyfjjNc0sw0\nMrdY0n6pfLWk81O+7pP0plR+cOmcM0rlEyT9OLVdLunUVD42Hf8WScskXaHCyZL+vZS7UyVdUp3T\nZvHXy3uZpL+XdLukeSmWt0naStITpTabS3pK0uBa7WucfxVFrhs5DPgxQET8GhgiaViTfX4PrKn3\nOk6OlvTrlM/K83WSpMtK13OzpDGSvgbsD0yR9C3gDmCH9HzvX5Wn0ZI60nXfXoq1N651JcW/H3uV\nPCc9j/OUx3nK4zzlcZ7yOE95nKfW5XSwdgUuj4jdgNXAZ8qVkrYDLgTagFHAPqmTMgrYISJ2j4g9\ngMp0uenAZRExCngf8D89iHsw0FkuiIj3pHh2A74CtEXEnkD5DfHwiNgPOAS4KJX9BTg8IvYGDgQu\nKbUfCXw7InZNeT
g27X9WOgfAOcDdEbFv2v9iSZtFxK8i4vQU016Srqx1IZW4geOAX0bEaGAPYGEq\n3wK4L+VrFvDJVD4rIvaNiL2Aa4EvlQ77Lorn433A11WMlgDsA3wWeDvwD8ARwHXAIZIGpzYnA/9R\nFVtdmXmvuBL414jYhyKH34+IPwILtG7a2sEpD2tqta9x/tMjYk6KYZKkg2ucdwfg6dLjZ1JZo+sa\nFxHPUP91DDA4Xf/pdF211G1ULyLOA+4HjouILwGHAo9GxOiIuLfSTtJGwGXAJ9J1TwW+0YvX2knx\n78fMzMzM1oOcm1w8VXlTB0wDPgdcWqrfB2iPiBcAJE0HxgDnA7tImgzcBsyQtCWwfUTcBBARzT6N\n70aSKDog0+o0ORC4PiJeTOdYVar7RSpbKunNlUMC31SxtqUT2L5UtyIilqTth4G70vaDwM5p+8MU\nHZSz0uNNgJ2A5ZWTRsR84FNNLm0exejGxsCNEbEolb8cEbel7fnAB9P2jpKuA7YDNgbKa6FuTLn9\nvaR7gHdTjKbNjYgnoVj/A+wfETeoWBN0sKRlwEYR8XCTWGtplHckbUHR4bs+PYekuKHo5B0DzASO\nBb7XpH1NETGhB3E38zhVr+NS3Q3p93xgRObxun1XQpVdgXcCd6brHgT8rrrRq7jWZ4C3SXpDRLxc\nt1V7aXtnoCfjzK9znpOex3nK4zzlcZ7yOE95nKc8ztM6HR0dWSN6OR2s6k/ja6256famMSJWSdoD\n+AjwaeAo4Au12nY5kPQZilGaAD4eEStLdYMo3vC+DNyaEXu18hvKShzHA28E9oyIThU3bdi0RvvO\n0uNO1uVOFKMNj/QgnrUiYlbq5B0EXCXpkoiYBrxSaramdN7LgIsj4tY0+lN+w11+jkT9dVKV8ikU\no0/L6DpC05sGAS+mEbpqNwEXSNoGGA3cA2zZoH2rngF2LD1+Syprqs7r+NRUXXk9lJ+Xv9F1ZLjb\nlNomBDyURkp7ouG1RsTjkpYCT0r6QN3O9AE9PLuZmZnZ61RbW1uXDuekSZNqtsuZIjhCUnka26yq\n+rnAGEnbpmlm44GZkoZSTKH6OfBVYHREvAQ8LekwAEmbSNqsfLCIuCIi9kxTp1ZW1XVGxM4UU62O\nqRPvPcBRkrZN59imTrtKB2sI8FzqXB1A15GIZqMNUKylOW3tDhl3qKsZTHHHuuciYgrwI4qORqMY\ntmLdyMZJVXWHpdwOBcZSjI5BMX1zROqoHgPcCxARcynelI8HrqkT39Iml9Aw7xGxGlghaVzpmLun\nuj9RPKeTgVuiULd9D9wEnJiOsS+wKiKeTY/vStNca6r1Oq7XNP1+Ahilwo4Uo4d1D1+jbDnwphQn\nkjZK0y9z1b3WVLY7xXjU9j0cqbTEc9LzOE95nKc8zlMe5ymP85THeWpdTgdrGfBZSUsoFsdX7koW\nAKkTdDbQASwA5kXEzRTrPjokLQB+ktpA8ebvNEmLgNlAs5sN1PIbYNtaFWlK3wUUnbwFrFtTVW8k\nbjpFx2MRcAKwtEabWvtXnEdxI5DFKm5pf251g0ZrsEragEWSHgCOBr7T5LyTgP+SNI/uN6NYTPF8\n3AecW+qo3g9cTjHd8bHUaai4DpgdEd1uzJE6GQ01yHvZCcApKm7Y8RDFOqSKaylGE39aKju+Qftu\n6q1LSlMsV0h6FPghaR1hmoI3EnihwWHrvY5rvp4iYjZFJ+thiudwfnWbOo8r+78CjAMukrSQ4t/U\ne1/ttZZsAzwREZ3V+5qZmZnZq6eIfn+X7W7SeqehEXF208YbGEkTgNURcWlV+VjgzIio2UmRdDNw\naUS016g7CNglIi5fHzH3FUnvAE6OiC/2dSyvFUlHA0dExPgGbaLeFw0zEQbi3wwzMzOz3iaJiOg2\nIylnBKs/ugHYT6UvGraekTRE0nLgT7U6VwARcevrrXMFEBEPb2Cdq0uAL1JMQTUzMzOz9WBAjmCZ\n2fojqe4fhWE7DGPlb1fWq96gdHR0+M5KGZynPM5THucpj/OUx3nK4zzVV28EK+cugma2gfEHL2Zm\nZmY94xEsM+tCUvjvgpmZmVljr7c1WGZmZmZmZv2OO1hmZj3g7wXJ4zzlcZ7yOE95nKc8zlMe56l1\n7mCZmZmZmZn1Eq/BMrMuvAbLzMzMrDmvwTIzMzMzM1vPfJt2M+tG6vZhjJmZbQA2pO879Pc75XGe\nWucOlpl1N7GvAxgAVgC79HUQA4DzlMd5yuM85XkVeXp24rO9GorZhshTBK3PSVrd1zFUSBot6UFJ\nU0plK/oolj0kfaz0+CRJEzL2u13Si5JuqiofL2mZpNPXR7wbHL/Jy+M85XGe8jhPeZynLB6VyeM8\ntc4dLOsP+tMdFU4AvhcRp5TKWopPUm/9uxoFfLyqLCeWb1FcR9cdI64BxgLuYJmZmZmtJ+5gWb8h\naZKkBZIekPRbSVMkjZC0VNJUScslTZf0IUmz0+O90777SLpP0nxJ90p6aw/D2Bp4rqrs+XSOsZJm\nSroljQRdUYp9taSLJS0A9k0jYR2S5qURpWGp3WmSHpa0UNJ/prLN07XOSfEfImlj4Fzg6JSPo4D/\nBV5qdgER0V6vXUQ8CwxpOSvWXZ+Maw5AzlMe5ymP85THecri73fK4zy1zmuwrN+IiAnABElDgP8G\nLktVI4FPRMQSSfcDx0bEfpIOBc4BjgCWAvtHRKekDwDfBMb1IIzBQGdVXO8pPdwHeDvwFHCHpCMj\n4gZgC+BXEfFFSRsBM4FDI+L3ko4GvgGcAvwbsHNEvCJpq3TMc4C7I+KUdO1zgbuArwN7RcRp1UFK\nOiTVTezBNfqDFTMzM7P1xB0s64+mAZdExEJJI4AVEbEk1T1M0fkAeBAYkba3Bn6cRq6CHry2U8fo\nHazr2NUyNyKeTO2vAfYHbgDWpN8AuwLvBO5UcTu+QcDvUt0i4D8l/QL4RSr7MHCIpLPS402AnRrF\nGhE3AzfnX10XL0gaGRGP1W3RXtreGc/nr8U5yeM85XGe8jhPeZynLF5blMd5WqejoyNrRM8dLOtX\nJE0EnoqIH5eKXy5td5Yed7LuNXwecE9EHJk6ZeUuQuXY5wMHARERo6vq3kIxcvRoRNzfIMTqNVCV\nx38ufTuvgIciYr8a+x8EjAEOBc6R9K7U/hMR8UhVTPs2iOPVmAwslPS5iLiqZosD1tOZzczMzAao\ntra2Lh3OSZMm1WznqULWHwjWTnv7IPD5WvVNDAGeSdsn12oQEV+NiD2rO1ep7rfADkUYamtwnnen\ndWGDgGOAWTViXA68qdJBkrSRpN1S3U4RMRM4G9iKYmrhHcDaaYCSRqXN1alNT4j6efsK8A91O1eW\nx2sc8jhPeZynPM5THucpi9cW5XGeWucOlvUHlZGf04HtgXnpxg4Tq+qrt8u+BVwoaT49fF2nEahH\ngW0bNLsfuJxiquJjEVGZ5rc2roh4hWL910WSFgILgPemKYjTJC0C5gOTI+K
PFKNvG0taLOlBiptb\nQDEKt1vpJhdrpRthTKwVoKT/Bq4FDpT0lKQPVTXZJN3swszMzMx6mdbNajIzSd8DHoyIH9SoGwuc\nGRGHvvaR9Q5JbwYWRcR2DdqEv2jYzGwDNRH83tAsjyQiotuMIY9gmXX1Y+Dk8hcNv15IGg/MoBjt\nMzMzM7P1wCNYZtaFJP9RMDPbQA3bYRgrf7uyr8N4TXR0dPgOeRmcp/rqjWD5LoJm1o0/eGnO/+Hk\ncZ7yOE95nKc8zpNZ3/IIlpl1ISn8d8HMzMysMa/BMjMzMzMzW8/cwTIz6wF/L0ge5ymP85THecrj\nPOVxnvI4T61zB8vMzMzMzKyXeA2WmXXhNVhmZmZmzXkNlpmZmZmZ2Xrm27SbWTdStw9jzMzMzAac\nvvhuN08RNLMuJAUT+zqKAWAFsEtfBzEAOE95nKc8zlMe5ymP85RnoOdp4vr7fk9PETTrZySNkPRg\nC+2/JWmppIWSfiZpqxbPt6uk+yT9RdIZrUdsXQzk/2xeS85THucpj/OUx3nK4zzlcZ5a5g6WWd9q\n5SOVGcA7ImIU8Ajw5RbP9Xvgc8C3W9zPzMzMzDK5g2XWtzaWNE3SEv3/9u49yJK6POP490FQEQUV\nw1Jh5RZBRQV2uYUC4mAUbwHRRAyKAhpjFZRsAhgTk8IlmggYJQQvkYgrohhBUYHyhsAoKyLI7nJH\no1wES1YtAVeNROTNH6cHembOzPYZZvfMst9P1anp/p3uc9556uzsvNO/7k7OTfL4JLslWZ5kWZLr\nkvweoKq+XlUPNvtdCcwf5I2q6udVdQ3wwCx/D+un24ZdwDrCnLoxp27MqRtz6sacujGngdlgScP1\nTOADVbUTsAo4qqquqaoFVbUQ+Ar9jzi9EfjyWqxTkiRJHXgVQWm4flRVVzbLn6Q3he/9AEleAywA\nDmjvkOQfgd9V1TlrrKrLWsvb4vzrfsykG3Pqxpy6MaduzKkbc+rGnB4yOjrK6OjoarezwZKGa+I5\nWAWQ5LnACcB+7bv+JjkCeBnwgn4vluTdwMuBao6Azcz+M95TkiTpUWlkZISRkZGH1k888cS+2zlF\nUBqubZLs1Sy/FliaZDPgHOANVfWLsQ2TvAR4G3BQVd3f78Wq6p9a0wun442uHinnpHdjTt2YUzfm\n1I05dWNO3ZjTwDyCJQ3XLcDRSZYANwAfBg4Btgb+K707/o4djTodeCxwcXMj4Cur6qiub5RkHvBd\n4EnAg0kWATtV1a9m8xuSJElan3mjYUnjeKNhSZL0qLHYGw1LkiRJ0jrLI1iSxkniDwVJkvSoMG+r\nedx9191r5LWnOoLlOViSJvEPL6s3Ojo67kpC6s+cujGnbsypG3Pqxpy6MafBeQRL0jhJyp8LkiRJ\n0/McLEmSJElaw2ywJGkGutzJXebUlTl1Y07dmFM35tSNOQ3OBkuSJEmSZonnYEkax3OwJEmSVs9z\nsCRJkiRpDbPBkjRJEh8+fMzBx5bztxz2j4eh8lyQbsypG3PqxpwG532wJE22eNgFrANuA7YbdhHr\nAHPqpmNOKxevXOOlSJIeGc/BkuaYJLcBu1XVL5Israp9kzwfOL6qDnwEr3sm8GfAyqraeZrtygZL\nmqMWeyNwSZorEs/BktYVD/32VFX79hufoSXAix/ha0iSJGkaNljSkCR5S5LlSZYluTXJJWNPtbZZ\n1dplsyQXJbklyYcGfb+qWgrc8wjL1pjbhl3AOsKcujGnTjwXpBtz6sacujGnwdlgSUNSVR+pqgXA\nnsCdwPv6bdZa3gM4Gng28Iwkr1rzVUqSJGkQXuRCGr7/AC6tqi+tZrurquoOgCSfBvYFzl8jFV3W\nWt4WL1LQj5l0Y07dmFMnIyMjwy5hnWBO3ZhTN+b0sNHR0U5H9GywpCFKcgTw9Ko6qsPmE8/BGree\nZE/gI834CVV10YwL23/Ge0qSJD0qjYyMjGs4TzzxxL7bOUVQGpIkuwHHAYdNt1lrea8k2yTZAHgN\nsLS9YVVdVVULqmrhNM1VJrymZspzZroxp27MqRPPBenGnLoxp27MaXA2WNLwHA08BbisudDFGc14\n+8hUe/kq4APAjcAPq+rzg7xZknOAK4Adk/woyZEzL12SJEn9eB8sSeN4HyxpDlvsfbAkaa7wPliS\nJEmStIbZYEnSTHjOTDfm1I05deK5IN2YUzfm1I05Dc6rCEqabPGwC5DUz7yt5g27BEnSangOlqRx\nkpQ/FyRJkqbnOViSJEmStIbZYEnSDDgnvRtz6sacujGnbsypG3PqxpwGZ4MlSZIkSbPEc7AkjeM5\nWJIkSavnOViSJEmStIbZYEnSDDgnvRtz6sacujGnbsypG3PqxpwGZ4MlaZIkfR9bzt9y2KVJkiTN\naZ6DJWmcJDXljYYXgz8zJEmSPAdL6iTJg0ne21o/LskJQ6plm6aeo1tjpyd5wzDqkSRJ0urZYEnj\n3Q+8KslTh11I46fAoiQbDrsQjeec9G7MqRtz6sacujGnbsypG3ManA2WNN4DwBnAsROfaI4oXZJk\nRZKLk8xvxpckOS3Jt5L8IMmrWvscn+SqZp8OkRXqAAAKwklEQVR3zqCenwGXAEf0qWfXJN9uXvtz\nSTZrxi9LclKS7yS5Jck+zfgGSU5pxlckefMM6pEkSdI0bLCk8Qr4IPC6JE+a8NzpwJKq2hU4p1kf\ns2VV7QMcCJwMkORFwA5VtSewANg9yb4zqOdk4PgkE+f4ngW8rannBqDdwD2mqvYC/hYeOqPqTcC9\nzfiewF8n2WbAetQYGRkZdgnrBHPqxpy6MaduzKkbc+rGnAbntCNpgqr6VZKzgEXA/7ae2ht4ZbN8\nNk0j1fhCs+/NSbZoxg4AXpRkGRBgE2AHYOmA9dye5ErgdWNjSTYFNquqsdc6Czi3tdv5zddrgLEm\n6gDgeUle3axv2tRzx6Q3vay1vC2w3SAVS5IkPfqMjo52mjLpESypv9PoHfHZpDU23eXz7m8tp/X1\nPVW1sKoWVNWOVbWkvVOSg5MsT7IsycJpXv89wNsnjE26ak2fen7Pw39ICfDWppYFVfVHVfX1vnvv\n33rYXPXlnPRuzKkbc+rGnLoxp27MqRtzetjIyAiLFy9+6DEVGyxpvABU1T30jgi9qfXcFcChzfJh\nwOXTvQbwVeCNSTYBSPKHSf6gvWFVfaFpdhZW1bJp6vkecBNwULP+S+AXY+dXAa8HvtGhnqPGLpiR\nZIckG0+xjyRJkmbA+2BJLUl+WVWbNstbALcCJ1fVu5JsDSwBNqd38Ykjq+quJB8DLqqq8/u8xluB\nsYtJrAIOq6rbOtayDXBhVe3crO8MLAPeWFWfSLIL8J/Axk2dR1bVfUkuBY6vqmVJNgeurqrtm3O4\n3k3vPLHQu0LhwVW1asL7eh8sSZKk1ZjqPlg2WJLGscGSJElaPW80LEmzyDnp3ZhTN+bUjTl1Y07d\nmFM35jQ4GyxJky3u/5i31bwhFT
T3rFixYtglrBPMqRtz6sacujGnbsypG3ManJdplzSJ0wBX7957\n7x12CesEc+rGnLoxp27MqRtz6sacBucRLEmSJEmaJTZYkjQDt99++7BLWCeYUzfm1I05dWNO3ZhT\nN+Y0OK8iKGmcJP5QkCRJ6sDLtEuSJEnSGuQUQUmSJEmaJTZYkiRJkjRLbLAkSZIkaZbYYEkCIMlL\nktyS5PtJ3j7seuaSJGcmWZnkutbYU5J8Lcn3knw1yWbDrHHYksxPcmmSG5Ncn+SYZtycWpI8Lsl3\nkixvsvrXZtyc+kiyQZJlSS5o1s2pjyS3J7m2+Vxd1YyZ1QRJNktyXpKbm39/e5nTeEl2bD5Hy5qv\n9yU5xpwGY4MliSQbAB8AXgw8Bzg0ybOGW9WcsoReNm1/D3y9qp4JXAr8w1qvam55ADi2qp4D7A0c\n3XyGzKmlqu4H9q+qBcDOwAuS7IM5TWURcFNr3Zz6exAYqaoFVbVnM2ZWk50GfKmqng3sAtyCOY1T\nVd9vPkcLgd2AXwOfx5wGYoMlCWBP4H+q6o6q+h3w38ArhlzTnFFVS4F7Jgy/AjirWT4LOHitFjXH\nVNXdVbWiWf4VcDMwH3OapKp+0yw+jt7/w/dgTpMkmQ+8DPhoa9ic+guTf6czq5YkmwL7VdUSgKp6\noKruw5ym80Lgh1V1J+Y0EBssSQBbAXe21u9qxjS1LapqJfSaC2CLIdczZyTZFtgVuBKYZ07jNdPe\nlgN3A6NVdRPm1M+pwNuA9v1kzKm/Ai5OcnWSv2rGzGq87YCfJ1nSTH87I8kTMKfpvAY4p1k2pwHY\nYEnS7PCmgkCSJwKfBRY1R7Im5rLe51RVDzZTBOcD+yUZwZzGSfJyYGVzVHTSTTxb1uucWvZppnS9\njN703P3wMzXRhsBC4INNVr+mN+3NnPpIshFwEHBeM2ROA7DBkgTwY2Dr1vr8ZkxTW5lkHkCSLYGf\nDrmeoUuyIb3m6uyq+mIzbE5TqKpfAl8CdsecJtoHOCjJrcCn6Z2rdjZwtzlNVlU/ab7+DPgCvWnf\nfqbGuwu4s6q+26x/jl7DZU79vRS4pqp+3qyb0wBssCQBXA08I8k2SR4L/CVwwZBrmmvC+L+kXwAc\n0SwfDnxx4g7roY8BN1XVaa0xc2pJ8rSxq28l2Rh4EbAccxqnqt5RVVtX1fb0fh5dWlWvBy7EnMZJ\n8oTmyDFJNgEOAK7Hz9Q4zfS2O5Ps2Az9KXAj5jSVQ+n9cWOMOQ0gVR7hk9S7TDu9KyxtAJxZVScN\nuaQ5I8k5wAiwObASeCe9vxKfBzwduAM4pKruHVaNw9ZcCe+b9H6xq+bxDuAq4FzMCYAkz6N3gvjY\nRQnOrqp/S/JUzKmvJM8Hjquqg8xpsiTb0bvKW9GbBvepqjrJrCZLsgu9i6ZsBNwKHAk8BnMapzk3\n7Q5g+6pa1Yz5eRqADZYkSZIkzRKnCEqSJEnSLLHBkiRJkqRZYoMlSZIkSbPEBkuSJEmSZokNliRJ\nkiTNEhssSZIkSZolNliSJGmdkuSiJJuuxffbJclLW+sHJvm7tfX+ktYt3gdLkiStFUkeU1W/H3Yd\n/UxXW5LDgd2r6q1ruSxJ6yCPYEmStJ5J8oYk1yZZnuSsZmybJJckWZHk4iTzm/ElST6U5NtJfpBk\nJMnHk9yU5GOt11yV5P1Jbmj237wZvyzJqUmuBo5J8rQkn03yneaxd7Pd85t6liW5JskmSbZM8o1m\n7Lok+zTb3pbkqc3ysUmub55f1PpebkpyRlPPV5I8rk8OS5J8OMmVwMlJ9khyRfP+S5PskGQj4J+B\nQ5o6Xp3k8CSnT5ebpPWXDZYkSeuRJDsB7wBGqmoBsKh56nRgSVXtCpzTrI95clXtDRwLXACcUlU7\nATsn2bnZZhPgqqp6LvBN4J2t/Teqqj2q6lTgNOD9VbUX8BfAmc02xwFHVdVCYD/gt8Brga80Y7sA\nK5ptq/leFgKHA3sAewNvTrJLs80zgNObeu4D/nyKSLaqqj+uquOBm4F9q2q3pv73VNXvgBOAz1TV\nwqo6r13DanKTtB7acNgFSJKkteoFwHlVdQ9AVd3bjO8NvLJZPhs4ubXPhc3X64GfVNVNzfqNwLbA\ndcCDwLnN+CeBz7X2/0xr+YXAs5OkWX9ikicA3wJOTfIp4Pyq+nFz1OvM5ijSF6vq2gnfy77A56vq\ntwBJzqfXnF0I3FZV1zfbXdPU2c95reUnA59IsgO9BqrL70kTczulwz6SHsU8giVJkuDhIzL93N98\nfbC1PLY+VRPSfr1ft5YD7FVVC5rH1lX1m6o6GXgTsDHwrSQ7VtXlwJ8APwY+nuSwAb6fdp2/n6bO\ndm3vAi6tqucBBwKP7/A+E3Pz5HZpPWeDJUnS+uVS4NWtc5ie0oxfARzaLB8GXD7F/plifAN6U/4A\nXgcsnWK7r/HwtETGpvQl2b6qbqyqU4CrgWcl2Rr4aVWdCXwUWDihhsuBg5M8Pskm9I4kXT5hm0Fs\nSq+ZAziyNb6qea6frrlJWk/YYEmStB5ppvf9C/CNJMuB9zVPHQMcmWQFvQZprAma7gjNxKNUeya5\nHhihd2GIfvsvAnZvLrJxA/CWZvxvmotVXAv8H/Dl5nWuTbIMOAT49/ZrVtVy4OP0GrJvA2e0phF2\nOZI0cZv3AicluYbxvyNdBuw0dpGLCftMlZuk9ZSXaZckSY9YklVV9aRh1yFJw+YRLEmSNBv8i60k\n4REsSZIkSZo1HsGSJEmSpFligyVJkiRJs8QGS5IkSZJmiQ2WJEmSJM0SGyxJkiRJmiX/DxZWyxUh\naxJjAAAAAElFTkSuQmCC\n", "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "ratios = compression_ratios() \n", "labels = ['%s - %s' % (c, o)\n", " for c, o in compression_configs]\n", "\n", "fig = plt.figure(figsize=(12, len(compression_configs)*.3))\n", "fig.suptitle('Compression ratio', fontsize=14, y=1.01)\n", "ax = fig.add_subplot(1, 1, 1)\n", "\n", "y = [i for i, (c, o) in enumerate(compression_configs) if c == 'blosc' and o['shuffle'] == 2]\n", "x = [ratios[i] for i in y]\n", "ax.barh(bottom=np.array(y)+.2, width=np.array(x), height=.6, label='bit shuffle', color='b')\n", "\n", "y = [i for i, (c, o) in enumerate(compression_configs) if c != 
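`compression_ratios()` is defined earlier in the notebook. For orientation, here is a minimal sketch of how such a ratio can be computed for a single configuration; the helper name is hypothetical, and it assumes the `nbytes`/`nbytes_stored` array properties of more recent zarr releases (older versions exposed the same information under different names), reusing `genotype_sample`, `chunks` and the configuration tuples from above:

```python
def compression_ratio(compression, compression_opts):
    # Build the compressed array, then compare logical size with stored size.
    # Assumption: the zarr array exposes nbytes (uncompressed size) and
    # nbytes_stored (bytes actually stored after compression).
    z = zarr.array(genotype_sample, chunks=chunks,
                   compression=compression, compression_opts=compression_opts)
    return z.nbytes / z.nbytes_stored
```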
'blosc' or o['shuffle'] == 0]\n", "x = [ratios[i] for i in y]\n", "ax.barh(bottom=np.array(y)+.2, width=np.array(x), height=.6, label='no shuffle', color='g')\n", "\n", "ax.set_yticks(np.arange(len(labels))+.5)\n", "ax.set_yticklabels(labels, rotation=0)\n", "\n", "ax.set_xlim(0, max(ratios)+3)\n", "ax.set_ylim(0, len(ratios))\n", "ax.set_xlabel('compression ratio')\n", "ax.grid(axis='x')\n", "ax.legend(loc='upper right')\n", "\n", "fig.tight_layout();\n" ] }, { "cell_type": "code", "execution_count": 56, "metadata": { "collapsed": true }, "outputs": [], "source": [ "@functools.lru_cache(maxsize=None)\n", "def compression_decompression_times(repeat=3, number=1):\n", " c = list()\n", " d = list()\n", " for compression, compression_opts in compression_configs:\n", " \n", " def compress():\n", " zarr.array(genotype_sample, chunks=chunks, compression=compression, \n", " compression_opts=compression_opts)\n", " \n", " t = timeit.Timer(stmt=compress, globals=locals())\n", " compress_times = t.repeat(repeat=repeat, number=number)\n", " c.append(compress_times)\n", " \n", " z = zarr.array(genotype_sample, chunks=chunks, compression=compression, \n", " compression_opts=compression_opts)\n", " \n", " def decompress():\n", " z[:]\n", " \n", " t = timeit.Timer(stmt=decompress, globals=locals())\n", " decompress_times = t.repeat(repeat=repeat, number=number)\n", " d.append(decompress_times)\n", " \n", " log(compression, compression_opts, compress_times, decompress_times)\n", " \n", " return c, d\n", " " ] }, { "cell_type": "code", "execution_count": 59, "metadata": { "collapsed": false }, "outputs": [ { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1gAAAMWCAYAAADszSe0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3XmcVcWZ//HPlwaCoAhowqYsEjEuAWk3FAIoanSiSAwi\niMsoxjjmF3FLhnGjiRE1I05MoiZmEDUaI86QDOCggtI4GgnKqoCorIriwqKiEZV+fn+cunD69l2b\nPrdbeN6v13n1vafqVNV57hFP3ao6V2aGc84555xzzrmd16i+G+Ccc84555xzuwrvYDnnnHPOOedc\nHfEOlnPOOeecc87VEe9gOeecc84551wd8Q6Wc84555xzztUR72A555xzzjnnXB3xDpZzzrmvFEmz\nJP26vttRXyRdLWlVfbfDOedcZt7Bcs65BkzSNyTdKekNSZ9JelPS45JOre+21aPvA/9W342oZ/4j\nls4510A1ru8GOOecy0xSZ+BvwIfAvwKLib4YOxG4B+hSb43LQlITM/siyTrMbHOS5TvnnHM7w0ew\nnHOu4boHqAKOMLP/NrPXzWy5md0F9EhlkrS/pL9I+ihs/y2pYyx9jKSXJZ0vaZWkLZImSGoi6Sdh\nVOwDSf8erzzkHSPpj5I+lvSOpKvT8lRJuizUuQW4Oew/RNK00J53Jf1JUtvYcYdJminpw1D2Akn9\nQ1pjSb+WtC6M2q2RNC52bLUpgpJaSXpA0kZJn0qaIemQWPoFoY4TQhy2SHomdGCzkvQjScsl/UPS\n+5KmS2oU0iZKmirpOknrQ/n3SfpaWhk/C6OPn0paJGlEWnoHSX8Obd8YYvbNDGW8E2J5P7BnrnY7\n55yrX97Bcs65BkhSa+C7wG/N7B/p6Wb2UcgnYArwdaA/MADoAPwl7ZAuwCDge0RT7IYCjwOHE42I\njQRGSToj7bgrgSVAL+BGYJykwWl5bgxlHQbcJakdMJtoxO1IYCDQAvif2DF/At4O6T2BCuCzkDYK\nOCO08ZvA2cDymlHa7gHgKOD08PdT4Im0zs7XgNHAPwO9gVbA77IVKOkI4LfAGKA7cALwRFq2/kQd\n3ROAM4GTgdtiZdwMXAj8C3AwcAvwu9T0Tkl7ALOAT4DvhHa9DcyU1CzkGQrcBNwAlAOvAVfliIVz\nzrn6Zma++eabb741sI2oo1AFnJEn30nAF8D+sX1dgW3ACeH9GKKb+D1jeR4D3gUax/bNAn4de78K\neDKtvj8Az8beVwG/SsszFpiRtq91yHtkeP8hcF6Wc7oz/fi09O3tBA4M5faJpbcENgMXhfcXhHh8\nM5bnHOAfOer4PrAJaJElfSKwEdgjtm8E8A9gD6A5UUevT9px/wFMC68vApanpZcBHwBDwvvngd+l\n5ZkBrKzva9Q333zzzbfMm49gOedcw6QC830LeNvM3kztMLNVRCMhh8TyrTWzLbH37wKvmdmXafu+\nkVb+CxneH5K2b17a+yOA/mHa3MeSPgbWEj2YoVvIcwcwQdLTkq6VdFDs+PuBXpJek/RbSf8URuoy\n+RZR52lOaodFo3svp7Vzq5m9EXv/NtBUUqss5c4A1gCrJT0UplemT81bbNVHF18AmoZzPARoRjSS\nFo/DpcABIX85cEBa+mai0bVUnA6On1usHueccw2UP+TCOecapteJOiQHU31qXTHiT5pLf/CEZdlX\nVot6Pkl73wiYBlxNzY7iuwBmNlbSQ8CpwCnAGEk/MrP7zWxBWB/1XaLphQ8AC4lG64oRP/8vs6Rl\n/KLRzLZIKgf6hXpHE02PPNLM1ueoM3W+qXJPA95My/NFLM8CoimQ6XHamKMO55xzDZiPYDnnXANk\nZpuAJ4H/J6l5erqkvcPLZUAHSZ1iaQcQrcNaUgdN6
Z32/thQZy7zgUOJRs1Wpm3bO2NmtsLMfmtm\npwETgItjaZ+Y2WQz+zHRurGB6Q9/CJYR/b/s2NQOSS2Bb7OT529mVWZWaWbXEa0Ta0HUYUr5dlhH\nlXIssBVYASwNr7tkiEGqwzWfaI3Zhgx5Uk9KXEbmz8A551wD5R0s55xruH5MNLLxkqQhkrpLOkjS\nvwCLAMxsJtF0uIclHSHpSOAh4CUzq6yDNvSW9K+Svinph8C5RNP7crkL2BuYJOloSV0lnSjp95Ja\nSGoWpv71l9RZ0jFAX0KHSNKVkoZJ+lboVI0gWrP1VnpFYdrfFOD3kvpK+nY4/w+BR/K0M+s0TEnf\nk3S5pMND53UE0dP7lsayNQbuU/TExJOIHmJxr5n9I0zHvB24XdKFkrpJ6hmeTJjqSD5MNKL3P5L6\nSeoS/t4uKTVF8E7gAkkXh8/g34Cj85yXc865euRTBJ1zroEys1Vhmtq1wK1AR2AD8ApwRSzrIODX\nwDPh/Qzg8jpqxh1ET8q7HtgC3GBm8ScU1vjBWzN7R1Ifog7HdKK1SGuBp4hGdUT00IuJQPtwTlOB\nn4YiPg6vvxnKXwCcamappwym1/nPwK+IplI2A54DTjGzrXnOLdeP9W4GBhM9va850ajUSDP7WyzP\nbKJO4SyiB1v8F9HvlaXicIOk9URTJe8GPiKa6vjLkP4PSf2IPttJRJ3St0N5m0KeSZK6Ar8I7ZgC\njA/n7JxzrgGSmf8YvHPOuZokrQJ+Y2b5Rqx2O5ImAvuY2aD6botzzrmGxacIOuecc84551wd8Q6W\nc865bHyKg3POOVcknyLonHPOOeecc3XER7Ccc84555xzro54B8s555xzzjnn6oh3sJxzzjnnnHOu\njvjvYDnnqpHkCzOdc8455wpgZjV+tN5HsJxzNZiZb3W8XXDBBfXehl1189h6XL9Km8fVY/tV2zyu\n2bdsvIPlnHMl0KVLl/puwi7LY5sMj2syPK7J8dgmw+NaPO9gOeecc84551wd8Q6Wc64GSXWytWvX\npb5PpcFo1apVfTdhl+WxTYbHNRke1+R4bJPhcS2eP+TCOZdB3Tzn4t13a6z73G0dfvjh9d2EXZbH\nNhke12R4XJPjsU2Gx7V4yrVAyzm3+4meIlhX/y4o5yJQ55xzzkXrnNasWVPfzXBZdO7cmdWrV9fY\nLwmrz6cISuos6eUsabMklZeqLWl1d5I0X9L02L5V9dGWbCT1lzSxgHw52y3p4/C3vaRJ4fUFkn5T\nm/IKrHOMpKvylVOMeJmSJkrqlyd/f0mbw+c8X9L1BdQxS1KnPOlFXbOShkhaKunp8P4RSQsljQrn\ncWae4ws513MkLQrbc5J6xNLGS1oiqX8x7XbOOedcstasWVPvT8TzLftWbOe31GuwGuJX2YOBp8zs\n1Ni+htjOQtqUL48BmNk7Zja0gOPqos6G4lkzKw/bL+qpDSOBi81soKR2wJFmdriZ3VmHdawE+plZ\nT+AXwL2pBDO7Gvg5cFEd1ucKVFlZWd9N2GV5bJPhcU2GxzU5HlvXUJS6g9VE0kPhW/xJkpqlZ5A0\nXNLisN0a9jUK394vDt/Mjwr7u0maEUYBXpLUtRZtagW8l7bv/Vh7zg91LpD0QNg3UdKdkp6X9EZq\n5EFSC0kzQ1sWSRoU9neWtCwct1zSw5JOCscvl3RkyNdc0gRJcyTNk3R6aMbnwIcFnMv7oZyxob3z\nJb0laULqdGLtiY8mdgojMssl3ZgpDvnqzBarOEkHSJou6UVJsyV1l9RS0upYnuaS1koqy5Q/Q/2b\nieKTT7GLgTYA27Jde8FQSX+X9KqkPqH91UYEJU2V1E/SDUBfYIKkXwJPAh3DZ9S3WkOlckmV4byn\nS2pb6Lma2RwzS10rc4COaVnWE13zzjnnnHMuAaV+yMVBwIVmNifc9F8G3JFKlNQeuBXoRXQzOSN0\nUt4COppZj5CvZTjkYWCcmU2R1JTadRjLgKr4DjM7JtRzCHAtcKyZbZIUvzFtZ2Z9JB0MTAEmA58B\ng81si6R9iG5wp4T83YAfmNlSSS8Bw8Lxg0IdZwLXAU+b2UhJewNzJc00sxeAF0KbjgB+ZGaXpJ9I\nqt1mNgYYE8p4Fkjd8MdHm+KvjwIODe1/UdI0M5ufKi+XAmOVcm9o+wpJRwP3hNGcBZL6m9ls4DTg\nCTPbJqlGfmBgWv1Xpl5LGgu8aGbTMtR9rKSFwDrgp2a2NM95DQlllpP52gMoM7NjJJ0KVAAnpQ7P\nUN5Nkk4ArjKzBZLuAqaaWXkod2T425jo8xpkZhskDQXGASOLONeUi4HpafuqiK75PCpirweEze2M\nAQMG1HcTdlke22R4XJPhcU2Ox9YlrbKysqCR0lJ3sNaa2Zzw+iHgJ8Q6WEQ3+rPMbCOApIeBfkRT\nnbpKuhP4X+ApSXsCHcxsCoCZFTKKUY0kAT1DWzI5AXjMzDaFOjbH0v4a9i2T9I1UkcAtitbJVAEd\nYmmrYjf1S4CZ4fXLQJfw+mTgdEk/De+bAp2A5alKzWweUKNzlcVDwB1mtjBPvhmpc5M0mWikZX6B\ndaTkihWSWgDHAY+FuAM0CX8nAWcDs4FhwF158mcUOpaZzAM6mdmnoTP0VyDTaFgmK0m79mJpk2Pl\ndy6wvHwjaQcBhxF9uSCiLw3eTs+U41yjSqTjgQuJPsu4dUB3SV8zs63ZS6jI00znnHPOud3LgAED\nqnXkx44dmzFffa/ByrR+p8YNaLhZ7wlUApcCf8iWt1pB0mWxqXLt0tIaAauAg4HHC2p9dfGb01Q7\nRgD7Ar3MrBfR1MNmGfJXxd5XsaOjK6JRrl5h62pmy6kFSRVEHdoaU/UyKORz2VmNgE1hDVTq/A4L\naVOAUyS1BsqBZ/LkL4qZbTGzT8Pr6URTVdsUeGy2aw92fIbb2PEZfkn1/65qTIPNQ8ArsfPumbY+\nMH8B0YMt7iUaBdsUTzOzlcAyYI2kQ4tsm9sJvjYgOR7bZHhck+FxTc6uFtt27brU2e9SZtoK/a3K\nrl278swzz2RMe+655zj44IPr5Hxz1ZPPZ599xumnn06rVq04++yzAbj++uv5+te/TocOHVizZg2N\nGjWiqqoqT0l1o9QdrM6SUtPOzgH+Ly19LtBPUhtJZcBwYHaYbldmZn8BrgfKzWwL8KakMwAkNZW0\nR7wwM7s73KSWm9n6tLQqM+sCvEQ0epLJM8BZqZvx0AHIJNXB2ht4z8yqwghC5wx5cnkSuHz7AVKt\nfnhA0dqtE4FR6UlZDjlJUqsQv8HA8xnKXJan2pyxMrOPgVWShsTK7BHSPiH6HO4Eplkka/5ixdYw\nEaYaKjZKOjNMTc12bI1rL1vW8Hc1cLgi+wNH52pahn3Lga9L6h3qbxymXxZE0ZMP/xs4z8xWZEjv\nAXQlGv1dUmi5zjnnnCutd99dQ/SddzJbVP7O6du3L8uW7bhF3JlO0s74r//6L95//302bdrEo48+\nyptvvskd
d9zBq6++yttvRxOBdkyISl6pO1ivAj+WtJRoof3vwv7U0+3WA6OJRgsWEK0xmUq0UL9S\n0gLgjyEPwPnA5ZIWEXUKtt9IF+E1IONoRpjSdzNRJ28BMD7e3njW8Pdh4KjQnnOJRgrS82Q6PuUm\notGVxYoeQvHz9AySjghrk3K5EuhAtJ5qfhjNylXvXKLpbguJpvlVmx4YOhk55YhV3LnASEUPJXkF\nGBRLe5RoBPDPsX0jcuSvQdHDPU7LkDRE0iuhXb8imoaYmiLaDdiYo9hs117Ga8DMnifqZC0Jdc1L\nz5Plfer4L4AhwG2K1owtAI4t4lxvILqe7w6jt3PT0lsDq82sNF/huO18bUByPLbJ8Lgmw+OaHI/t\n7mvNmjV07959eydqzZo17LvvvuyzT95b2GTkeub77rABPwVure92NOQN+B7w/+q7HQmc16HA7fXd\njhKf81DgkTx5DKyONsw555xzuWX6/2Xd/v+49v+P7tKli91yyy12yCGHWJs2beyiiy6yrVu3mplZ\nZWWl7bfffmZmdt5551mjRo2sefPmttdee9m///u/1yjrgw8+sNNOO81atWplbdq0sX79+lWr5/bb\nb7cePXpYq1atbNiwYdvruf/++61v377VypJkK1assDFjxljTpk2tSZMmttdee9nvf/9722OPPays\nrMz22msvu/DCC2316tXWqFEj27Ztm5mZffjhhzZy5Ehr37697bfffnb99ddbVVVVUZ9PbH+Ne6lS\nj2A1RJOBPor90LCrzsweN7Pf1nc76pqZLTGza+q7HaUiaTxwDfCfBeSuk61t20Kf/bHr29XWBjQk\nHttkeFyT4XFNjsc2OX/605+YMWMGK1asYPny5fziFzt+UjQ1avTggw/SqVMnpk2bxkcffcQ119S8\nxRo/fjz7778/GzZs4L333mPcuHHV0h977DGeeuopVq1axaJFi7j//vtr1JP+vqKigmuvvZZhw4bx\n0UcfcckllzB9+nQ6dOjARx99xH333VejHRdccAFNmzZl5cqVLFiwgBkzZvCf/1nA7VGBdvsOlpmt\nMLPvWJEPEnDuq8bMrjazo83s6QLy1sm2fv3qEpyZc84555L0k5/8hA4dOtCqVSuuu+46Hnnkkax5\no4GdzJo0acI777zDqlWrKCsro0+fPtXSR40aRdu2bWnVqhWnn346CxdmfxB2rnpyeffdd5k+fTr/\n8R//QbNmzdh333254oorcp5TsXb7DpZzzpWCrw1Ijsc2GR7XZHhck+OxTc5+++23/XXnzp23Pzii\nWD/72c/o1q0bJ598Mt/85je57bbbqqW3bbvjcQrNmzdny5YttWtwDmvXruWLL76gffv2tGnThtat\nW3PppZfywQcf1Fkdpf4dLOecc84559xXyJtvvrn99Zo1a+jQoUPGfPme1NeiRQtuv/12br/9dpYu\nXcrxxx/P0UcfzfHHH5/3uE8//XT7+/Xr19f6qYD7778/zZo1Y8OGDYk9WdBHsJxzrgR8bUByPLbJ\n8Lgmw+OaHI9tcu666y7WrVvHxo0bGTduHMOGDcuYr127dqxcuTJrOY8//jgrVkS/IrPXXnvRuHFj\nysrK8tbfs2dPlixZwuLFi9m6dWvWH/jNJTWlsF27dpx88slceeWVfPzxx5gZK1eu5Nlnny26zGy8\ng+Wcc84551wDEz0oqm4eOrUzD6KSxDnnnLN9Wt+BBx7IddddlzHv6NGjuemmm2jTpg133HFHjfTX\nX3+dE088kb322os+ffrw4x//mH79+m2vJ5sDDzyQG2+8kYEDB9K9e3e+853vFNT29PNIefDBB/n8\n88855JBDaNOmDWeddRbr16/PcXSRddV2gZhzbtckyfzfBeecc650JNX6oQ0uedk+n7C/Rs/QR7Cc\nc84555xzro54B8s5V4Okndra7deuvk+hwfG1Acnx2CbD45oMj2tyPLauofCnCDrnaqrYucPfrXi3\nTprhnHPOOfdV42uwnHPVSLKd7WBRUfsfAHTOOed2N74Gq2FrsGuwJHWW9HKWtFmSykvVlrS6O0ma\nL2l6bN+q+mhLNpL6S5pYQL6c7Zb0cfjbXtKk8PoCSb+pTXkF1jlG0lX5yilGvExJEyX1y5O/v6TN\n4XOeL+n6AuqYJalTnvSirllJQyQtlfR0eP+IpIWSRoXzODPP8XnPNeT7taTXQ9mHx/aPl7REUv9i\n2u2cc8455wpX6jVYDbFrPhh4ysxOje1riO0spE358hiAmb1jZkMLOK4u6mwonjWz8rD9op7aMBK4\n2MwGSmoHHGlmh5vZnXVVgaRTgW5mdiDwI+B3qTQzuxr4OXBRXdXnCudrA5LjsU2GxzUZHtfkeGxd\nQ1HqDlYTSQ+Fb/EnSWqWnkHScEmLw3Zr2NcofHu/WNIiSaPC/m6SZoRv6l+S1LUWbWoFvJe27/1Y\ne84PdS6Q9EDYN1HSnZKel/RGauRBUgtJM0NbFkkaFPZ3lrQsHLdc0sOSTgrHL5d0ZMjXXNIESXMk\nzZN0emjG58CHBZzL+6GcsaG98yW9JWlC6nRi7YmPJnYKIzLLJd2YKQ756swWqzhJB0iaLulFSbMl\ndZfUUtLqWJ7mktZKKsuUP0P9m4nik0+xP9W9AdiW7doLhkr6u6RXJfUJ7a82IihpqqR+km4A+gIT\nJP0SeBLoGD6jvtUaKpVLqgznPV1S2yLO9QzgQQAz+zuwd+x4gPVE17xzzjnnnEtAqR9ycRBwoZnN\nCTf9lwHbf4VMUnvgVqAX0c3kjNBJeQvoaGY9Qr6W4ZCHgXFmNkVSU2rXYSwDquI7zOyYUM8hwLXA\nsWa2SVL8xrSdmfWRdDAwBZgMfAYMNrMtkvYB5oQ0gG7AD8xsqaSXgGHh+EGhjjOB64CnzWykpL2B\nuZJmmtkLwAuhTUcAPzKzS9JPJNVuMxsDjAllPAukbvjjo03x10cBh4b2vyhpmpnNT5WXS4GxSrk3\ntH2FpKOBe8JozgJJ/c1sNnAa8ISZbZNUIz8wMK3+K1OvJY0FXjSzaRnqPlbSQmAd8FMzW5rnvIaE\nMsvJfO0BlJnZMWHUqAI4KXV4hvJuknQCcJWZLZB0FzDVzMpDuSPD38ZEn9cgM9sgaSgwDhhZ4Ll2\nBN6MvV8X9qWeOlFFdM3nNiv2ugtQm68uXDUDBgyo7ybssjy2yfC4JsPjmhyPrUtaZWVlQSOlpR7B\nWmtmc8Lrh4i+0Y87CphlZhvNrIqoA9UPWAl0DaNG3wU+lrQn0MHMpgCY2edm9lkxjZEkoCdRBy6T\nE4DHzGxTqGNzLO2vYd8y4BupIoFbJC0CZgIdJKXSVsVu6peEdICXiW5hAU4GRktaAFQCTYFq64DM\nbF6mzlUWDwF3mNnCPPlmmNnmEL/J1PxcCpErVkhqARwHPBbO7/dAamRlEnB2eD0MeDRP/ozMbEyW\nztU8oJOZHQ78lvDZFajGtRdLmxwrv7CfQ88/knYQcBjRlwsLiDrdH
dIz5TjXfNYB3SV9LWeu42Ob\nd66cc865kmu3X7ud/tmUXFtD+kmV448/nvvuu6/Wx1944YW0adOG3r17A3DPPffQrl07WrZsycaN\nG2nUqBErV67c6XYOGDCAioqK7Vs2pR7BSv9mP9P6nRo3oGa2WVJP4LvApcBZwBWZ8lYrSLoM+GGo\n55/MbH0srRHRzfNW4PEiziFla4Y2jwD2BXqZWZWiB0A0y5C/Kva+ih2fg4hGuV6vRXuqkVRB1KGt\nMVUvg0I+l53VCNiUGrFJMwW4WVJroBx4BtgzR/6imNmW2Ovpku6W1MbMNhZwbKZr7+KQnPoMt7Hj\nM/yS6l9c1JgGm4eAV8ysT5HHpawD9o+93y/sA8DMVkpaBqyRNNDMltSyHlekyspK/3Y1IR7bZHhc\nk+FxTc6uFtt317270z+bkrP8XeQnVZ577jmefvpp3n77bZo1a8aXX37J1Vdfzdy5cznssMOA6Gl/\npVTqEazOklLTzs4B/i8tfS7QT1IbSWXAcGB2mG5XZmZ/Aa4HysNN85uSzgCQ1FTSHvHCzOxuM+sV\nHmywPi2tysy6AC+xY/Qk3TPAWZLahDpaZ8mX+tT2Bt4LnavjqT6qUcgn+yRw+fYDYk+AK4aitVsn\nAqPSk7IccpKkViF+g4HnM5S5LE+1OWNlZh8DqyQNiZXZI6R9QvQ53AlMs0jW/MWKr0EKUw2V6lwp\nWjPXPsexNa69bFnD39XA4YrsDxydq2kZ9i0Hvi6pd6i/cZh+WagpwPnh2N7AZjPb/i9oiGFXotFf\n71w555xz7itt9erVdOnShWbNou+0169fz9atWzn44IO35yn1I/BL3cF6FfixpKVEC+1TTzhLPd1u\nPTCaaHrcAqI1JlOJ1pBUhilTfwx5ILqRvDxMyXuePFPIsngNaJMpIUzpu5mok7cAGB9vbzxr+Psw\ncFRoz7nAsgx5Mh2fchPRg0AWK3oIxc/TM0g6IqxNyuVKomllLyp6iEJFnnrnEk13W0g0zW9+Wp37\n5KkvV6zizgVGKnooySvAoFjao0QjgH+O7RuRI38Nih7ucVqGpCGSXgnt+hXRNMTUFNFuQK6RrGzX\nXsZrwMyeJ+pkLQl1zUvPk+V96vgvgCHAbYrWjC0Aji30XM3sf4k6pm8QTau8LC1La2B1mILrSmhX\n+la1ofHYJsPjmgyPa3I8tsno2rUr48ePp2fPnrRu3Zrhw4fz+ec7nrn1hz/8gQMPPJB9992XwYMH\n884772QsZ+vWrZx33nnsu+++tG7dmmOOOYb339/xPLXVq1fTt29fWrZsySmnnMLGjdHt2ezZs9l/\n//2rldW1a1eeeeYZ7rvvPn74wx/ywgsv0LJlS0aMGMG3vvUtAFq3bs2JJ55Yox2ff/4511xzDZ07\nd6Z9+/ZcdtllbN26tUa+nbHb/9CwpJ8C+5jZ6LyZd1OSvgd0NbPf1ndb6pKkQ4keunJNfbelVBQ9\nNOP7ZjY8Rx7/oWHnnHOuhJThh2wlJTpFsND/V3ft2pW2bdvyP//zP3zta1/juOOO44orruCSSy7h\nmWee4eyzz2bmzJkccsghXH311SxatIjZs2fXKOfee+/l8ccfZ9KkSTRt2pSFCxdy4IEHsueee3L8\n8cfz1ltv8cQTT7DffvtxyimncOyxxzJu3Dhmz57Neeedx9q1a6u1acKECZxwwgk88MADTJgwgWef\nfRaANWvWcMABB/Dll19unxrYqFEj3njjDQ444ACuvPJKVq1axQMPPEDjxo0555xzOOyww7j55puz\nxiDT5xPbX2NGUqnXYDVEk4H7JU1P+y0sF5hZbdaoNXhhitzu1LkaD3wH+Le8mSt2rq62HWszmLxr\n29XWBjQkHttkeFyT4XFNjsc2OaNGjaJt2+j/7aeffjoLF0bPT/vTn/7EyJEj6dmzJwC33HILrVu3\nZu3atXTqVO05bTRp0oQNGzbw2muv8e1vf5tevXpVS7/wwgvp1q0bAEOHDmXq1Kk71WYzy7j26g9/\n+AMvv/wye++9NwCjR49mxIgROTtYxdrtO1hmtoLoptO5XVr4oeFC8ybZFOecc859haQ6VwDNmzff\nPg3w7bff5ogjjtie1qJFC/bZZx/WrVtXo4N1/vnn89ZbbzFs2DA+/PBDRowYwbhx4ygri349pl27\ndtXq2LJlC3Xt/fff59NPP63W5qqqqjq/7yn1GiznnNst+beqyfHYJsPjmgyPa3I8tqXXoUMH1qxZ\ns/39J5/ThHwuAAAgAElEQVR8woYNG+jYsWONvGVlZdxwww0sWbKEv/3tb0ybNo0HH3wwbx0tWrTg\n008/3f5+27Zt1dZuFWPfffelefPmLFmyhI0bN7Jx40Y2b97Mhx9+WKvysvEOlnPOOeecc65ow4cP\nZ+LEiSxevJitW7dy7bXX0rt37xqjVxBN4XzllVeoqqpizz33pEmTJttHr3Lp3r07n332GdOnT+fL\nL7/kF7/4RbWHbGSSbURKEj/84Q+54oortnfS1q1bx1NPPVXA2RZut58i6JxzpeBrA5LjsU2GxzUZ\nHtfk7GqxbduxbaK/VVXoeulcvyE1cOBAbrrpJs4880w2b97Mcccdx5///OeMedevX8+ll17KunXr\n2HPPPRk2bBjnnntu3jpatmzJ3XffzciRI6mqquJnP/sZ++23X1Ftjr+/7bbbGDt2LL17994+2vYv\n//IvnHzyyTnLLMZu/xRB51x1ksz/Xah7u9r/+BsSj20yPK7J8Lgm56sc22xPqXMNQ7FPEfQOlnOu\nGu9gOeecc6XlHayGrdgOlq/Bcs4555xzzrk64h0s51wNknaJrV27LvUdyu0qKyvruwm7LI9tMjyu\nyfC4Jsdj6xoKf8iFcy6DXWOawrvvZl8065xzzjmXBF+D5ZyrRpLtKh0s8DntzjnnGj5fg9WwNdg1\nWJI6S3o5S9osSeWlakta3Z0kzZc0PbZvVX20JRtJ/SVNLCBfznZL+jj8bS9pUnh9gaTf1Ka8Ausc\nI+mqfOUUI16mpImS+uXJP0jSIkkLJL0k6YQC6pglqeaPOFRPL+qalTRE0lJJT4f3j0haKGlUOI8z\n8xxfyLmeE851kaTnJPWIpY2XtERS/2La7Zxzzrlkde7cud6n1fuWfevcuXNRn2ep12A1xK75YOAp\nMzs1tq8htrOQNuXLYwBm9o6ZDS3guLqosyGYaWY9zawXcCFwbz21YyRwsZkNlNQOONLMDjezO+uw\njpVAPzPrCfyC2Lma2dXAz4GL6rA+VyBfG5Acj20yPK7J8Lgm56sc29WrV2NmDXKbNWtWvbehvrfV\nq1cX9XmWuoPVRNJD4Vv8SZKapWeQNFzS4rDdGvY1Ct/eLw7fzI8K+7tJmqFoFOAlSV1r0aZWwHtp\n+96Pted87Rj9eCDsmyjpTknPS3pDYeRBUgtJM0NbFkkaFPZ3lrQsHLdc0sOSTgrHL5d0ZMjXXNIE\nSXMkzZN0emjG58CHBZzL
+6GcsaG98yW9JWlC6nRi7YmPJnZSNCKzXNKNmeKQr85ssYqTdICk6ZJe\nlDRbUndJLSWtjuVpLmmtpLJM+TPUv5koPlmZ2aext3sCHxRwXhuAbdmuvWCopL9LelVSn9D+aiOC\nkqZK6ifpBqAvMEHSL4EngY7hM+qbFqdySZXhvKdLSv0SYCHnOsfMUtfKHKBjWpb1RNe8c84555xL\nQKkfcnEQcKGZzQk3/ZcBd6QSJbUHbgV6Ed1MzgidlLeAjmbWI+RrGQ55GBhnZlMkNaV2HcYyoCq+\nw8yOCfUcAlwLHGtmmyTFb0zbmVkfSQcDU4DJwGfAYDPbImkfohvcKSF/N+AHZrZU0kvAsHD8oFDH\nmcB1wNNmNlLS3sBcSTPN7AXghdCmI4Afmdkl6SeSareZjQHGhDKeBVI3/PHRpvjro4BDQ/tflDTN\nzOanysulwFil3BvavkLS0cA9Fo3mLJDU38xmA6cBT5jZNkk18gMD0+q/MvVa0ljgRTObll6xpMHA\nLUA74LsFnNeQcFw5ma89gDIzO0bSqUAFcFLq8Azl3aRoauJVZrZA0l3AVDMrD+WODH8bE31eg8xs\ng6ShwDhgZKHnGnMxMD1tXxXRNZ9HRez1gLC5nfFV/fHLrwKPbTI8rsnwuCbHY5sMj+sOlZWVBY2U\nlrqDtdbM5oTXDwE/IdbBIrrRn2VmGwEkPQz0I5rq1FXSncD/Ak9J2hPoYGZTAMws5zf7mUgS0DO0\nJZMTgMfMbFOoY3Ms7a9h3zJJ30gVCdyiaJ1MFdAhlrbKzJaG10uAmeH1y0CX8Ppk4HRJPw3vmwKd\ngOWpSs1sHlCjc5XFQ8AdZrYwT74ZqXOTNJlopGV+gXWk5IoVkloAxwGPhbgDNAl/JwFnA7OBYcBd\nefJnFDqW2dL+Cvw1jBb9kaizX4iVpF17sbTJ4e88oNDJufkea3cQcBjRlwsi+tLg7fRMuc4VQNLx\nRNMh+6YlrQO6S/qamW3NXkJFnmY655xzzu1eBgwYUK3DOXbs2Iz56nsNVqb1OzVuQMPNek+gErgU\n+EO2vNUKki6LTZVrl5bWCFgFHAw8XlDrq4vfnKbaMQLYF+hl0Xqf94BmGfJXxd5XsaOjK6JRrl5h\n62pmy6kFSRVEHdoaU/UyKORz2VmNgE1mVh47v8NC2hTgFEmtgXLgmTz5a83MngMahxHGQvJnu/Zg\nx2e4jR2f4ZdU/++qxjTYPAS8EjvvnlZ9fWD+AqIHW9xLNAq2KZ5mZiuBZcAaSYcW2Ta3E77KawMa\nOo9tMjyuyfC4JsdjmwyPa/FK3cHqLCk17ewc4P/S0ucC/SS1kVQGDAdmh5vhMjP7C3A9UG5mW4A3\nJZ0BIKmppD3ihZnZ3eEmtdzM1qelVZlZF+AlotGTTJ4BzpLUJtTROku+VAdrb+A9M6sKIwidM+TJ\n5Ung8u0HSIcXcEzNxkRrt04ERqUnZTnkJEmtQvwGA89nKHNZnmpzxsrMPgZWSRoSK7NHSPuE6HO4\nE5hmkaz5iyWpW+x1eahzQ3g/M0xNzXZsjWsvW9bwdzVwuCL7A0fnalqGfcuBr0vqHepvHKZfFkTR\nkw//GzjPzFZkSO8BdCUa/V1SaLnOOeecc64wpe5gvQr8WNJSooX2vwv7U0+3Ww+MJhotWEC0xmQq\n0UL9SkkLiKZ3jQ7HnQ9cLmkRUacg9TCAYrwGtMmUEKb03UzUyVsAjI+3N541/H0YOCq051yikYL0\nPJmOT7mJ6EEgixU9hOLn6RkkHRHWJuVyJdCBaD3V/DCalaveuUTT3RYSTfOrNj2wkNGeHLGKOxcY\nqeihJK8Ag2JpjxKNAP45tm9Ejvw1KHq4x2kZkn4g6RVJ84k6ccNCfhGtjduYo9hs117Ga8DMnifq\nZC0BfkU0fZBcx6Qd/wUwBLhN0kKi/w6OLeJcbyC6nu8Oo7dz09JbA6vNrKrmoS5JPoc9OR7bZHhc\nk+FxTY7HNhke1+Lt9j80HNY77WNmo/Nm3k1J+h7Q1cx+W99tqUthityFZnZNfbelVMJDM75vZsNz\n5PEfGnbOOeecy0P1/UPDDdhkoI9iPzTsqjOzx3e1zhWAmS3ZzTpX44FrgP+s77bsjnwOe3I8tsnw\nuCbD45ocj20yPK7FK/VTBBucsE7lO/XdDueSZtEPDReokCWDDV/btsX98rpzzjnn3M7a7acIOueq\nk2T+74JzzjnnXG4+RdA555xzzjnnEuYdLOecKwGfw54cj20yPK7J8Lgmx2ObDI9r8byD5Zxzzjnn\nnHN1xNdgOeeq8TVYzjnnnHP5+Ros55xzzjnnnEuYd7Ccc64EfA57cjy2yfC4JsPjmhyPbTI8rsXb\n7X8HyzlXk/TV/B2sth3bsv6t9fXdDOecc87txnwNlnOuGklGRX23opYqwP9Nc84551wp+Bos55xz\nzjnnnEtYyTpYkjpLejlL2ixJ5aVqS1rdnSTNlzQ9tm9VfbQlG0n9JU0sIF/Odkv6OPxtL2lSeH2B\npN/UprwC6xwj6ap85RQjXqakiZL65ck/SNIiSQskvSTphALqmCWpU570oq5ZSUMkLZX0dHj/iKSF\nkkaF8zgzz/F5zzXk+7Wk10PZh8f2j5e0RFL/Ytrt6obPYU+OxzYZHtdkeFyT47FNhse1eKVeg9UQ\n5+4MBp4ys9GxfQ2xnYW0KV8eAzCzd4ChBRxXF3U2BDPNbAqApG8DfwG+WQ/tGAlcbGZ/k9QOONLM\nDgztytuBLoSkU4FuZnagpGOA3wG9AczsaklzgYuA2XVRn3POOeecq67UUwSbSHoofIs/SVKz9AyS\nhktaHLZbw75G4dv7xWEkYlTY303SjPBN/UuSutaiTa2A99L2vR9rz/mx0Y8Hwr6Jku6U9LykN1Ij\nD5JaSJoZ2rJI0qCwv7OkZeG45ZIelnRSOH65pCNDvuaSJkiaI2mepNNDMz4HPizgXN4P5YwN7Z0v\n6S1JE1KnE2tPfDSxUxiRWS7pxkxxyFdntljFSTpA0nRJL0qaLam7pJaSVsfyNJe0VlJZpvwZ6t9M\nFJ+szOzT2Ns9gQ8KOK8NwLZs114wVNLfJb0qqU9of7URQUlTJfWTdAPQF5gg6ZfAk0DH8Bn1TYtT\nuaTKcN7TJbUt9FyBM4AHw3n/Hdg7djzAeqJr3pXYgAED6rsJuyyPbTI8rsnwuCbHY5sMj2vxSj2C\ndRBwoZnNCTf9lwF3pBIltQduBXoR3UzOCJ2Ut4COZtYj5GsZDnkYGGdmUyQ1pXYdxjKgKr7DzI4J\n9RwCXAsca2abJMVvTNuZWR9JBwNTgMnAZ8BgM9siaR9gTkgD6Ab8wMyWSnoJGBaOHxTqOBO4Dnja\nzEZK2huYK2mmmb0AvBDadATwIzO7JP1EUu02szHAmFDGs0Dqhj8+2hR/fRRwaGj/i5Kmmdn8VHm5\nFBirlHtD21dIOhq4x8wGhg5ZfzObDZwGPGFm2yTVyA8MTKv/ytRrSWOBF81sWnrFk
gYDtwDtgO8W\ncF5DwnHlZL72AMrM7BhFo0YVwEmpwzOUd5OiqYlXmdkCSXcBU82sPJQ7MvxtTPR5DTKzDZKGAuOA\nkQWea0fgzdj7dWHfu+F9FdE1n9us2OsuQG2+unDOOeec24VUVlYWNGWy1CNYa81sTnj9ENE3+nFH\nAbPMbKOZVRF1oPoBK4GuYdTou8DHkvYEOqSmfpnZ52b2WTGNkSSgJ1EHLpMTgMfMbFOoY3Ms7a9h\n3zLgG6kigVskLQJmAh0kpdJWmdnS8HpJSAd4megWFuBkYLSkBUAl0BSotg7IzOZl6lxl8RBwh5kt\nzJNvhpltDvGbTM3PpRC5YoWkFsBxwGPh/H4PpEZWJgFnh9fDgEfz5M/IzMZk6lyFtL+a2cHA6cAf\nizivGtdeLG1y+DsP6Fxgefmef34QcBjRlwsLiDrdHdIz5TrXPNYB3SV9LWeu42Obd67qhM9hT47H\nNhke12R4XJPjsU2Gx3WHAQMGUFFRsX3Lpr7XYGVav1PjBtTMNkvqSTTycClwFnBFprzVCpIuA34Y\n6vknM1sfS2tEdPO8FXi8iHNI2ZqhzSOAfYFeZlal6AEQzTLkr4q9r2LH5yCiUa7Xa9GeaiRVEHVo\na0zVy6CQz2VnNQI2pUZs0kwBbpbUGigHniGaypctf62Z2XOSGkvax8w2FJA/07V3cUhOfYbb2PEZ\nfkn1Ly5qTIPNQ8ArZtanyONS1gH7x97vF/YBYGYrJS0D1kgaaGZLalmPc84555zLoNQjWJ0VLbwH\nOAf4v7T0uUA/SW0klQHDgdlhul2Zmf0FuB4oN7MtwJuSzgCQ1FTSHvHCzOxuM+tlZuXxzlVIqzKz\nLsBL7Bg9SfcMcJakNqGO1lnypTpYewPvhc7V8VQf1Sjkl1ufBC7ffkDsCXDFULR260RgVHpSlkNO\nktQqxG8w8HyGMpflqTZnrMzsY2CVpCGxMnuEtE+IPoc7gWkWyZq/WJK6xV6Xhzo3hPczw9TUbMfW\nuPayZQ1/VwOHK7I/cHSupmXYtxz4uqTeof7GYfploaYA54djewObzSw1PTAVw65Eo7/euSohn8Oe\nHI9tMjyuyfC4JsdjmwyPa/FK3cF6FfixpKVEC+1/F/annm63HhhNND1uAdEak6lEa0gqw5SpP4Y8\nEN1IXh6m5D1PnilkWbwGtMmUEKb03UzUyVsAjI+3N541/H0YOCq051xgWYY8mY5PuYnoQSCLFT2E\n4ufpGSQdEdYm5XIl0bSyF8NDFCry1DuXaLrbQqJpfvPT6twnT325YhV3LjBS0UNJXgEGxdIeJRoB\n/HNs34gc+WtQ9HCP0zIk/UDSK5LmE3XihoX8IlobtzFHsdmuvYzXgJk9T9TJWgL8imj6ILmOSTv+\nC2AIcJukhUT/HRxb6Lma2f8SdUzfIJpWeVlaltbA6jAF1znnnHPO1TGZfRWesp0cST8F9kl7TLuL\nkfQ9oKuZ/ba+21KXJB1K9NCVa+q7LaUSHprxfTMbniOPUVG6NtWpCmio/6ZVVlb6t4AJ8dgmw+Oa\nDI9rcjy2yfC4ZicJM6sxI6nUa7AaosnA/ZKmm9mp9d2YhsjMarNGrcELU+R2p87VeOA7wL/lzVyR\ndGuS0bZjbQaxnXPOOefqzm4/guWcq06S+b8LzjnnnHO5ZRvBKvUaLOecc84555zbZXkHyznnSsB/\nRyQ5HttkeFyT4XFNjsc2GR7X4nkHyznnnHPOOefqiK/Bcs5V42uwnHPOOefy8zVYzjnnnHPOOZcw\n72A551wJ+Bz25Hhsk+FxTYbHNTke22R4XIvnv4PlnKtBqjHavUtq27Yz69evru9mOOecc24X4muw\nnHPVSDLYXf5dEP5voHPOOedqw9dgOeecc84551zCStbBktRZ0stZ0mZJKi9VW9Lq7iRpvqTpsX2r\n6qMt2UjqL2liAflytlvSx+Fve0mTwusLJP2mNuUVWOcYSVflK6cY8TIlTZTUL0/+gyT9TdJnhbYl\nXJOd8qQXdc1KGiJpqaSnw/tHJC2UNCqcx5l5ji/kXM+RtChsz0nqEUsbL2mJpP7FtNvVDZ/DnhyP\nbTI8rsnwuCbHY5sMj2vxSr0GqyHOxRkMPGVmo2P7GmI7C2lTvjwGYGbvAEMLOK4u6mwINgA/Ifqs\n69NI4GIz+5ukdsCRZnYgRJ2nOqpjJdDPzD6UdApwL9AbwMyuljQXuAiYXUf1Oeecc865mFJPEWwi\n6aHwLf4kSc3SM0gaLmlx2G4N+xqFb+8Xh2/mR4X93STNCKMAL0nqWos2tQLeS9v3fqw954c6F0h6\nIOybKOlOSc9LeiM18iCphaSZoS2LJA0K+ztLWhaOWy7pYUknheOXSzoy5GsuaYKkOZLmSTo9NONz\n4MMCzuX9UM7Y0N75kt6SNCF1OrH2xEcTO4URmeWSbswUh3x1ZotVnKQDJE2X9KKk2ZK6S2opaXUs\nT3NJayWVZcqfof7NRPHJysw+MLN5wJcFnE/KBmBbtmsvGCrp75JeldQntL/aiKCkqZL6SboB6AtM\nkPRL4EmgY/iM+qbFqVxSZTjv6ZLaFnGuc8wsda3MATqmZVlPdM27EhswYEB9N2GX5bFNhsc1GR7X\n5Hhsk+FxLV6pR7AOAi40sznhpv8y4I5UoqT2wK1AL6KbyRmhk/IW0NHMeoR8LcMhDwPjzGyKpKbU\nrsNYBlTFd5jZMaGeQ4BrgWPNbJOk+I1pOzPrI+lgYAowGfgMGGxmWyTtQ3SDOyXk7wb8wMyWSnoJ\nGBaOHxTqOBO4DnjazEZK2huYK2mmmb0AvBDadATwIzO7JP1EUu02szHAmFDGs0Dqhj8+2hR/fRRw\naGj/i5Kmmdn8VHm5FBirlHtD21dIOhq4x8wGhg5ZfzObDZwGPGFm2yTVyA8MTKv/ytRrSWOBF81s\nWr52F3BeQ0KZ5WS+9gDKzOwYSacCFcBJqcMzlHeTpBOAq8xsgaS7gKlmVh7KHRn+Nib6vAaZ2QZJ\nQ4FxwMhanOvFwPS0fVVE13weFbHXA8LmnHPOObf7qqysLGjKZKlHsNaa2Zzw+iGib/TjjgJmmdlG\nM6si6kD1I5r21DWMGn0X+FjSnkAHM5sCYGafm9lnxTRGkoCeRB24TE4AHjOzTaGOzbG0v4Z9y4Bv\npIoEbpG0CJgJdJCUSltlZkvD6yUhHeBloEt4fTIwWtICoBJoClRbB2Rm8zJ1rrJ4CLjDzBbmyTfD\nzDaH+E2m5udSiFyxQlIL4DjgsXB+vwdSIzOTgLPD62HAo3nyZ2RmY+qic5WmxrUXS5sc/s4DOhdY\nXr7nnx8EHEb05cICok53h/RM+c5V0vHAhcC/piWtA7pL+lruZlTEtgF5muwK4XPYk+OxTYbHNRke\n1+R4bJPhcd1hwIABVFRUbN+yqe81WJnW79S4ATWzzZJ6At8FLgXOAq7IlLdaQdJlwA9DPf9kZutj\naY2Ibp63Ao8XcQ4pWzO0eQSwL9DLzKoU
PQCiWYb8VbH3Vez4HEQ0yvV6LdpTjaQKog5tjal6GRTy\nueysRsCm1IhNminAzZJaA+XAM8CeOfKXTJZr7+KQnPoMt7HjM/yS6l9c1JgGm4eAV8ysT+1aDIoe\nbHEvcEqqw5tiZislLQPWSBpoZktqW49zzjnnnKup1CNYnSWlpp2dA/xfWvpcoJ+kNpLKgOHA7DDd\nrszM/gJcD5Sb2RbgTUlnAEhqKmmPeGFmdreZ9TKz8njnKqRVmVkX4CV2jJ6kewY4S1KbUEfrLPlS\nHay9gfdC5+p4qo9qFPLLrU8Cl28/QDq8gGNqNiZau3UiMCo9KcshJ0lqFeI3GHg+Q5nL8lSbM1Zm\n9jGwStKQWJk9QtonRJ/DncA0i2TNv5OqxUDRmrn2WTNnuPbylLsaOFyR/YGjC21LsBz4uqTeof7G\nYfplQRQ9+fC/gfPMbEWG9B5AV6LRX+9clZDPYU+OxzYZHtdkeFyT47FNhse1eKXuYL0K/FjSUqKF\n9r8L+1NPt1sPjCaaHreAaI3JVKKF+pVhytQfQx6A84HLw5S858kzhSyL14A2mRLClL6biTp5C4Dx\n8fbGs4a/DwNHhfacCyzLkCfT8Sk3ET0IZLGih1D8PD2DpCPC2qRcriSaVvZieIhCRZ565xJNd1tI\nNM1vflqd++SpL1es4s4FRip6KMkrwKBY2qNEI4B/ju0bkSN/DYoe7nFahv1tJb1JFJfrFD1EY88w\nRbQbsDFHsdmuvYzXgJk9T9TJWgL8imj6ILmOSTv+C2AIcJukhUT/HRxb6LkCNxBdz3eHtW1z09Jb\nA6vDFFznnHPOOVfHZPZVeMp2ciT9FNgn7THtLkbS94CuZvbb+m5LXZJ0KNFDV66p77aUSnhoxvfN\nbHiOPPbVePp+XRCl+jewsrLSvwVMiMc2GR7XZHhck+OxTYbHNTtJmFmNGUmlXoPVEE0G7pc03cxO\nre/GNERmVps1ag1emCK3O3WuxgPfAf6tgNxJN6dBaNu20GeTOOecc84VZrcfwXLOVSfJ/N8F55xz\nzrncso1glXoNlnPOOeecc87tsryD5ZxzJeC/I5Icj20yPK7J8Lgmx2ObDI9r8byD5ZxzzjnnnHN1\nxNdgOeeq8TVYzjnnnHP5+Ros55xzzjnnnEuYd7Ccc64EfA57cjy2yfC4JsPjmhyPbTI8rsXz38Fy\nztUg7R6/gxXXtmNb1r+1vr6b4ZxzzrmvOF+D5ZyrRpJRUd+tqAcV4P8eOuecc65QvgbLOeecc845\n5xJWsg6WpM6SXs6SNktSeanaklZ3J0nzJU2P7VtVH23JRlJ/SRMLyJez3ZI+Dn/bS5oUXl8g6Te1\nKa/AOsdIuipfOcWIlylpoqR+efIfJOlvkj4rtC3hmuyUJ72oa1bSEElLJT0d3j8iaaGkUeE8zsxz\nfN5zDfl+Len1UPbhsf3jJS2R1L+Ydru64XPYk+OxTYbHNRke1+R4bJPhcS1eqddgNcT5N4OBp8xs\ndGxfQ2xnIW3Kl8cAzOwdYGgBx9VFnQ3BBuAnRJ91fRoJXGxmf5PUDjjSzA6EqPNUFxVIOhXoZmYH\nSjoG+B3QG8DMrpY0F7gImF0X9TnnnHPOuepKPUWwiaSHwrf4kyQ1S88gabikxWG7NexrFL69Xyxp\nkaRRYX83STPCN/UvSepaiza1At5L2/d+rD3nhzoXSHog7Jso6U5Jz0t6IzXyIKmFpJmhLYskDQr7\nO0taFo5bLulhSSeF45dLOjLkay5pgqQ5kuZJOj0043PgwwLO5f1QztjQ3vmS3pI0IXU6sfbERxM7\nhRGZ5ZJuzBSHfHVmi1WcpAMkTZf0oqTZkrpLailpdSxPc0lrJZVlyp+h/s1E8cnKzD4ws3nAlwWc\nT8oGYFu2ay8YKunvkl6V1Ce0v9qIoKSpkvpJugHoC0yQ9EvgSaBj+Iz6psWpXFJlOO/pktoWeq7A\nGcCD4bz/DuwdOx5gPdE170pswIAB9d2EXZbHNhke12R4XJPjsU2Gx7V4pR7BOgi40MzmhJv+y4A7\nUomS2gO3Ar2IbiZnhE7KW0BHM+sR8rUMhzwMjDOzKZKaUrsOYxlQFd9hZseEeg4BrgWONbNNkuI3\npu3MrI+kg4EpwGTgM2CwmW2RtA8wJ6QBdAN+YGZLJb0EDAvHDwp1nAlcBzxtZiMl7Q3MlTTTzF4A\nXghtOgL4kZldkn4iqXab2RhgTCjjWSB1wx8fbYq/Pgo4NLT/RUnTzGx+qrxcCoxVyr2h7SskHQ3c\nY2YDQ4esv5nNBk4DnjCzbZJq5AcGptV/Zeq1pLHAi2Y2LV+7CzivIaHMcjJfewBlZnaMolGjCuCk\n1OEZyrtJ0gnAVWa2QNJdwFQzKw/ljgx/GxN9XoPMbIOkocA4YGSB59oReDP2fl3Y9254X0V0zec2\nK/a6C1Cbry6cc84553YhlZWVBU2ZLPUI1tr/z97dB1lV3fn+f39AGUVRwQcezIiG0hhujIpRY3S0\n9RcxMdF4URIfMliJo0lhqZiYKn6Z3HSjRIzGRJ0px2gcEgVrkHtJBjWKQLodRRGE5kFB1AkasYJ6\no6hMER3le/9Y3wO7T5/d54He3QjfV1VX77P32nt993fvA2edtdZuM1voy9NI3+hnHQu0mtlbZraZ\n1IA6GfgjcIj3Gp0BvCdpT2CYmc0GMLMPzOyv9QQjScCRpAZcJacBM83sba9jQ2bb73zdauCA0iGB\nKZKWA/OAYZJK29aa2Spffs63A6wkfYQFGA1MlNQOtAH9gA7zgMxsSaXGVY5pwM/NbFmVcnPNbIPn\nbxUygQ8AACAASURBVBadr0stusoVkvYAvgDM9PP7JVDqWbkf+IYvnw/MqFK+IjNr7o7GVZlO915m\n2yz/vQQYXuPxqj3//FPAZ0hfLrSTGt3Dygttw7m+Bhwm6W+6LHVq5icaV90ixrAXJ3JbjMhrMSKv\nxYncFiPyulVTUxMtLS1bfvL09hysSvN3On0ANbMNko4EzgC+C4wFJlQq2+FA0njgUq/nTDNbn9nW\nh/Th+X3goTrOoeT9CjFfBOwHHG1mm5UeALFbhfKbM683s/U6iNTL9WID8XQgqYXUoO00VK+CWq7L\ntuoDvF3qsSkzG/iJpIHAKOAPwJ5dlO8xOffeP/jm0jX8iK3X8EM6fnHRaRhsFQKeNbMTG4uY14C/\nzbz+hK8DwMz+KGk18Iqk/8/MnmuwnhBCCCGEUEFP92ANV5p4D3Ah8HjZ9kXAyZIGSeoLXAA85sPt\n+prZb4EfAaPMbCPwqqSvAUjqJ2n37MHM7HYzO9rMRmUbV75ts5kdDDzD1t6Tcn8Axkoa5HUMzClX\namDtDbzhjatT6dirUctfbp0DXLllh8wT4OqhNHfri8BV5Ztydjld0j6ev3OABRWOubpKtV3mysze\nA9ZKOi9zzM/6tv8iXYdbgQctyS2/jTrkQGnO3NDcwhXuvSrHfRk4SsnfAsfVGotbA+wv6fNe/y4+\
qHCfArMA64hpSP+wczuyo5tZq3ATaT0tUeAGzx1bVfS2q9W\n4ELgEjN7j+SkXSNpETAD2LhovLOBQ5Q2uVgo6TNF9UOAv1fR+UbgeWCxjz+mSOc/kx78b3UbzyGt\n68PM2oF7gU/734rylLkGSpt1DCpR9QO/LouA/YFvefl2dIxElaIJaJO0EPgc8ONKOpAiZPu7DT4J\nvJmpqxSJLNhpKelFxAyf9wyg05wqzPUK4GBJy0jphpcX1Q8AuiPKGtTLOvUDEj2MsG0+hF3zIeya\nG/F7TfkQdq0frc+/Cu7rnbY2s/OqCgddxjdxuNnMnmi0Lt2J0k6HfzKzexuty9rCN81YbGY/qyBj\nNK89nTYYYmF7foRt8yHsmg9dtWszrM/PbGuD2IwhH8Ku5ZGEmXXKUFrfHaxhpEjOG0W/hRUEQRGS\nZpPWDZ5gZi9WkFt/vxSCIAh6KAMHD2TFC6U2yQ2CoFH0SAcrCILuR5LF90IQBEEQBEFlyjlYXfnt\nqSAIgqBOIoc9P8K2+RB2zYewa36EbfMh7Fo/4WAFQRAEQRAEQRB0E5EiGARBByJFMAiCIAiCoDqR\nIhgE/5+9O4+zo6rzPv75JoBsQ4CoCSAJTEZ5RIUQQFEwacSdVQw7A8OAy0tHVFyGEZUEQUAFjSAq\nYyagiQjMRHYkLOkMBENCCAmQEEHC6hPgUeIERxHTv+ePc25SffuuTVe603zfr9d9dd1Tp6p+9atK\n554+59Q1MzMzMyuZG1hmZuuBx7CXx7kth/NaDue1PM5tOZzX9rmBZWZmZmZm1kc8B8vMuvH3YJmZ\nlW/EiNGsXPl4f4dhZq+AvwfLzFqSGlj+vWBmVi7hz2BmGzY/5MLMrF919ncAg1hnfwcwSHX2dwCD\nVGd/BzBoea5QOZzX9rXUwJI0WtIDddbNljSub8NqjaRRku6TdHOhbEV/xFKPpAmSprVQb0Wh/vX1\n6kjatg/jqnmcwvqGcef7YnaTOn1+fxT32cr1ljRR0oOS1rQaS7P9SlrdWrTdtvm2pAcknS/ptZLm\nSVooab9Wrm2L5/otScsk3S/pvyRtVVj335LmS3p9u7GbmZmZWWva6cEaiP3YhwGzIuJDhbKBGGcr\nMUWd5Xb3045m+2s37v7QyvEfAD4CzOnD/fbmvD8G7BYR/wq8F1gSEXtGxF0t7q+VOrOAt0TEWOAR\n4N/WbhwxHlgIHNh25NYHOvo7gEGso78DGKQ6+juAQaqjvwMYtDo6Ovo7hEHJeW1fOw2sjSVNl7RU\n0lWSNq2uIOkYSUvy67xcNkTStFy2WNJnc/kYSbfmv7TfK2nnXsS/NfBcVdnzhXhOyMdcJOnyXDZN\n0hRJcyU9KunwXL6FpNtyLIslHZLLR+cegWmSlkuaIel9efvlkvbK9TaXNLXQK3FwDuOvwB9bOJfn\nC8vDJN0g6WFJlxTK147xlHRa7g1ZUsjp5nm7Rbn8iFy+d473/hzfFsUDS7ox9wQukrRK0j+2GPca\n4A95H0MKPTT3S/p0deWct7tzjq/M8X5A0lWFOmt71iS9v7p+k7zVFBHLI+KRYv5a8HyOYaSkOTk/\nSyTtuy5UnZ3P9W5Jr8uF0yr3VH6/Ov+8FtgSWCjpy8D5wGF5v5vS/doeJ+mevO6HkirrWjnX2yKi\nK7+dB7yhqspK0r8bMzMzMyvBRm3U3QU4KSLmSZoKfAq4sLJS0nbAecAewCrg1txIeRrYISJ2y/Uq\nQ5ZmAN+MiOskbULv5oMNBbqKBRHxjnycXYGvAO+MiBckFT9UjoyIfSW9GbgOmAn8BTgsIl6UNJz0\n4fS6XH8M8NGIWCrpXuDovP0h+RiHA2cAt0fEyZKGAfMl3RYRvwZ+nWPaE/hERHy8+kQqcWd7A28G\nngRukXR4RMysrFQa5nZirjcUuEdSZ47zmYg4KNf7O0kbA78AjoiI+yRtCfy56tgHFvb7H8A1EbG6\nEnc9EfE0MDG//TgwmtRDE1X5Juf0q8ABEfHn3Mg4DTgX+LGkzSLiz8BRwM9z/TNq1D+7Xt4k3Qic\nHBErG8XdisJ+jwV+FRHn5oZOpZG3BXB3RHxV0vmk3qlv1tpV3t+hkv4nIipDG58F9oyIU/P7yjn8\nn5yDd0XEGkk/AI4DpvfiXP+ZdO2Lukj3TBOTCssd+C+ufaET57EsnTi3ZejEeS1DJ85rOTo7O93b\nUgLndZ3Ozs6W5qS108B6MiLm5eXpwGcoNLBIH/ZnR0SlR2MGMJ70gXhnSVOAm4BZ+UP+9hFxHUBE\n/LWNOMj7F7B7jqWW9wBXR8QL+RirCuuuyWXLtG4+ioBzJY0nfQjdvrBuRUQszcsPAbfl5QeAnfLy\n+4GDJX0pv98EGAUsrxw0IhaSGiLNzI+IJ/J5XgHsR2oEVuwH/DIi/pLrzATeDdwCfEfSucCNEXGX\npLcCv4uI+3IML+Ztuh1Q0muBnwETc+OqXe8Ffhj5kUhV+QbYB9gVmJuv3cakBsoaSb8i5e6/SMPX\nvkT636dH/UYBVBqKfWwBMDU3VK+NiMW5/KWIuCkvLySdfy2t9ppVhv8dAIwDFuTz3hR4tkflJucq\n6Qzg5Yj4edWqZ2jpf/ZJzauYmZmZvYp0dHR0a2xOnjy5ZuJYhU4AACAASURBVL12GljV8z9qzQfp\n8WEyIlZJ2h34APBJ4Ajgc7XqdtuR9ClSr0AAHy7+pV7SEOAx4CXgxjbOoeKlGjEfB7wW2CMiupQe\nKLBpjfpdhfddrMuhSL1cj/Qinmqt5LrnRhGP5F6oDwPfkHQ7qTHZLNdDgCuASRGxrBfxtkKk+XLH\n1Vh3JfAvwAvAgoj4U25c1Ku/3kTEnbnRfSBwmaQLImI68HKh2hrW3Qd/I/fGFhqG7RBweUSc0duY\nJf0T6R54T43VM4GvS1oaEbv29hjWGx39HcAg1tHfAQxSHf0dwCDV0d8BDFruZSmH89q+dobljZZU\nHDZ1Z9X6+cB4SdtKGgocA8zJQ72GRsQvSUPExuVelKckHQogaRNJmxV3FhGXRMQeETGuehhURHRF\nxE7AvaThVLXcARyh/GQ2SdvUqVdpfAwDnsuNq/1Jw92q6zRyC3Dq2g2ksS1sU887lOZ+DSGdX3Wu\n7yTN39lUaT7VR4A78zDNP+dei++QekKWAyPz8EQkbZmvT9H5wOKIuLpWMEpzuC5vEvOtwCcq+66R\n73nAvpLG5PWbS3pjXjcnx/ox1g1pa1T/lSjOddpe0m0NK0ujSPfFVOAnOc5u+6nyOLBXXj6U7g2s\nRvdRZd3twMTCnK5tcgwtkfRBUg/gIRHxUo0qJwA3u3FlZmZmVo52GlgPA5+WtJQ0Sf5HubwyJGwl\ncDppcPEiUk/E9cAOQKekRaQhaKfn7U4ATpW0GJgLjOhF/L8Baj7aOg/pO4fUyFsEXFCMt1g1/5wB\n7J3jOR5YVqNOre0rvkF6EMgSpUfan1VdQdKeki5tcD4V84GLScMRfxsR1xSPHRGLgMtIw9d+DV
ya\nh669jTT3axHwdeDsiHiZ1Ei7WNL9pKfMvabqeF8A3q/0kIv7JB1UtX4U8L9NYv4J8BSwJB//mKqY\n/x/wT8AVOcd3k+b1kR/KcAPwwfyzYX3qXAOlh3WMrFF+mKSnSMMUb9C6x/pvR/eeqFo6gMWS7gOO\nBL7XKAbg34EJOQf7AH8qrGvUE1nJ0zLSHyJm5fOeBdQ6p5rnClxEepjGrflaXlK1fhvS0wVtvevs\n7wAGsc7+DmCQ6uzvAAapzv4OYNDy9zWVw3ltnzbkbxHP852GR8TpTStbr+WHOPwsIh7s71j6ktKT\nDp+IiBv6O5b1JT80Y0lE/LhBnej/p+8PRp14aFBZOnFuy9CJ81qGTlJexYb8GWwg8sMYyuG81ieJ\niOgxQmlDb2CNIfXkvFj1XVhmVkXSHNK8weMj4pkG9dzAMjMrnRtYZhu6QdnAMrO+lxpYZmZWphEj\nRrNy5eP9HYaZvQL1Gli9+e4pMxvkIsKvPn7Nnj2732MYrC/n1nndkF6VvLpx1fc8V6gczmv73MAy\nMzMzMzPrIx4iaGbdSAr/XjAzMzNrzEMEzczMzMzMSuYGlpnZeuAx7OVxbsvhvJbDeS2Pc1sO57V9\nbmCZmZmZmZn1Ec/BMrNuPAfLzMzMrLl6c7A26o9gzGxgk3r8rjAzs0FixA4jWPn0yv4Ow2zQcg+W\nmXUjKZjU31EMQiuAnfs7iEHKuS2H81qOgZDXSen7Dgebzs5OOjo6+juMQcd5rc9PETQzMzMzMytZ\nSw0sSaMlPVBn3WxJ4/o2rNZIGiXpPkk3F8pW9Ecs9UiaIGlaC/VWFOpfX6+OpG37MK6axymsbxh3\nvi9mN6nT5/dHcZ+tXG9JEyU9KGlNq7E026+k1a1F222bb0t6QNL5kl4raZ6khZL2a+Xatniu20ia\nJWm5pFskDSus+29J8yW9vt3YrQ/091+sBzPnthzOazmc19K4l6Uczmv72unBGoh9yYcBsyLiQ4Wy\ngRhnKzFFneV299OOZvtrN+7+0MrxHwA+Aszpw/325rw/BuwWEf8KvBdYEhF7RsRdLe6vlTqnA7dF\nxC7AHcC/rd04YjywEDiw7cjNzMzMrCXtNLA2ljRd0lJJV0natLqCpGMkLcmv83LZEEnTctliSZ/N\n5WMk3Srpfkn3SurN33S2Bp6rKnu+EM8J+ZiLJF2ey6ZJmiJprqRHJR2ey7eQdFuOZbGkQ3L5aEnL\n8nbLJc2Q9L68/XJJe+V6m0uaWuiVODiH8Vfgjy2cy/OF5WGSbpD0sKRLCuVrx3hKOi33hiwp5HTz\nvN2iXH5ELt87x3t/jm+L4oEl3Zh7AhdJWiXpH1uMew3wh7yPIYUemvslfbq6cs7b3TnHV+Z4PyDp\nqkKdtT1rkt5fXb9J3mqKiOUR8Ugxfy14PscwUtKcnJ8lkvZdF6rOzud6t6TX5cJplXsqv1+df14L\nbAkslPRl4HzgsLzfTel+bY+TdE9e90Np7RMnmp4rcChweV6+nPRHiKKVpH83tr4NqL71Qca5LYfz\nWg7ntTT+vqZyOK/ta+cpgrsAJ0XEPElTgU8BF1ZWStoOOA/YA1gF3JobKU8DO0TEbrneVnmTGcA3\nI+I6SZvQu/lgQ4GuYkFEvCMfZ1fgK8A7I+IFScUPlSMjYl9JbwauA2YCfwEOi4gXJQ0H5uV1AGOA\nj0bEUkn3Akfn7Q/JxzgcOAO4PSJOzsOy5ku6LSJ+Dfw6x7Qn8ImI+Hj1iVTizvYG3gw8Cdwi6fCI\nmFlZqTTM7cRcbyhwj6TOHOczEXFQrvd3kjYGfgEcERH3SdoS+HPVsQ8s7Pc/gGsiYnUl7noi4mlg\nYn77cWA0qYcmqvJNzulXgQMi4s+5kXEacC7wY0mbRcSfgaOAn+f6Z9Sof3a9vEm6ETg5Il7xo5EK\n+z0W+FVEnJsbOpVG3hbA3RHxVUnnk3qnvllrV3l/h0r6n4ioDG18FtgzIk7N7yvn8H9yDt4VEWsk\n/QA4Dpje4rm+PiKezcdcqZ7DAbtI90xjxYGfO+EhLWZmZvaq19nZ2VKDs50G1pMRMS8vTwc+Q6GB\nRfqwPzsiKj0aM4DxpA/EO0uaAtwEzMof8rePiOsAIuKvbcRB3r+A3XMstbwHuDoiXsjHWFVYd00u\nW1b4ACrgXEnjSR9Cty+sWxERS/PyQ8BtefkB0sdPgPcDB0v6Un6/CTAKWF45aEQsJDVEmpkfEU/k\n87wC2I/UCKzYD/hlRPwl15kJvBu4BfiOpHOBGyPiLklvBX4XEfflGF7M23Q7oKTXAj8DJubGVbve\nC/yw8gVKVfkG2AfYFZibr93GpAbKGkm/IuXuv0jD174EdNSq3yiASkOxjy0ApuaG6rURsTiXvxQR\nN+XlhaTzr6XVXrPK8L8DgHHAgnzemwLP9qjc+rlWDyt8hpTbxvZvce/WOjdSy+PclsN5LYfzWhrP\nFSqH87pOR0dHt3xMnjy5Zr12GljVH9RqzQfp8WEyIlZJ2h34APBJ4Ajgc7XqdtuR9ClSr0AAHy7+\npV7SEOAx4CXgxjbOoeKlGjEfB7wW2CMiupQeKLBpjfpdhfddrMuhSL1cj/Qinmqt5LrnRhGP5F6o\nDwPfkHQ7qTHZLNdDgCuASRGxrBfxtkKk+XLH1Vh3JfAvwAvAgoj4U25c1Ku/3kTEnbnRfSBwmaQL\nImI68HKh2hrW3Qd/I/fGFhqG7RBweUSc0cuQn5U0IiKelTSSnkNoZwJfl7Q0Inbt5THMzMzMrI52\nhuWNllQcNnVn1fr5wHhJ20oaChwDzMlDvYZGxC9JQ8TG5V6UpyQdCiBpE0mbFXcWEZdExB4RMa56\nGFREdEXETsC9pOFUtdwBHKH8ZDZJ29SpV2l8DAOey42r/UnD3arrNHILcOraDaSxLWxTzzuU5n4N\nIZ1fda7vJM3f2VRpPtVHgDvzMM0/R8TPge+QekKWAyPz8EQkbZmvT9H5wOKIuLpWMEpzuC6vta7g\nVuATlX3XyPc8YF9JY/L6zSW9Ma+bk2P9GGk4Y7P6r0RxrtP2km5rWFkaRbovpgI/yXF220+Vx4G9\n8vKhdG9gNbqPKutuByYW5nRtk2No1XXAP+XlE4Frq9afANzsxlU/8LyL8ji35XBey+G8lsZzhcrh\nvLavnQbWw8CnJS0lTZL/US6vDAlbSXqCWSewiNQTcT2wA9ApaRFpCNrpebsTgFMlLQbmAiN6Ef9v\ngJqPts5D+s4hNfIWARcU4y1WzT9nAHvneI4HltWoU2v7im+QHgSyROmR9mdVV5C0p6RLG5xPxXzg\nYtJwxN9GxDXFY0fEIuAy0vC1XwOX5qFrbyPN/VoEfB04OyJeJjXSLpZ0PzALeE3V8b4AvF/pIRf3\nSTqoav0o4H+bxPwT4ClgST7+MVUx/z/SB/8rco7vJs3rIyK6gBuAD+afDetT5xooPaxjZI3ywyQ9\nRRqmeIPWPdZ/O7r3RNXSASyWdB9wJPC9RjEA/w5My
DnYB/hTYV2jnshKnpaR/hAxK5/3LKDWOdU8\nV1Jj+X2SlpOGG55XtX4boC96Wc3MzMysBm3I3+Sd5zsNj4jTm1a2XssPcfhZRDzY37H0JaUnHT4R\nETf0dyzrS35oxpKI+HGDOsGk9ReTmZmtZ5NgQ/78ZzZQSCIieoxQ2tAbWGNIPTkvVn0XlplVkTSH\nNG/w+Ih4pkG9DfeXgpmZNTVihxGsfPoVP3DX7FVvUDawzKzvSQr/Xuh7nZ2dfhJTSZzbcjiv5XBe\ny+PclsN5ra9eA6s33z1lZmZmZmZmNbgHy8y6cQ+WmZmZWXPuwTIzMzMzMyuZG1hmZuuBv0ekPM5t\nOZzXcjiv5XFuy+G8ts8NLDMzMzMzsz7iOVhm1o3nYJmZmZk1V28O1kb9EYyZDWxSj98V9gqNGDGa\nlSsf7+8wzMzMrGQeImhmNYRfffx69tkn2rsE1jLPDyiH81oO57U8zm05nNf2NWxgSRot6YE662ZL\nGldOWI1JGiXpPkk3F8pW9Ecs9UiaIGlaC/UGVNxFrcTWrI6kMyWd1ndRdd+npGmSxjepv7WkmZIW\nS5onadcWjjFb0qgm69u6/yVNlLRU0u35/RWS7pf02XwehzfZvpVzPTaf52JJd0narbDuAkkPSZrQ\nTtxmZmZm1rpWerAG4mSMw4BZEfGhQtlAjLOVmAZi3BUbevwVXwEWRcTuwInA9/spjpOBUyLiAEkj\ngb0iYmxETOnDYzwGjM/nejZwaWVFRHwBOAv45z48nlm/6+jo6O8QBiXntRzOa3mc23I4r+1rpYG1\nsaTp+S/vV0natLqCpGMkLcmv83LZkPwX9yX5r+mfzeVjJN2a/3J/r6SdexH31sBzVWXPF+I5IR9z\nkaTLc9k0SVMkzZX0aKW3QNIWkm7LsSyWdEguHy1pWd5uuaQZkt6Xt18uaa9cb3NJU3PPyEJJB+cw\n/gr8sYVzeT7vZ6SkOblnbomkfXP5akln53zdLel1ufygwjFnFcrPlPTTXHe5pFNy+YS8/xskPSzp\nEiUnSfpuIXenSLqgOqfN4q+X9yJJfy/pZkkLcixvkrSVpMcLdTaX9KSkobXq1zj+KlKuG9kVuAMg\nIpYDO1Xy1cDvgTX17uPsSEn35HxWrteJki4qnM/1ksZL+hqwHzBV0reAW4Ad8vXerypP4yR15vO+\nWdKIVs81IuZFROW+mwfsUFVlJenfj5mZmZmVISLqvoDRQBewT34/FTgtL88GxgHbAU8A25IabLcD\nh+R1swr72ir/nAcckpc3ATZtFEOduCYDn6uzblfgYWCb/H7r/HMacGVefjPwSF4eCmyZl4cXykeT\nPszumt/fC0zNy4cAM/PyOcCxeXkYsBzYrCqmPYFLm5zTacC/5WUBW+TlLuDDefl84CuVYxW2PRn4\ndl4+E1iUczsceBIYCUwA/jefl4BZwOHAFsCjwNC8/VzgLb24JvXyfmbhnrkNGJOX3w7cnpd/CUzI\ny0dWctWg/tp91rgvDqpRfg5wQWE/fwX2aPG86t3Hsws5/xBwa14+Efh+of71pB6lyjZ7FO6vJYV6\n0/L12Chfg+GFfExt9Vyr6nyx+r4D3g3c0GS7gPCrz1+ElWP27Nn9HcKg5LyWw3ktj3NbDue1vvx/\ne4/PUq08RfDJiJiXl6cDnwEuLKzfG5gdEX8AkDQDGE8anrSzpCnATcAsSVsC20fEdaSImvU89CBJ\nwO45llreA1wdES/kY6wqrLsmly2T9PrKLoFzlea2dAHbF9atiIilefkh0gd+gAeAnfLy+4GDJX0p\nv98EGEVqaJGPtxD4eJNTW0Dq3dgYuDYiFufylyLipry8EHhvXt5R0lWkBu7GwIrCvq7Nuf29pDtI\njYo/AvMj4glI83+A/SJiptKcoIMkPQxsFBEPNYm1lkZ5R9IWwLuAq/M1JMcNcBVwFDAHOBr4QZP6\nNUXEmXVWnQdMkXQf6dotAta0eF6PUXUfF9bNzD8XkhpMrWj2eL5dgLcCt+bzHgL8rrpSg3NNB5H2\nB04i9ZoVPQO8SdJrIuKl+nuYVFjuyC8zMzOzV6/Ozs6WHvrRSgMrmryHGh8aI2KVpN2BDwCfBI4A\nPlerbrcdSZ8CPpaP8+GIWFlYN4T0gfcl4MYWYq9W/EBZieM44LWknoUupYc2bFqjflfhfRfrcifg\noxHxSC/iWSsi7syNvAOByyRdEBHTgZcL1dYUjnsR8J2IuFHpoQXFD9zFayRqX7NivamkeUoPk3pS\nyjAEeCEiaj0Y4jrgHEnbkHqM7gC2bFC/LRGxmsK8o3yNH2tx21r38Sl5deV+KF6Xv9F96G2PIbVN\nCHgwIvZtc7t1O0gPtrgU+GClwVsREY9JWgY8IemA+o3pSb09vNl65/kB5XBey+G8lse5LYfzuk5H\nR0e3fEyePLlmvVbmYI2W9I68fCxwZ9X6+cB4SdtKGgocA8yRNJw07OyXwFeBcRHxIvCUpEMBJG0i\nabPiziLikojYIyLGFRtXeV1XROxEGq53VJ147wCOkLRtPsY2depVGljDgOdy42p/uvdEtPJlQLcA\np67dQBrbwjY9g0lPrHsuIqYCPyE1NBrFsBXrejZOrFp3aM7tcNLQwAW5fG+luWVDSPm7CyAi5gM7\nkq7dFXXiW9bkFBrmPTdyVkiaWNjnbnndn0jXdApp+Fo0qt8uScNyzyCSPgbMyfciSvPvtmuwbY/7\nuF7V/PNxYGye37Yjqfew7u5rlC0HXidpn3z8jdTCUw8L8Y4C/gv4x4j4bY31uwE7k3qSe9NTaWZm\nZmYNtNLAehj4tKSlpMnxP8rlabJGagSdDnSShl4tiIjrSZPrOyUtAn6W6wCcAJwqaTFprkllAn87\nfkOa89VDHtJ3DqmRtwioPLChXk/cDFLDYzFwPLCsRp1a21d8g/QgkCVKj7Q/q7qCpD0lXdpz0246\ngMV5GNuRwPeaHHcy8J+SFtDzYRRLSNfjbuCsQkP1XuBi0nDH3+ZGQ8VVwNxY94CEYvzDm8TeKO9F\nxwMnKz2w40HSXLaKK0m9ib8olB3XoH4PkiZLOqjGqjcDD+ZG4geAygNXBIwB/tBgt/Xu45r3U0TM\nJTWyHiJdw4XVdeq8r2z/MjAROF/S/aR/U+9s41y/Rvq3cYnSw0bmV63fBng8IrpqbGu2QfJ3tJTD\neS2H81oe57Yczmv7lOZnbVjyfKfhEXF608qvMpLOBFZHxIVV5ROAL0REzUaKpOuBCyNido11BwI7\nR8TFZcTcXyS9BTgpIr7Y37GsL5KOBD4SEcc0qBP12/XWe2JD/H27Iejs7PQQlhI4r+VwXsvj3JbD\nea1PEhHRY0TShtrAGgNcBrwY3b8L61Wv3QaWpGGkYZ6LIuLo9ReprW9Kj99/N+lplbc3qOcGVinc\nwDIzMxtMBlUDy8zKkxpY1tdGjBjNypWP93cYZmZm1kfqNbBamYNlZq8ytb7Twa9X9vrFLy7r78s6\naHl+QDmc
13I4r+VxbsvhvLbPDSwzMzMzM7M+4iGCZtaNpPDvBTMzM7PGPETQzMzMzMysZG5gmZmt\nBx7DXh7nthzOazmc1/I4t+VwXtvnBpaZmZmZmVkf8RwsM+vGc7DMzMzMmqs3B2uj/gjGzAY2qcfv\nChugRuwwgpVPr+zvMMzMzCxzD5aZdSMpmNTfUQxCK4CdS9jvpPS9Za9mnZ2ddHR09HcYg47zWg7n\ntTzObTmc1/p69RRBSaMlPVBn3WxJ4/oqwHZIGiXpPkk3F8pW9Ecs9UiaIGlaC/UGVNxFrcTWrI6k\nMyWd1ndRdd+npGmSxjepv7WkmZIWS5onadcWjjFb0qgm69u6/yVNlLRU0u35/RWS7pf02XwehzfZ\nvum55nrfl/RI3vfYQvkFkh6SNKGduM3MzMysda085GIg/mn0MGBWRHyoUDYQ42wlpoEYd8WGHn/F\nV4BFEbE7cCLw/X6K42TglIg4QNJIYK+IGBsRU/rqAJI+BIyJiDcCnwB+VFkXEV8AzgL+ua+OZ20o\no/fKAPyX1ZI4r+VwXsvj3JbDeW1fKw2sjSVNz395v0rSptUVJB0jaUl+nZfLhuS/uC/JPQefzeVj\nJN2a/7p+r6TefOzYGniuquz5Qjwn5GMuknR5LpsmaYqkuZIerfQWSNpC0m05lsWSDsnloyUty9st\nlzRD0vvy9ssl7ZXrbS5pau4ZWSjp4BzGX4E/tnAuz+f9jJQ0J/fMLZG0by5fLensnK+7Jb0ulx9U\nOOasQvmZkn6a6y6XdEoun5D3f4OkhyVdouQkSd8t5O4USRdU57RZ/PXyXiTp7yXdLGlBjuVNkraS\n9HihzuaSnpQ0tFb9GsdfRcp1I7sCdwBExHJgp0q+Gvg9sKbefZwdKemenM/K9TpR0kWF87le0nhJ\nXwP2A6ZK+hZwC7BDvt77VeVpnKTOfN43SxrRxrkeCvw0n+s9wLDC9gArSf9+zMzMzKwErTSwdgEu\njohdgdXAp4orJW0HnAd0AGOBvXMjZSywQ0TslnsOKsPlZgAXRcRY4F3A/+1F3EOBrmJBRLwjx7Mr\nqceiIyL2AIofiEdGxL7AwcD5uewvwGERsRfwHuCCQv0xwLcjYpech6Pz9l/KxwA4A7g9IvbJ239H\n0mYR8euI+HyOaU9Jl9Y6kUrcwLHAryJiHLA7cH8u3wK4O+frTuBjufzOiNgnIvYErgS+XNjt20jX\n413A15V6SwD2Bj4NvBn4B+AjwFXAwZKG5jonAf9RFVtdLea94lLgXyJib1IOfxgR/wMs0rphawfl\nPKypVb/G8T8fEfNyDJMlHVTjuIuBSoP67cAo4A1NzmtiRDxD/fsYYGg+/89Dt1lLPXr1IuIbwL3A\nsRHxZeAQ4NGIGBcRd1XqSdoIuAj4aD7vacA32zjXHYCnCu+fyWUVXaR/P7a+DdjBwBs+f0dLOZzX\ncjiv5XFuy+G8tq+Vpwg+WflQB0wHPgNcWFi/NzA7Iv4AIGkGMB44G9hZ0hTgJmCWpC2B7SPiOoCI\naPbX+B4kidQAmV6nynuAqyPihXyMVYV11+SyZZJeX9klcK7S3JYuYPvCuhURsTQvPwTclpcfAHbK\ny+8nNVC+lN9vQvoAv7xy0IhYCHy8yaktIPVubAxcGxGLc/lLEXFTXl4IvDcv7yjpKmA7YGO6f3y7\nNuf295LuAN5O6k2bHxFPQJr/A+wXETOV5gQdJOlhYKOIeKhJrLU0yjuStiA1+K7O15AcN6RG3lHA\nHOBo4AdN6tcUEWfWWXUeMEXSfaRrtwhY0+J5PUbVfVxYNzP/XAiMbnF/zR7PtwvwVuDWfN5DgN9V\nV2pwrs08A7xJ0msi4qW6tWYXlnfCw9vMzMzsVa+zs7OlBmcrDazqv8bXmnPT40NjRKyStDvwAeCT\nwBHA52rV7bYj6VOkXpoAPhwRKwvrhpA+8L4E3NhC7NWKHygrcRwHvBbYIyK6lB7asGmN+l2F912s\ny51IvQ2P9CKetSLiztzIOxC4TNIFETEdeLlQbU3huBcB34mIG3PvT/EDd/EaifrzpCrlU0m9Tw/T\nvYemLw0BXsg9dNWuA86RtA0wjjScb8sG9dsSEaspzDvK1/ixFretdR+fkldX7ofidfkb3XuGewyp\nbULAg7mntDeeAXYsvH9DLgMgIh6TtAx4QtIBdRvT+/fy6FafG6ml8fyAcjiv5XBey+PclsN5Xaej\no6NbPiZPnlyzXitDBEdLKg5ju7Nq/XxgvKRt8zCzY4A5koaThlD9EvgqMC4iXgSeknQogKRNJG1W\n3FlEXBIRe+ShUyur1nVFxE6koVZH1Yn3DuAISdvmY2xTp16lgTUMeC43rvane09EK18GdAtw6toN\nCk9ta4fSE+uei4ipwE9IDY1GMWzFup6NE6vWHZpzOxyYQOodgzR8c3RuqB4F3AUQEfNJH8qPAa6o\nE9+yJqfQMO+5kbNC0sTCPnfL6/5EuqZTgBsiqVu/XZKG5Z5BJH0MmJPvRZTm323XYNse93G9qvnn\n48BYJTuSeg/r7r5G2XLgdZL2ycffSC089bDgOuCEvO0+wKqIeLZwPruRPupv38ueSjMzMzNroJUG\n1sPApyUtJU2OrzyVLAByI+h0oJM09GpBRFxPmvfRKWkR8LNcB9KHv1MlLQbmAsUJ+K36DbBtrRV5\nSN85pEbeItbNqarXEzeD1PBYDBwPLKtRp9b2Fd8gPQhkidIj7c+qrtBoDlZBB7A4D2M7Evhek+NO\nBv5T0gJ6PoxiCel63A2cVWio3gtcTBru+NvcaKi4CpgbET0ezJEbGQ01yHvR8cDJSg/seJA0D6ni\nSlJv4i8KZcc1qN9Dg3lJbwYezI3ED5Dnh+UheGOAPzTYbb37uOb9FBFzSY2sh0jXcGF1nTrvK9u/\nDEwEzpd0P+nf1DtbPdc8nHSFpEeBH1M1ZxLYBng8Irqqt7WSeQ5WaTw/oBzOazmc1/I4t+VwXtu3\nQX7RcJ7vNDwiTm9a+VVG0pnA6oi4sKp8AvCFiKjZSJF0PXBhRMyuse5AYOeIuLiMmPuLpLcAJ0XE\nF/s7lvVF0pHARyLimAZ1/EXDZfAXDZfGX4JZDue1HM5reZzbcjiv9anOFw1vqA2sMcBlwItV34X1\nqtduA0vSMNIwz0URcfT6i9TWN6XH778b+LeIuL1BvGGqlgAAIABJREFUPTewNiST3MAyMzPrD4Oq\ngWVm5ZHkXwobkBE7jGDl0yubVzQzM7M+Va+B1cocLDN7lYkIv/r4NXv27FL268aV5weUxXkth/Na\nHue2HM5r+9zAMjMzMzMz6yMeImhm3UgK/14wMzMza8xDBM3MzMzMzErmBpaZ2XrgMezlcW7L4byW\nw3ktj3NbDue1fW5gmZmZmZmZ9RHPwTKzbjwHy8zMzKy5enOwNuqPYMxsYJN6/K4wszpGjBjNypWP\n93cYZmY2QHiIoJnVEH71+Wv2AIhhsL76N7fPPvsEg5HnXZTDeS2Pc
1sO57V9DRtYkkZLeqDOutmS\nxpUTVmOSRkm6T9LNhbIV/RFLPZImSJrWQr0BFXdRK7E1qyPpTEmn9V1U3fcpaZqk8S1s831Jj0i6\nX9LYFurPljSqyfq27n9JEyUtlXR7fn9Fjuez+TwOb7J903OVdKykxfl1l6TdCusukPSQpAntxG1m\nZmZmrWtliGCUHkX7DgNmRcTphbKBGGcrMQ3EuCs29PgBkPQhYExEvFHSO4AfAfv0QygnA6dExN2S\nRgJ7RcQbc4xNG+MtegwYHxF/lPRB4FLyuUbEFyTNB/4ZmNNHx7OWdfR3AINYR38HMCh1dHT0dwiD\nkvNaHue2HM5r+1oZIrixpOn5L+9XSdq0uoKkYyQtya/zctmQ/Bf3Jfmv6Z/N5WMk3Zr/cn+vpJ17\nEffWwHNVZc8X4jkhH3ORpMtz2TRJUyTNlfRopbdA0haSbsuxLJZ0SC4fLWlZ3m65pBmS3pe3Xy5p\nr1xvc0lTJc2TtFDSwTmMvwJ/bOFcns/7GSlpTu6ZWyJp31y+WtLZOV93S3pdLj+ocMxZhfIzJf00\n110u6ZRcPiHv/wZJD0u6RMlJkr5byN0pki6ozmmz+OvlvUjS30u6WdKCHMubJG0l6fFCnc0lPSlp\naK36NY6/ipTrRg4FfgoQEfcAwySNaLLN74E19e7j7EhJ9+R8Vq7XiZIuKpzP9ZLGS/oasB8wVdK3\ngFuAHfL13q8qT+MkdebzvrkQa9NzjYh5EVG57+YBO1RVWUn692NmZmZmZYiIui9gNNAF7JPfTwVO\ny8uzgXHAdsATwLakBtvtwCF53azCvrbKP+cBh+TlTYBNG8VQJ67JwOfqrNsVeBjYJr/fOv+cBlyZ\nl98MPJKXhwJb5uXhhfLRpA+zu+b39wJT8/IhwMy8fA5wbF4eBiwHNquKaU/g0ibndBrwb3lZwBZ5\nuQv4cF4+H/hK5ViFbU8Gvp2XzwQW5dwOB54ERgITgP/N5yVgFnA4sAXwKDA0bz8XeEsvrkm9vJ9Z\nuGduI/UkAbwduD0v/xKYkJePrOSqQf21+6xxXxxUo/x64F2F97cB41o8r3r38exCzj8E3JqXTwS+\nX3Xs8YVt9ijcX0sK9abl67FRvgbDC/mY2uq5VtX5YvV9B7wbuKHJdgHhV5+/Zg+AGAbrq79zSwxG\ns2fP7u8QBiXntTzObTmc1/ry73+qX60MEXwyIubl5enAZ4ALC+v3BmZHxB8AJM0AxgNnAztLmgLc\nBMyStCWwfURcR4qoWc9DD5IE7J5jqeU9wNUR8UI+xqrCumty2TJJr6/sEjhXaW5LF7B9Yd2KiFia\nlx8ifTAHeADYKS+/HzhY0pfy+02AUaSGFvl4C4GPNzm1BaTejY2BayNicS5/KSJuyssLgffm5R0l\nXUVq4G4MrCjs69qc299LuoPUOPkjMD8inoA0/wfYLyJmKs0JOkjSw8BGEfFQk1hraZR3JG0BvAu4\nOl9DctwAVwFHkYatHQ38oEn9miLizF7E3cxjVN3HhXUz88+FpAZTK5o9nm8X4K3Arfm8hwC/q67U\n7Fwl7Q+cROo1K3oGeJOk10TES/X3MKmw3IGHYJmZmdmrXWdnZ0sP/ejNHKzq91DjQ2NErJK0O/AB\n4JPAEcDnatXttiPpU8DH8nE+HBErC+uGkD7wvgTc2ELs1YofKCtxHAe8ltSz0KX00IZNa9TvKrzv\nYl3uBHw0Ih7pRTxrRcSduZF3IHCZpAsiYjrwcqHamsJxLwK+ExE3Kj20oPiBu3iNRO1rVqw3FfgK\nqQdq2is5jwaGAC9ERK0HQ1wHnCNpG1KP0R3Alg3qt+sZYMfC+zfksqbq3Men5NWV+6F4Xf5G96G3\nPYbUNiHgwYjYt83t1u0gPdjiUuCDlQZvRUQ8JmkZ8ISkA+o3pif19vBWV0d/BzCIdfR3AIOS512U\nw3ktj3NbDud1nY6Ojm75mDx5cs16rczBGq30YACAY4E7q9bPB8ZL2lbSUOAYYI6k4aRhZ78Evkoa\nkvUi8JSkQwEkbSJps+LOIuKSiNgjIsYVG1d5XVdE7EQarndUnXjvAI6QtG0+xjZ16lUaWMOA53Lj\nan+690S08mVAtwCnrt2ghSfU1QwmPbHuuYiYCvyE1NBoFMNWrOvZOLFq3aE5t8NJQwMX5PK9leaW\nDSHl7y6AiJhPaoAcA1xRJ75lTU6hYd4jYjWwQtLEwj53y+v+RLqmU0jD16JR/V64Djgh72MfYFVE\nPJvf3yZpu3ob1rqP61XNPx8Hxub5bTuSeg/r7r5G2XLgdTlOJG0kadcG+6iOdxTwX8A/RsRva6zf\nDdiZ1JPcm55KMzMzM2uglQbWw8CnJS0lTY7/US4PgNwIOh3oJM39WRAR15Mm13dKWgT8LNeB9EH3\nVEmLSXNNmj1soJbfkOZ89ZCH9J1DauQtAioPbKjXEzeD1PBYDBwPLKtRp9b2Fd8gPQhkidIj7c+q\nriBpT0mXNjgfSH+CXSzpPtK8m+81Oe5k4D8lLaDnwyiWkK7H3cBZhYbqvcDFpOGOv82NhoqrgLmx\n7gEJxfiHN4m9Ud6LjgdOVnpgx4OkuWwVV5J6E39RKDuuQf0eJE2WdFCN2G4iNdYeBX4MfCrXFzAG\n+EOD3da7j2veTxExl9TIeoh0DRdW16nzvrL9y8BE4HxJ95P+Tb2z1XMFvkb6t3GJ0sNG5let3wZ4\nPCK6amxrpers7wAGsc7+DmBQ8nfflMN5LY9zWw7ntX1K87M2LHm+0/Do/ph2Iz1FEFgdERdWlU8A\nvhARNRspkq4HLoyI2TXWHQjsHBEXlxFzf5H0FuCkiPhif8eyvkg6EvhIRBzToE7Ub9db73XioWxl\n6aR/cys2xP9Lm+ns7PTQoBI4r+VxbsvhvNYniYjoMSJpQ21gjQEuA16MiA/1czgDSrsNLEnDSMM8\nF0XE0esvUlvflB6//27S0ypvb1DPDSyztgzOBpaZmTU2qBpYZlYeN7DM2uUGlpnZq1G9BlYrc7DM\n7FVHfvnlV4uvESNa/ZaGDYvnXZTDeS2Pc1sO57V9rTym3cxeZfzX+L7nMezlcW7NzGwg8RBBM+tG\nUvj3gpmZmVljHiJoZmZmZmZWMjewzMzWA49hL49zWw7ntRzOa3mc23I4r+1zA8vMzMzMzKyPeA6W\nmXXjOVhmZmZmzXkOlpmZmZmZWcn8mHYz60Hq8ccYMzMzsw3SiB1GsPLplevteB4iaGbdSAom9XcU\ng9AKYOf+DmKQcm7L4byWw3ktj3NbjsGQ10nlfMdnr4YIShot6YE662ZLGtdXAbZD0ihJ90m6uVC2\noj9iqUfSBEnTWqg3oOIuaiW2ZnUknSnptL6Lqvs+JU2TNL6Fbb4v6RFJ90sa20L92ZJGNVnf1v0v\naaKkpZJuz++vyPF8Np/H4U22f0XnKukCSQ9JmtBO3NZHNvT/nAYy57Yczms5nNfyOLflcF7b1soc\nrIHYxXUYMCsiPlQoG4hx
thLTQIy7YkOPHwBJHwLGRMQbgU8AP+qnUE4GTomIAySNBPaKiLERMaWv\nDtDoXCPiC8BZwD/31fHMzMzMrLtWGlgbS5qe//J+laRNqytIOkbSkvw6L5cNyX9xXyJpsaTP5vIx\nkm7Nf12/V1Jv2sVbA89VlT1fiOeEfMxFki7PZdMkTZE0V9Kjld4CSVtIui3HsljSIbl8tKRlebvl\nkmZIel/efrmkvXK9zSVNlTRP0kJJB+cw/gr8sYVzeT7vZ6SkOblnbomkfXP5akln53zdLel1ufyg\nwjFnFcrPlPTTXHe5pFNy+YS8/xskPSzpEiUnSfpuIXenSLqgOqfN4q+X9yJJfy/pZkkLcixvkrSV\npMcLdTaX9KSkobXq1zj+KlKuGzkU+ClARNwDDJM0osk2vwfW1LuPsyMl3ZPzWbleJ0q6qHA+10sa\nL+lrwH7AVEnfAm4BdsjXe7+qPI2T1JnP++ZCrH1xritJ/35sfRuwfdWDgHNbDue1HM5reZzbcjiv\nbWulgbULcHFE7AqsBj5VXClpO+A8oAMYC+ydGyljgR0iYreI2B2oDJebAVwUEWOBdwH/txdxDwW6\nigUR8Y4cz67AV4COiNgDKH4gHhkR+wIHA+fnsr8Ah0XEXsB7gAsK9ccA346IXXIejs7bfykfA+AM\n4PaI2Cdv/x1Jm0XEryPi8zmmPSVdWutEKnEDxwK/iohxwO7A/bl8C+DunK87gY/l8jsjYp+I2BO4\nEvhyYbdvI12PdwFfV+otAdgb+DTwZuAfgI8AVwEHSxqa65wE/EdVbHW1mPeKS4F/iYi9STn8YUT8\nD7BI64atHZTzsKZW/RrH/3xEzMsxTJZ0UI3j7gA8VXj/TC5rdF4TI+IZ6t/HAEPz+X8eus1a6tGr\nFxHfAO4Fjo2ILwOHAI9GxLiIuKtST9JGwEXAR/N5TwO+2Yfn2kX692NmZmZmJWjlKYJPVj7UAdOB\nzwAXFtbvDcyOiD8ASJoBjAfOBnaWNAW4CZglaUtg+4i4DiAimv01vgdJIjVAptep8h7g6oh4IR9j\nVWHdNblsmaTXV3YJnKs0t6UL2L6wbkVELM3LDwG35eUHgJ3y8vtJDZQv5febAKOA5ZWDRsRC4ONN\nTm0BqXdjY+DaiFicy1+KiJvy8kLgvXl5R0lXAdsBG9P97wvX5tz+XtIdwNtJvWnzI+IJSPN/gP0i\nYqbSnKCDJD0MbBQRDzWJtZZGeUfSFqQG39X5GpLjhtTIOwqYAxwN/KBJ/Zoi4sxexN3MY1Tdx4V1\nM/PPhcDoFvfX7PF8uwBvBW7N5z0E+F11pVdwrs8Ab5L0moh4qW6t2YXlnfD4677gHJbHuS2H81oO\n57U8zm05nNe1Ojs76ezsbFqvlQZW9V/ja8256fGhMSJWSdod+ADwSeAI4HO16nbbkfQpUi9NAB+O\niJWFdUNIH3hfAm5sIfZqxQ+UlTiOA14L7BERXUoPbdi0Rv2uwvsu1uVOpN6GR3oRz1oRcWdu5B0I\nXCbpgoiYDrxcqLamcNyLgO9ExI2596f4gbt4jUT9eVKV8qmk3qeH6d5D05eGAC/kHrpq1wHnSNoG\nGAfcAWzZoH67ngF2LLx/Qy5rqs59fEpeXbkfitflb3TvGe4xpLYJAQ/mntLeaHiuEfGYpGXAE5IO\nqNuY3r+XRzczMzMbpDo6Oujo6Fj7fvLkyTXrtTJEcLSk4jC2O6vWzwfGS9o2DzM7BpgjaThpCNUv\nga8C4yLiReApSYcCSNpE0mbFnUXEJRGxRx46tbJqXVdE7EQaanVUnXjvAI6QtG0+xjZ16lUaWMOA\n53Ljan+690S08mVAtwCnrt2ghSfU1QwmPbHuuYiYCvyE1NBoFMNWrOvZOLFq3aE5t8OBCaTeMUjD\nN0fnhupRwF0AETGf9KH8GOCKOvEta3IKDfMeEauBFZImFva5W173J9I1nQLcEEnd+r1wHXBC3sc+\nwKqIeDa/vy0Pc62p1n1cr2r++TgwVsmOpN7DuruvUbYceF2OE0kb5eGXrap7rrlsN9LforbvZU+l\n9ZbHsJfHuS2H81oO57U8zm05nNe2tdLAehj4tKSlpMnxlaeSBUBuBJ0OdAKLgAURcT1p3kenpEXA\nz3IdSB/+TpW0GJgLNHvYQC2/AbattSIP6TuH1MhbxLo5VfV64maQGh6LgeOBZTXq1Nq+4hukB4Es\nUXqk/VnVFRrNwSroABZLug84Evhek+NOBv5T0gJ6PoxiCel63A2cVWio3gtcTBru+NvcaKi4Cpgb\nET0ezJEbGQ01yHvR8cDJSg/seJA0D6niSlJv4i8KZcc1qN9DvXlJeYjlCkmPAj8mzyPMQ/DGAH9o\nsNt693HN+yki5pIaWQ+RruHC6jp13le2fxmYCJwv6X7Sv6l3vtJzLdgGeDwiuqq3NTMzM7NXboP8\nouE832l4RJzetPKrjKQzgdURcWFV+QTgCxFRs5Ei6XrgwoiYXWPdgcDOEXFxGTH3F0lvAU6KiC/2\ndyzri6QjgY9ExDEN6viLhs3MzGzwmDSAvmh4AJsJ7KvCFw1b70gaJmk58KdajSuAiLhxsDWuACLi\noVdZ4+oC4IukIahmZmZmVoINsgfLzMojyb8UzMzMbNAYscMIVj69snnFNtXrwWrlKYJm9irjP7z0\nvc7Ozm5PHrK+49yWw3kth/NaHue2HM5r+9yDZWbdSAr/XjAzMzNrbLDNwTIzMzMzMxtw3MAyM1sP\nWvnmd+sd57Yczms5nNfyOLflcF7b5waWmZmZmZlZH/EcLDPrxnOwzMzMzJrzHCwzMzMzM7OSuYFl\nZj1I6vYa+YaR/R3SBs9j2Mvj3JbDeS2H81oe57Yczmv7/D1YZtbTpO5vn530bL+EYWZmZrah8Rws\n63eSVkfE3/V3HACSxgGXA/Mj4uRctiIidu6HWHYHto+Im/P7E4GdImJyk+1uBvYB7oyIQwrlxwBn\nAj+OiO822D6qG1hM8pcPm5mZmRV5DpYNZAPpk/vxwA8qjausrfgk9dW/q7HAh6vKWonlW6Tz6L5h\nxBXABODzrzw0MzMzM6vFDSwbMCRNlrRI0n2SnpY0VdJoScskTZO0XNIMSe+TNDe/3ytvu7ekuyUt\nlHSXpDf2Moytgeeqyp7Px5ggaY6kGyQ9LOmSQuyrJX1H0iJgH0njJHVKWiDpZkkjcr1TJT0k6X5J\nP89lm+dznZfjP1jSxsBZwJE5H0cA/wu82OwEImJ2vXoR8SwwrO2s2CvmMezlcW7L4byWw3ktj3Nb\nDue1fZ6DZQNGRJwJnClpGPDfwEV51RjgoxGxVNK9wNERsa+kQ4AzgI8Ay4D9IqJL0gHAucDEXoQx\nFOiqiusdhbd7A28GngRukXR4RMwEtgB+HRFflLQRMAc4JCJ+L+lI4JvAycC/kob5vSxpq7zPM4Db\nI+LkfO7zgduArwN7RsSp1UFKOjivm9SLc/QfVszMzMxK4gaWDUTTgQsi4n5Jo4EVE
bE0r3uI1PgA\neAAYnZe3Bn6ae66CXtzbuWH0FtY17GqZHxFP5PpXAPsBM4E1+SfALsBbgVslidSg+V1etxj4uaRr\ngGty2fuBgyV9Kb/fBBjVKNaIuB64vvWz6+YPksZExG/r1phdWN6pl0exbjo6Ovo7hEHLuS2H81oO\n57U8zm05nNd1Ojs7W+rRcwPLBhRJk4AnI+KnheKXCstdhfddrLuHvwHcERGH50ZZsYlQ2ffZwIFA\nRMS4qnVvIPUcPRoR9zYIsXoOVOX9nwvfzivgwYjYt8b2BwLjgUOAMyS9Ldf/aEQ8UhXTPg3ieCWm\nAPdL+kxEXFazxv4lHdnMzMxsA9XR0dGtwTl5cu3njnmokA0EgrXD3t4LfLbW+iaGAc/k5ZNqVYiI\nr0bEHtWNq7zuaWCHFIY6Ghzn7Xle2BDgKODOGjEuB15XaSBJ2kjSrnndqIiYA5wObEUaWngLsHYY\noKSxeXF1rtMbon7evgL8Q93GlZXCY9jL49yWw3kth/NaHue2HM5r+9zAsoGg0vPzeWB7YEF+sMOk\nqvXVy0XfAs6TtJBe3te5B+pRYNsG1e4FLiYNVfxtRFSG+a2NKyJeJs3/Ol/S/cAi4J15COJ0SYuB\nhcCUiPgfUu/bxpKWSHqA9HALSL1wuxYecrFWfhDGpFoBSvpv4ErgPZKelPS+qiqb5IddmJmZmVkf\n8/dgmRVI+gHwQET8qMa6CcAXit8ttaGR9HpgcURs16COvwfLzMzMrAl/D5ZZa34KnCRpan8H0tfy\nFw3PIvX2mZmZmVkJ3MAyK4iIeyLiHVVfNFxZN2dD7r2KiCsiYmxEfLdp5UndXyN2GFFmaK8KHsNe\nHue2HM5rOZzX8ji35XBe2+enCJpZDx4OaGZmZtY7noNlZt1ICv9eMDMzM2vMc7DMzMzMzMxK5gaW\nmdl64DHs5XFuy+G8lsN5LY9zWw7ntX1uYJmZmZmZmfURz8Eys248B8vMzMysOc/BMjMzMzMzK5kb\nWGbWg6Rur5FvGNnfIW3wPIa9PM5tOZzXcjiv5XFuy+G8ts/fg2VmPU3q/vbZSc/2SxhmZmZmGxrP\nwTLrJ5JGAzdExNtarP8t4GDgJeD/t3fnQZaV9RnHvw8gIgjExDiYGVmMIooiM+CgAWJjAi4ERIyC\nQRHUbEyEimK5JTBdRaKE0kgBEhcyIehIobggZRREhgQVWYZNFiWyCMYZF1DAKBH45Y97Gk/3dPfM\n7XvP9CzfT1VXn/Oec+5971M9XfPr933P+R5wdFXd38f7PQtYAiwA3lNVH5zivJpYYLHYhw9LkiS1\nuQZLWjf1U7VcBOxaVbsDtwHv7vO9fgq8FTilz+skSZK0hiywpNn1uCSfSHJzkvOSbJFkjyTXJlme\n5IYkjwBU1Ver6tHmuiuAef28UVX9pKquAR4e8mfQGnAOe3fMthvm2g1z7Y7ZdsNc+2eBJc2uZwGn\nV9VzgAeAY6rqmqqaX1ULgC8z+YjTm4D/WIv9lCRJ0hpwDZY0S5o1WJdV1Y7N/n7AW6vq0Gb/MOAt\nwAHtB1MleS+woKpePcP3PRF4YNo1WC9uNewInO0aLEmStHFbtmzZuBG90dHRSddgeRdBaXZNrFoK\nIMlzgROAfScUV0cBrwBeMtmLJTkJOBCoZgRsZvab8ZWSJEkbpJGREUZGRh7bHx0dnfQ8pwhKs2uH\nJHs1238GXJ5kW2ApcGRV3Tt2YpKXAe8ADq6qhyZ7sar6u9b0wums8tcWdcs57N0x226YazfMtTtm\n2w1z7Z8jWNLsuhVYlGQJ8G3gTOC1wPbAx5KE34xGnQZsDlzca+aKqjpmTd8oyRzgamBr4NEkxwHP\nqaoHh/mBJEmSNmauwZI0js/BkiRJWj2fgyVJkiRJHbPAkrSqxeO/5sydM4ud2TA4h707ZtsNc+2G\nuXbHbLthrv1zDZakVTgdUJIkaWZcgyVpnCTl7wVJkqTpuQZLkiRJkjpmgSVJa4Fz2Ltjtt0w126Y\na3fMthvm2j8LLEmSJEkaEtdgSRrHNViSJEmr5xosSZIkSeqYBZakVSQZ6Gu7edvN9kdY5ziHvTtm\n2w1z7Ya5dsdsu2Gu/fM5WJJWtXiwy1cuXjmUbkiSJK1vXIMlrWOS3AHsUVX3Jrm8qvZJ8mLg+Ko6\naIDXPQv4E2BlVe02zXk1aIHFYh9WLEmSNmyuwZLWH49VJlW1z2TtM7QEeOmAryFJkqRpWGBJsyTJ\nXya5NsnyJLcnuWTsUOucB1qXbJvkwiS3Jvlwv+9XVZcD9w3Ybc2Qc9i7Y7bdMNdumGt3zLYb5to/\nCyxpllTVR6pqPrAQuBv4wGSntbZfACwCng08I8mh3fdSkiRJ/XANljTLmtGolVU12uy312DdX1Xb\nNGuwRqtqpDnnaOB5VfW2Pt9rB+CLq12D9eJWw47ATn19JNdgSZKkDc6yZcvGjeiNjo5OugbLuwhK\nsyjJUcDTquqYNTh9YsUybj/JQuAjTfsJVXXhjDu234yvlCRJ2iCNjIwwMjLy2P7o6Oik5zlFUJol\nSfYA3g68frrTWtt7JdkhySbAYcDl7ROr6sqqml9VC6YprjLhNbWWOIe9O2bbDXPthrl2x2y7Ya79\ns8CSZs8i4EnApc2NLj7atLdHptrbVwKnAzcB36uqz/XzZkmWAt8Adk7y/WaaoSRJkobINViSxvE5\nWJIkSavnc7AkSZIkqWMWWJK0FjiHvTtm2w1z7Ya5dsdsu2Gu/fMugpJWtXiwy+fMnTOUbkiSJK1v\nXIMlaZwk5e8FSZKk6bkGS5IkSZI6ZoElSWuBc9i7Y7bdMNdumGt3zLYb5to/CyxJkiRJGhLXYEka\nxzVYkiRJq+caLEmSJEnqmAWWJK0FzmHvjtl2w1y7Ya7dMdtumGv/fA6WpFUkq4x2S5IkbTTmzJ3D\nintWzOha12BJGidJDfqgYUmSpPXaYlhdneQaLGkNJHk0ySmt/bcnOWGW+rJD059FrbbTkhw5G/2R\nJEnS6llgSeM9BBya5LdnuyONHwHHJXE67/rujtnuwAbMbLthrt0w1+6YbTfMtW8WWNJ4DwMfBd42\n8UAzonRJkuuSXJxkXtO+JMmpSb6e5L+THNq65vgkVzbXnDiD/vwYuAQ4apL+7J7km81rn59k26b9\n0iTvT/KtJLcm2btp3yTJPzXt1yX58xn0R5IkSdOwwJLGK+AM4IgkW084dhqwpKp2B5Y2+2O2q6q9\ngYOAkwGS7A88s6oWAvOBPZPsM4P+nAwcn1XvPHE28I6mP98G2gXcplW1F/C38NiKqjcDP2vaFwJ/\nkWSHPvujmdpptjuwATPbbphrN8y1O2bbDXPtm9OOpAmq6sEkZwPHAb9sHXoR8Kpm+xyaQqrx+eba\nW5I8pWk7ANg/yXIgwFbAM4HL++zPnUmuAI4Ya0uyDbBtVY291tnAea3LPtt8vwYYK6IOAJ6X5DXN\n/jZNf+5a5U0vbW3viL9cJUnSRm/ZsmVr
dNt6CyxpcqcCy4ElrbbpbiXzUGs7re/vq6qPTXVRkkPo\njTwV8JaqWj7Fqe8DPgMsm+R9puvPI/zm33mAt1bVxdNc17Pfas9Qv+7AQrUrZtsNc+2GuXbHbLth\nro8ZGRlhZGTksf3R0dFJz3OKoDReAKrqPnojQm9uHfsG8Lpm+/XAf033GsBXgDcl2Qogye8l+d32\niVX1+aqaX1ULpiiuxvrzHeBm4OBm/37g3rF0qtlUAAAIw0lEQVT1VcAbgMvWoD/HjN0wI8kzkzxh\nimskSZI0A45gSeO1R6k+ACxqtR0LLElyPL2bTxw9yTWP7VfVxUl2Ab7ZLJ96gF5h9uMZ9ucf6I2q\njTkK+JemSLp9df0BPk5vwt/yZj3Xj4BD+uiLBuFf/7pjtt0w126Ya3fMthvm2jcfNCxpHB80LEmS\nNnqLfdCwJK3bfI5Id8y2G+baDXPtjtl2w1z75giWpHGS+EtBkiRt1ObMncOKe1ZMe44jWJLWWFX5\nNeSvE088cdb7sKF+ma25rk9f5mq269vXxprr6oqr6VhgSZIkSdKQWGBJ0lpw5513znYXNlhm2w1z\n7Ya5dsdsu2Gu/XMNlqRxXIMlSZK0ZmqSNVgWWJIkSZI0JE4RlCRJkqQhscCSJEmSpCGxwJIkSZKk\nIbHAkgRAkpcluTXJd5O8c7b7s65LclaSlUluaLU9KclFSb6T5CtJtm0de3eS25LckuSAVvuCJDc0\nuX9obX+OdVGSeUm+luSmJDcmObZpN98BJHl8km8lubbJ9h+bdnMdgiSbJFme5IJm31yHIMmdSa5v\nfm6vbNrMdkBJtk3y6Sanm5LsZa7DY4EliSSbAKcDLwV2BV6XZJfZ7dU6bwm9vNreBXy1qp4FfA14\nN0CS5wCvBZ4NvBz4cJKxuw6dCby5qnYGdk4y8TU3Rg8Db6uqXYEXAYuan0fzHUBVPQTsV1Xzgd2A\nlyTZG3MdluOAm1v75jocjwIjVTW/qhY2bWY7uFOBL1XVs4HnA7dirkNjgSUJYCFwW1XdVVW/Bs4F\nXjnLfVqnVdXlwH0Tml8JnN1snw0c0mwfDJxbVQ9X1Z3AbcDCJNsBW1fVVc15/966ZqNVVSuq6rpm\n+0HgFmAe5juwqvrfZvPx9P4PcB/mOrAk84BXAB9vNZvrcIRV/79qtgNIsg2wb1UtAWjy+jnmOjQW\nWJIA5gJ3t/bvadrUn6dU1UroFQnAU5r2ifn+oGmbSy/rMeY+QZIdgd2BK4A55juYZhrbtcAKYFlV\n3Yy5DsM/A+8A2s++MdfhKODiJFcleUvTZraD2Qn4SZIlzbTWjybZEnMdGgssSeqODxocQJInAp8B\njmtGsibmab59qqpHmymC84B9k4xgrgNJciCwshl1XeWBoy3mOjN7V9UCeiOEi5Lsiz+zg9oMWACc\n0WT7C3rTA811SCywJEHvr1Hbt/bnNW3qz8okcwCaqRM/atp/ADytdd5YvlO1b/SSbEavuDqnqr7Q\nNJvvkFTV/cCXgD0x10HtDRyc5HbgU/TWtp0DrDDXwVXVD5vvPwY+T29Kuz+zg7kHuLuqrm72z6dX\ncJnrkFhgSQK4CnhGkh2SbA4cDlwwy31aH4Txf7G+ADiq2X4j8IVW++FJNk+yE/AM4MpmCsbPkyxs\nFgwf2bpmY/evwM1VdWqrzXwHkOTJY3cFS/IEYH/gWsx1IFX1nqravqqeTu9359eq6g3AFzHXgSTZ\nshnJJslWwAHAjfgzO5BmGuDdSXZumv4IuAlzHZrNZrsDkmZfVT2S5G+Ai+j94eWsqrpllru1Tkuy\nFBgBfifJ94ETgfcDn07yJuAuenddoqpuTnIevTuM/Ro4pqrGpl4sAv4N2ILeHZ2+vDY/x7qoubPd\nEcCNzXqhAt4DnAycZ74z9lTg7OY/QpvQGx28pMnYXIfv/ZjroOYAn0tS9P7P+smquijJ1ZjtoI4F\nPpnkccDtwNHAppjrUOQ3+UiSJEmSBuEUQUmSJEkaEgssSZIkSRoSCyxJkiRJGhILLEmSJEkaEgss\nSZIkSRoSCyxJkiRJGhILLEmStE5JcmmSBVMcOzfJ05vtO5NcNuH4dUluaLbfmOS0ad7nzCQvmuLY\nwUn+fuafQtLGygJLkiStF5L8PrBVVd3eNBWwdZK5zfFdmra26R74uRdwxRTHvgi8OslmA3RZ0kbI\nAkuSJE0ryZZJLkxybZIbkrymab8jyclN2xWtkaUnJ/lMkm81X3/Qep2zmnOvSXJw075Fkk8luSnJ\nZ4EtpujK4fQKn7bzmnaA1wFLJxzfvhkR+06SE1qfaRfgu1VVSY5t3vu6JEsBqqqAbwAHzDA2SRsp\nCyxJkrQ6LwN+UFXzq2o34MutY/c1bWcApzZtpwIfrKq9gD8FPt60vxe4pKpeCLwEOCXJE4C/Bn5R\nVbsCJwJ7TtGPfYCrW/sFnA+8qtk/iFULsBc0x58PvKY19fDlrc/xTmD3qtod+KvWtVcBfzhFXyRp\nUhZYkiRpdW4E9k/yviT7VNUDrWPnNt8/Bbyw2f5j4PQk1wIXAE9MsiW90aB3Ne3LgM2B7ekVMZ8A\nqKobgeun6McOwA8ntP0UuC/JYcDNwC8nHL+4qn5WVb8CPkuvSAN4Kb8psK4HliY5Anikde3/ADtO\n0RdJmpTziiVJ0rSq6rZm5OcVwElJvlpVJ40dbp/afN8E2Kuqft1+nSQAr66q2yZpH9c0VVemOHYe\nvRG0I6e4Ztx+M2q2bVWtaNoOpFfkHQy8N8lzq+rR5r2mW8MlSatwBEuSJE0ryVOBX1bVUuAUoH2H\nv8Oa74cD32y2vwIc17r++a32Y1vtuzeb/wkc0bQ9F9htiq7cBWzX7lrz/XPAycBFk1yzf5Lfaoqq\nQ4CvA/sBlzbvF2D7qroMeBewDfDE5tqnNu8pSWvMESxJkrQ6z6O3XupR4P8Yv07pSUmuB35F7yYT\n0CuuzmjaN6VXQB0DnAR8qLmNeoA76I0anQksSXITcAvj11m1XU5vfdbyZr8AqupBeoXfZKNhV9Kb\nGjgXOKeqlje3bv90c3xT4BNJtmn6dGpV3d8cWwhcuNp0JKklvZvkSJIk9SfJHcAeVXXvWnq/pwOn\nVdWBA77O1fSmMD4yzTmhV8i9oKoeHuT9JG1cnCIoSZJmaq3+lbZ5/tX9Y7eDH+B19pyuuGocBJxv\ncSWpX45gSZIkSdKQOIIlSZIkSUNigSVJkiRJQ2KBJUmSJElDYoElSZIkSUNigSVJkiRJQ/L/uDxL\nkuJrs2cAAAAASUVORK5CYII=\n", "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "co_t, de_t = compression_decompression_times()\n", "\n", "fig = plt.figure(figsize=(12, 
co_t, de_t = compression_decompression_times()

fig = plt.figure(figsize=(12, len(compression_configs) * .3))
fig.suptitle('Compression speed', fontsize=14, y=1.01)

ax = fig.add_subplot(1, 1, 1)

# Blosc configurations with bit shuffle enabled (shuffle == 2)
y = [i for i, (c, o) in enumerate(compression_configs) if c == 'blosc' and o['shuffle'] == 2]
x = (nbytes / 1000000) / np.array([co_t[i] for i in y])
# bar width is the throughput of the fastest repeat for each configuration
ax.barh(np.array(y) + .2, width=x.max(axis=1), height=.6, label='bit shuffle', color='b')

# all remaining configurations (no shuffle)
y = [i for i, (c, o) in enumerate(compression_configs) if c != 'blosc' or o['shuffle'] == 0]
x = (nbytes / 1000000) / np.array([co_t[i] for i in y])
ax.barh(np.array(y) + .2, width=x.max(axis=1), height=.6, label='no shuffle', color='g')

ax.set_yticks(np.arange(len(labels)) + .5)
ax.set_yticklabels(labels, rotation=0)

xlim = (0, np.max((nbytes / 1000000) / np.array(co_t)) + 100)
ax.set_xlim(*xlim)
ax.set_ylim(0, len(co_t))
ax.set_xlabel('speed (Mb/s)')
ax.grid(axis='x')
ax.legend(loc='upper right')

fig.tight_layout();

[next notebook cell (execution_count 60): figure output, base64 PNG data omitted]
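The cell above relies on compression_decompression_times(), which is defined earlier in the notebook and not shown here. For orientation, a minimal sketch of how such per-configuration timings could be gathered, assuming numcodecs codecs, a test array, and illustrative names (data, configs, repeats); this is not the notebook's actual implementation:

import timeit

import numpy as np
from numcodecs import get_codec


def compression_decompression_times_sketch(data, configs, repeats=3):
    # Returns two (n_configs, repeats) arrays of elapsed seconds, one for
    # encoding and one for decoding, so callers can take the fastest repeat
    # per configuration (as the plot above does via x.max(axis=1)).
    co_t, de_t = [], []
    for name, opts in configs:
        codec = get_codec(dict(id=name, **opts))
        enc = codec.encode(data)  # warm-up encode, reused as decode input
        co_t.append([timeit.timeit(lambda: codec.encode(data), number=1)
                     for _ in range(repeats)])
        de_t.append([timeit.timeit(lambda: codec.decode(enc), number=1)
                     for _ in range(repeats)])
    return np.array(co_t), np.array(de_t)

Throughput as charted is then bytes over seconds, scaled to Mb/s: (nbytes / 1000000) / time.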
O1aweuOejbOAx8zsq5m83qhnLTrVo/dk\nYHAd9erVYUfMrZVJl2OCmR1pZp8D9gK+2ck+6ikvZixgZjbWzOYCFwI3mNlYYGuN7dUic7OZfRY4\nEjhB0mmkjl/08R8m6aBO6h70ehp7WoGgDhobG3tahaAOwm75JOyWT/Jot+5wsHaWNEvSOkkPlFo5\nkXS+pDX+utHzBkia6XmrJV3l+aN9FWCVpGfrfFDcC3izKO+tjD4Tvc8WSb/xvJmSbpO0RNLLksZ7\n/m6SFrkuqyWd6fkNvoowU9IGSbMlner1N0j6gssNlnSPpKWSVkg6w9X4EHi3hrG85e1Mc31XStrk\nbQ721YwWn8cJkq4ARgFNkhZ73Ytdp6XA8Zm2t5Ae/Cux1eWQtJ+keW6bFklfLExpZm6vkbTMZaZ4\n3gxJkzIyUyR9r5x8ER8Db1ebJDMr6LgzMAj4S5Uq2+bfV6sKc7tC0m4uM0TSXLfzfRn9N0oa5umj\nlFZr9wXuA472di4DzgWuz9b1OgMk3STpGR/3pbWO1cy2mtnjnv4IWAl8qkjsDdJnIAiCIAiCIOhq\nzGyHvYAGoA34ol/fA3zP002kb/RHAq8Cw0gO32LgTC97LNPWHv6+FDjT04OAXerQaxowuUzZGOAF\nYKhf7+XvM4E5nv4s8JKnBwK7e3rvTH4D6SF9jF8/C9zj6TOBeZ7+CXCBp/cENgC7Ful0FHBXjWPb\nE1hNWr0YD/wyUzbE3/+YGd+IzPzvBDwJ3F6nvf8NuNLTyvT3nr+fWtDHyx8ETgCOAJoz7TwP7F9O\n3q/fL9H/SOChCvo9SnKs5nRyXPOB4zw92O/Tk4B3vE8BTwFfyszvsIzt/uDpk4D5mXZnAuMz98sa\nT18K/DBzjy8HGjoz1sK9C/xv4MCi/MXAFyrUM5iSeTUZWLx6/au/2wnLI01NTT2tQlAHYbd8EnbL\nJ73Jbk1NTTZlypRtL//fQ/GrYhxKF/EnM1vq6VnAFcDPM+VHA01m9jaApNnAicB04CBJtwG/Bx6T\ntDswyszmk0b0YWeVkSTgcNelFKcAc83sHe9jc6bsd563XtJ+hSaBGZJOJDmTozJlG81snaefBxZ5\nei1woKfHAWdI+oFfDwIOIDlaeH8rgMtqHOIs4BYza5G0BfiZpBnAw2b2ZEbnwqrSsbSf/znAwTX2\nVcwpwN+7zgYUx16NA06VtNL73w042MxmStpX0ghgP+BtM3td0uRS8iQnsANm9mfg6+WUM7OvSBoE\nPCBpopndW+O4lgC/8HtznusGsMz7RNIqkk2fIrNiVyfjSNv4Jvj1HqRxv5oZS8WxShoI3A/camav\nFBVvIn0Gni2vwtTOax0EQRAEQdCHaWxsbLdlcdq0aSXlusPBsirXUOKB1Mw2SzocOA34LjCBFDtU\n8eHVt5pd6v2cbmatmbIBpNWFD4CHOzGGAh+U0PlCYB/gSDNrUzp0YpcS8m2Z6za2z72As83spTr0\naYekqSSH9l4AM3tJ6SCR04HpkhaZ2fRSVT9p304p2xb3M8PM7i5RNpdk4xHAnBrkq/VVWkGzDyX9\nL+AYoCYHy8x+Kukh4GvAEknjvChr34/ZbtOP2L79tuRhIlUQcIWZLayjboG7gA1m9k8lyn4JLJB0\njJl95xP0EfQqGntagaAO8hhbEITd8krYLZ/k0W7dEYPVIOlYT18APFFUvgw4UdIw/9b9fOBxSXsD\nA83st8CPgLGW4mhek/QN2HYq267ZxszsDkuHGYzNOlde1mZmB5K+uS93yMEfgAmZGJqhZeQKTsme\nwJvuXJ1M2upVLFOJBcCV2ypIR9RQp6MyKXbry8BVmbyRwFYzux+4mbTtEuA90qoIwDOk+R/q8UkT\nKIGky7NxUmVYDExy+QGShhSq+/sC4NuFGCZJozw2CeAB4DzgbJKzVU5+n6I2q6IUJzfC0zuRHKVV\nfn2WMiftlan/aTN73sxuIm3XO7RKlxtJWwPx8XSWBcAk1xVJBxff51X0nU7aUnt1GZFrgEvCuQqC\nIAiCIOh6usPBegG4XNI6UkzInZ5vAO4EXUv6EZcWYLmZPUiKwWmW1EI6HOBarzcRuFLSatLWreF1\n6PQiKeaoA76l7yckJ68FuCWrb1bU32eTDi5YDXwLWF9CplT9AteTDgJZo3RM93XFAn5Qwl0VxgNw\nNenwiuV+iMJU4DBgmY/jx6RtlwB3A49KWuzzP40U2/YEsK5Dy4lDqX4wxGTgZElrSE7sGM8v2Hoh\nadva0y4zF9jdy9YBQ4BNZvZGBfkh2TazSBrpK03F7AbM9218K4DXgH/1stFUP0xkstKR56tJcXWP\nlJDJ6nMdcLukZaTVrHKUuyd+RbLDSr8n7qRotbncWCXtD/wQGJM5mOPbRWJDgZcr6BXkkuaeViCo\ngzz+vksQdssrYbd8kke7KYXK9C883mlvM7u2qnAAgKT5pAMZKjkMuUPSvcDVZlbNeewTeAziGuAc\nM9tQRsbq3IEZ9CjN9O9tgiKP/8+am5tzuf2lvxN2yydht3zSm+0mCTPrsKuqvzpYo4FfA1us/W9h\nBUGfRdIhpK2Ya4CLrMyHPzlYQZAvhg9voLX1lZ5WIwiCIOhHhIMVBEFNSCrnewVBEARBEAROOQer\nO2KwgiAIgh1MHveoB2G3vBJ2yydht3ySR7uFgxUEQRAEQRAEQdBFxBbBIAjaEVsEgyAIgiAIqhNb\nBIMgCIIgCIIgCHYw4WAFQRD0AfK4Rz0Iu+WVsFs+CbvlkzzaLRysIAiCIAiCIAiCLiJisIIgaEf8\nDlYQBL2B4fsPp3VTa0+rEQRBUJb4HawgCGpCkjG1p7UIgqDfMxXiGSUIgt5MHHIRBEHQl9nYsMjF\nvgAAIABJREFU0woEdRF2yyV5jAkJwm55JY9226EOlqQGSWvLlDVJGrsj+y+HpAMkrZT0SCavV/2b\nk3SSpJk1yHVab0lXSdqlTNlFkm739BRJE6u0dZGkKVVk3u+sjtUotOn3WFMN8o9IapH0nKRfSdqp\ninzF+ffyBzup8yBJC/3emyDpBNdnpaRDy31WMvVrHetYSWskvSjp1kz+Id7fnM7oHQRBEARBENRO\nd6xg9cb1/bOAx8zsq5m83qhnLTrVo/dkYHAd9erVYUfMrZVJl2OCmR1pZp8D9gK+2ck+6ikvZixg\nZjbWzOYCFwI3mNlYYGuN7dUi8y/AJWZ2CHCIpNNIHb/o4z9M0kGd1D3o7YRF80nYLZc0Njb2tApB\nHYTd8kke7dYdDtbOkmZJWifpgVIrJ5LO92/c10i60fMGSJrpeaslXeX5o30VYJWkZ+t8UNwLeLMo\n762MPhO9zxZJv/G8mZJuk7RE0suSxnv+bpIWuS6rJZ3p+Q2S1nu9DZJmSzrV62+Q9AWXGyzpHklL\nJa2QdIar8SHwbg1jecvbmeb6rpS0ydscLOkhz1/jqyZXAKOAJkmLve7FrtNS4PhM21tID/6V2Opy\nSNpP0jy3TYu
kLxamNDO310ha5jJTPG+GpEkZmSmSvldOvoiPgberTZKZFXTcGRgE/KVKlW3z76tV\nhbldIWk3lxkiaa7b+b6M/hslDfP0UUqrtfsC9wFHezuXAecC12frep0Bkm6S9IyP+9JaxyppBDDE\nzJZ71r2kLxSyvEH6DARBEARBEARdTMVtUl3EZ4CLzWyppHuAScDPC4WSRgI3AkcCm4GF7qRsAvY3\ns8+73B5eZTbpW//5kgZRn5M4EGjLZpjZsd7PGOCHwHFm9o6k7IPoCDM7XtJngfnAPOCvwFlmtkXS\n3sBSLwMYDZxtZuskPQuc5/XP9D7GA/8ILDazSyTtCSyTtMjMngaedp2OAr5jZpcVD6Sgt5lNAaZ4\nG/8B/DPwFeB1M/u6tzPEzN6XdDXQ6OMbAUwlzf97QDOw0tu8pdpEmtkDmcvbgWYzGy9JwO4FMe//\nVOBgMzvGy+dLOgGYA9wK3OHy5wLjysmb2ZO402Zmm4BzvP2RwN2F8RYj6VHgaGCRmT1aZVzb5h/4\nPjDJzJ6WNJhkc4AjgDFAK7BE0pfM7Ck6rjKZmb0l6R+A75tZwQk/DnjQzOZJasjIXwJsNrNj/R5f\nIukxM3u1hrHuT/rsFNjkeVnaSJ+B8mQ3Ih5IfMueBzYSdsojYbdc0tzcnMtv1fs7Ybd80pvs1tzc\nXFNMWHc4WH8ys6WengVcQcbBIj3wNpnZ2wCSZgMnAtOBgyTdBvweeEzS7sAoM5sPYGYfdlYZf1A/\n3HUpxSnAXDN7x/vYnCn7neetl7RfoUlghqQTSQ+uozJlG81snaefBxZ5ei3psRVgHHCGpB/49SDg\nAGBDoVMzWwF0cK7KMAu4xcxaJG0BfiZpBvCwOyYFnQurSsfSfv7nAAfX2FcxpwB/7zobUBx7NQ44\nVdJK7383kgM1U9K+7uztB7xtZq9LmlxKHniSEpjZn4GSzpWXf8UdlgckTTSze2sc1xLgF35vznPd\nAJZ5n0haRbLpU2RW7OpkHGkb3wS/3oM07lczY6k41ipsIn0Gni0rcXKdLQdBEARBEPRRGhsb2zl7\n06ZNKynXHQ5Wh2/zS8h0eCA1s82SDgdOA74LTCDFDlV8ePWtZpd6P6ebWWumbADwR+AD4OFOjKHA\nByV0vhDYBzjSzNqUDp3YpYR8W+a6je1zL9Iq10t16NMOSVNJDu29AGb2ktJBIqcD031lbHqpqp+0\nb6dafJCAGWZ2d4myuSQbjyCtaFWTryuuy8w+lPS/gGNI2+dqqfNTSQ8BXyOtJo3zoqx9P2a7TT9i\n+8pqycNEqiDgCjNbWEfd14G/zVx/yvOy/BJYIOkYM/tOHX0EvZFYBcknYbdc0lu+TQ86R9gtn+TR\nbt0Rg9Ug6VhPXwA8UVS+DDhR0jBJA4Hzgcd9u91AM/st8CNgrMfRvCbpG7DtVLZds42Z2R1+mMHY\nrHPlZW1mdiDpm/tyhxz8AZiQiaEZWkau4JTsCbzpztXJQEMJmUosAK7cVkE6ooY6HZVJsVtfBq7K\n5I0EtprZ/cDNpEMWIG0FLGy5fIY0/0M9PmkCJZB0eTZOqgyLSVtAC3FEQwrV/X0B8O1CDJOkUR6b\nBPAAcB5wNsnZKie/T1GbVVGKkxvh6Z1IjtIqvz5L0g1V6n/azJ43s5uA5cChVbrcCBzl6bNr1TPD\nAmCS64qkg4vv83L4Pf+upMK2yonAvxeJXUM6BCOcqyAIgiAIgi6mOxysF4DLJa0jBdbf6fkG2x4I\nryXF/rQAy83sQVLcSLOkFtLhANd6vYnAlZJWk7ZuDa9DpxeBYaUKfEvfT0hOXgtQiEMqtxI3m3Rw\nwWrgW8D6EjKl6he4nnQQyBqlY7qvKxZQOijhrgrjAbiadHjFcqVDFKYCh5FiulqAH5O2XQLcDTwq\nabHP/zRS7NgTwLoOLScOpfrBEJOBkyWtITmxYzy/YOuFwP3A0y4zF4/T8nkfAmwyszcqyA/JtplF\n0khfaSpmN1L81ipgBfAa8K9eNprqh4lMlrTWbfwh8EgJmaw+1wG3S1pGWs0qR7l74lckO6z0e+JO\nilabK4wV4HLgHtJ9/lKJeLOhwMsV9ArySK/6oYmgZsJuuSSPv8sThN3ySh7tpv74K+ke77S3mV1b\nVTgAQNJ8YLyZVXIYcoeke4Grzaya89gn8FWtNcA5ZrahjIwxtVvVCrqCOCwhn4TdyjMVeuszSm8K\nug9qJ+yWT3qz3SRhZh12VfVXB2s08GtgS9FvYQVBn0XSIaStmGuAi6zMh19S//ujEARBr2P4/sNp\n3dRaXTAIgqCHCAcrCIKakFTO9wqCIAiCIAiccg5Wd8RgBUEQBDuYPO5RD8JueSXslk/Cbvkkj3YL\nBysIgiAIgiAIgqCLiC2CQRC0I7YIBkEQBEEQVCe2CAZBEARBEARBEOxgwsEKgiDoA+Rxj3oQdssr\nYbd8EnbLJ3m0WzhYQRAEQRAEQRAEXUTEYAVB0I74HaygtzN8eAOtra/0tBpBEARBPyd+BysIgppI\nDlb8XQh6MyL+dwVBEAQ9TRxyEQRB0Kdp7mkFgjrIY2xBEHbLK2G3fJJHu+1QB0tSg6S1ZcqaJI3d\nkf2XQ9IBklZKeiSTt7EndCmHpJMkzaxBrtN6S7pK0i5lyi6SdLunp0iaWKWtiyRNqSLzfmd1rEah\nTb/HmmqQny7pT5Leq7H9ivPv5Q/WrjFIGiRpod97EySdIOk5vz603GclU7/qWCXtKukhSeslrZV0\nQ6bsEO9vTmf0DoIgCIIgCGqnO1aweuM+jrOAx8zsq5m83qhnLTrVo/dkYHAd9erVYUfMrZVJl2M+\ncPQn6KOe8mLGAmZmY81sLnAhcIOZjQW21theLTI3m9lngSOBEySdRur4RTP7HHCYpIM6qXvQ62ns\naQWCOmhsbOxpFYI6CLvlk7BbPsmj3brDwdpZ0ixJ6yQ9UGrlRNL5ktb460bPGyBppuetlnSV54/2\nVYBVkp6t80FxL+DNory3MvpM9D5bJP3G82ZKuk3SEkkvSxrv+btJWuS6rJZ0puc3+CrCTEkbJM2W\ndKrX3yDpCy43WNI9kpZKWiHpDFfjQ+DdGsbylrczzfVdKWmTtznYVzNafB4nSLoCGAU0SVrsdS92\nnZYCx2fa3kJ68K/EVpdD0n6S5rltWiR9sTClmbm9RtIyl5nieTMkTcrITJH0vXLyRXwMvF1tksxs\nmZm9UU0uw7b599WqwtyukLSbywyRNNftfF9G/42Shnn6KKXV2n2B+4CjvZ3LgHOB67N1vc4ASTdJ\nesbHfWmtYzWzrWb2uKc/AlYCnyoSe4P0GQiCIAiCIAi6GjPbYS+gAWgDvujX9wDf83QT6Rv9kcCr\nwDCSw7cYONPLHsu0tYe/LwXO9PQgYJc69JoGTC5TNgZ4ARjq13v5+0xgjqc/C7zk6YHA7p7eO5Pf\nQHpIH+PXzwL3ePpMYJ6nfwJc4Ok9gQ3ArkU6HQXcVePY9gRWk1YvxgO/
zJQN8fc/ZsY3IjP/OwFP\nArfXae9/A670tDL9vefvpxb08fIHgROAI4DmTDvPA/uXk/fr90v0PxJ4qIqO79UxrvnAcZ4e7Pfp\nScA73qeAp4AvZeZ3WMZ2f/D0ScD8TLszgfGZ+2WNpy8Ffpi5x5cDDXWMdS/gfwMHFuUvBr5QoZ7B\nlMyrycDi1etf/clOWF+hqampp1UI6iDslk/CbvmkN9mtqanJpkyZsu3l/48ofu3EjudPZrbU07OA\nK4CfZ8qPBprM7G0ASbOBE4HpwEGSbgN+DzwmaXdglJnNJ43ow84qI0nA4a5LKU4B5prZO97H5kzZ\n7zxvvaT9Ck0CMySdSHImR2XKNprZOk8/Dyzy9FrgQE+PA86Q9AO/HgQcQHK08P5WAJfVOMRZwC1m\n1iJpC/AzSTOAh83syYzOhVWlY2k//3OAg2vsq5hTgL93nQ0ojr0aB5wqaaX3vxtwsJnNlLSvpBHA\nfsDbZva6pMml5ElOYAfM7M/A1+vUvRJLgF/4vTnPdQNY5n0iaRXJpk+RWbGrk3GkbXwT/HoP0rhf\nLQhUG6ukgcD9wK1m9kpR8SbSZ+DZ8ipM7bzWQRAEQRAEfZjGxsZ2WxanTZtWUq47HCyrcg0lHkjN\nbLOkw4HTgO8CE0ixQxUfXn2r2aXez+lm1popG0BaXfgAeLgTYyjwQQmdLwT2AY40szalQyd2KSHf\nlrluY/vcCzjbzF6qQ592SJpKcmjvBTCzl5QOEjkdmC5pkZlNL1X1k/btlLJtcT8zzOzuEmVzSTYe\nAcypQb5aX12Gmf1U0kPA14AlksZ5Uda+H7Pdph+xffttycNEqiDgCjNbWI++zl3ABjP7pxJlvwQW\nSDrGzL7zCfoIehWNPa1AUAd5jC0Iwm55JeyWT/Jot+6IwWqQdKynLwCeKCpfBpwoaZh/634+8Lik\nvYGBZvZb4EfAWDPbArwm6Ruw7VS2XbONmdkdZnakpYMEWovK2szsQNI3998so+8fgAmZGJqhZeQK\nTsmewJvuXJ1M2upVLFOJBcCV2ypIR9RQp6MyKXbry8BVmbyRwFYzux+4mbTtEuA90qoIwDOk+R8q\naWeSk1Oq/cuzcVJlWAxMcvkBkoYUqvv7AuDbhRgmSaM8NgngAeA84GySs1VOfp+iNjtLu3qSzlLm\npL2SFaRPm9nzZnYTabveoVX62EjaGghpPJ1lATBJ0k7e/8HF93kVfaeTttReXUbkGuCScK6CIAiC\nIAi6nu5wsF4ALpe0jhQTcqfnG4A7QdeSfsSlBVhuZg+SYnCaJbWQDge41utNBK6UtJq0dWt4HTq9\nSIo56oBv6fsJyclrAW7J6psV9ffZpIMLVgPfAtaXkClVv8D1pINA1igd031dsYAflHBXhfEAXE06\nvGK5H6IwFTgMWObj+DFp2yXA3cCjkhb7/E8jxbY9Aazr0HLiUOAvVXSYDJwsaQ3JiR3j+QVbLyRt\nW3vaZeYCu3vZOmAIsMn8MIoy8kOybWaRNNJXmjog6aeSXgN2VTqu/cdeNJrqh4lMVjryfDUpru6R\nEjJZfa4Dbpe0jLSaVY5y98SvSHZY6ffEnRStNpcbq6T9gR8CYzIHc3y7SGwo8HIFvYJc0tzTCgR1\nkMffdwnCbnkl7JZP8mg3pVCZ/oXHO+1tZtdWFQ4AkDSfdCBDJYchd0i6F7jazKo5j30Cj0FcA5xj\nZhvKyFg37sAMuoxm+s82QdFX/nc1NzfncvtLfyfslk/CbvmkN9tNEmbWYVdVf3WwRgO/BrZY+9/C\nCoI+i6RDSFsx1wAXWZkPfzhYQe+n7zhYQRAEQX4JBysIgppIDlYQ9F6GD2+gtfWVnlYjCIIg6OeU\nc7C6IwYrCIKcUeo3HeLVu19NTU09rkN3vfqSc5XH2IIg7JZXwm75JI92CwcrCIIgCIIgCIKgi4gt\ngkEQtEOSxd+FIAiCIAiCysQWwSAIgiAIgiAIgh1MOFhBEAR9gDzuUQ/Cbnkl7JZPwm75JI92Cwcr\nCIIgCIIgCIKgi4gYrCAI2hExWEEQBEEQBNUpF4O1U08oEwRB70bq8LciCIKgJMP3H07rptaeViMI\ngqDXECtYQRC0Q5Ixtae1CDrNRuCgnlYi6DR9wW5T02/n9Seam5tpbGzsaTWCThJ2yye92W5ximAQ\nBEEQBEEQBMEOZoc6WJIaJK0tU9YkaeyO7L8ckg6QtFLSI5m8jT2hSzkknSRpZg1yndZb0lWSdilT\ndpGk2z09RdLEKm1dJGlKFZn3O6tjNQpt+j3WVIP8dEl/kvReje1XnH8vf7B2jUHSIEkL/d6bIOkE\nSc/59aHlPiuZ+rWOdaykNZJelHRrJv8Q729OZ/QOckLeV0H6K2G3XNJbv00PKhN2yyd5tFt3rGD1\nxn0DZwGPmdlXM3m9Uc9adKpH78nA4Drq1avDjphbK5Mux3zg6E/QRz3lxYwFzMzGmtlc4ELgBjMb\nC2ytsb1aZP4FuMTMDgEOkXQaqeMXzexzwGGS4rEuCIIgCIJgB9AdDtbOkmZJWifpgVIrJ5LO92/c\n10i60fMGSJrpeaslXeX5o30VYJWkZ+t8UNwLeLMo762MPhO9zxZJv/G8mZJuk7RE0suSxnv+bpIW\nuS6rJZ3p+Q2S1nu9DZJmSzrV62+Q9AWXGyzpHklLJa2QdIar8SHwbg1jecvbmeb6rpS0ydscLOkh\nz1/jqyZXAKOAJkmLve7FrtNS4PhM21tID/6V2OpySNpP0jy3TYukLxamNDO310ha5jJTPG+GpEkZ\nmSmSvldOvoiPgberTZKZLTOzN6rJZdg2/75aVZjbFZJ2c5khkua6ne/L6L9R0jBPH6W0WrsvcB9w\ntLdzGXAucH22rtcZIOkmSc/4uC+tdaySRgBDzGy5Z91L+kIhyxukz0DQl+hVa/BBzYTdckkef5cn\nCLvllTzarTtOEfwMcLGZLZV0DzAJ+HmhUNJI4EbgSGAzsNCdlE3A/mb2eZfbw6vMJn3rP1/SIOpz\nEgcCbdkMMzvW+xkD/BA4zszekZR9EB1hZsdL+ixpRWQe8FfgLDPbImlvYKmXAYwGzjazdZKeBc7z\n+md6H+OBfwQWm9klkvYElklaZGZPA0+7TkcB3zGzy4oHUtDbzKYAU7yN/wD+GfgK8LqZfd3bGWJm\n70u6Gmj08Y0AppLm/z2gGVjpbd5SbSLN7IHM5e1As5mNlyRg94KY938qcLCZHePl8yWdAMwBbgXu\ncPlzgXHl5M3sSdxpM7NNwDne/kjg7sJ4PwnZ+Qe+D0wys6clDSbZHOAIYAzQCiyR9CUze4qOq0xm\nZm9J+gfg+2ZWcMKPAx40s3mSGjLylwCbzexYv8eXSHrMzF6tYaz7kz47BTZ5XpY20megPNmNiAcS\n25iCIAiCIOj3NDc31+TwdYeD9SczW+rpWcAVZBws0ratJjN7G0DSbOBEYDpwkKTbgN8Dj0naHRhl\nZvMBzOzDzirjD+qHuy6lOAW
Ya2bveB+bM2W/87z1kvYrNAnMkHQi6cF1VKZso5mt8/TzwCJPryU9\ntgKMA86Q9AO/HgQcAGwodGpmK4AOzlUZZgG3mFmLpC3AzyTNAB52x6Sgc2FV6Vjaz/8c4OAa+yrm\nFODvXWcDimOvxgGnSlrp/e9GcqBmStrXnb39gLfN7HVJk0vJA09SAjP7M/CJnasSLAF+4ffmPNcN\nYJn3iaRVJJs+RWbFrk7GkbbxTfDrPUjjfrUg8AnHuon0GXi2rMTJdbYc9BzhBOeTsFsuyWNMSBB2\nyyu9yW6NjY3t9Jk2bVpJue5wsDp8m19CpsMDqZltlnQ4cBrwXWACKXao4sOrbzW71Ps53cxaM2UD\ngD8CHwAPd2IMBT4oofOFwD7AkWbWpnToxC4l5Nsy121sn3uRVrleqkOfdkiaSnJo7wUws5eUDhI5\nHZjuK2PTS1X9pH071eKDBMwws7tLlM0l2XgEaUWrmny3xcyZ2U8lPQR8jbSaNM6Lsvb9mO02/Yjt\nK6slDxOpgoArzGxhHXVfB/42c/0pz8vyS2CBpGPM7Dt19BEEQRAEQRCUoTtisBokHevpC4AnisqX\nASdKGiZpIHA+8LhvtxtoZr8FfgSMNbMtwGuSvgHbTmXbNduYmd1hZkf6QQKtRWVtZnYg6Zv7b5bR\n9w/AhEwMzdAycgWnZE/gTXeuTgYaSshUYgFw5bYK0hE11OmoTIrd+jJwVSZvJLDVzO4HbiYdsgBp\nK2Bhy+UzpPkfKmlnkpNTqv3Ls3FSZVhM2gJaiCMaUqju7wuAbxdimCSN8tgkgAeA84CzSc5WOfl9\nitrsLO3qSTpL0g0VK0ifNrPnzewmYDlwaJU+NgJHefrsOnRcAEyStJP3f3DxfV4Ov+fflVTYVjkR\n+PcisWtIh2CEc9WXiFiefBJ2yyV5jAkJwm55JY926w4H6wXgcknrSIH1d3q+wbYHwmtJsT8twHIz\ne5AUN9IsqYV0OMC1Xm8icKWk1aStW8Pr0OlFYFipAt/S9xOSk9cCFOKQyq3EzSYdXLAa+BawvoRM\nqfoFricdBLJG6Zju64oF/KCEuyqMB+Bq0uEVy/0QhanAYaSYrhbgx6RtlwB3A49KWuzzP40UO/YE\nsK5Dy4lDgb9U0WEycLKkNSQndoznF2y9ELgfeNpl5uJxWj7vQ4BNhcMoysgPybaZRdJIX2nqgKSf\nSnoN2FXpuPYfe9Foqh8mMlnSWrfxh8AjJWSy+lwH3C5pGWk1qxzl7olfkeyw0u+JOylaba40VuBy\n4B7Sff6SmT1aVD4UeLmCXkEQBEEQBEGdqL/9+jqAxzvtbWbXVhUOAJA0HxhvZpUchtwh6V7gajOr\n5jz2CXxVaw1wjpltKCNjTO1WtYIgyDNToT8+SwRBEEjCzDrsquqvDtZo4NfAlqLfwgqCPoukQ0hb\nMdcAF1mZD7+k/vdHIQiCuhm+/3BaN7VWFwyCIOhjhIMVBEFNSCrnewW9mObm5l510lJQG2G3fBJ2\nyydht3zSm+1WzsHqjhisIAiCIAiCIAiCfkGsYAVB0I5YwQqCIAiCIKhOrGAFQRAEQRAEQRDsYMLB\nCoIg6APk8XdCgrBbXgm75ZOwWz7Jo93CwQqCIAiCIAiCIOgiIgYrCIJ2RAxWEARBEARBdcrFYO3U\nE8oEQdC7Sb9HHATBJ2X48AZaW1/paTWCIAiCbiS2CAZBUAKLV+5eTb1Ah3gVv95441UqkcfYgiDs\nllfCbvkkj3YLBysIgiAIgiAIgqCL6BIHS1KDpLVlypokje2KfjqLpAMkrZT0SCZvY0/oUg5JJ0ma\nWYPcRn8vO9dF8kMkvSbp9mwbkoZ1Qreqc+X2PaBC+UWS/qnWPmvU66LCuCRNkTSxivzRklr8tVrS\nN2voY6akE6uUj++k3idIes7vyb+RdLOktZJ+6uP4XpX6tYz1y5Ke9XEul3Rypuz7kl6oZfxBHmns\naQWCOmhsbOxpFYI6CLvlk7BbPsmj3boyBsu6sK2u4izgMTO7NpPXG/WsRScrky7H9cDjdfTzSeR3\ndDv1shY4yszaJI0AnpP0P83s427W40LgBjO7H0DSpcBQMzNJU7qoj7eAr5tZq6T/BiwAPgVgZrdI\nehK4GZjTRf0FQRAEQRAEGbpyi+DOkmZJWifpAUm7FAtIOl/SGn/d6HkDfDVgjX/rfpXnj5a0UNIq\n/0b+oDp02gt4syjvrYw+E73PFkm/8byZkm6TtETSy4VVCkm7SVqUWR040/MbJK33ehskzZZ0qtff\nIOkLLjdY0j2SlkpaIekMV+ND4N0axvJWcYakuzMrM29K+n89/yhgP+Cx4irAld7/akmHZMb2r26D\nVZL+e7k+S/AX4GNv5yve9ipJC0vou4+k/ynpGX8dp8RGSXtk5F6UtG8p+RL9bwG2VlLQzP5qZm1+\nuSvwbg3O1WaSbZB0o688rZJ0U0bmpBL3yUmSHsyM5Z/8PrsEOBe4XtJ9kv4d2B1YIWlC0Tx9WtIj\nvgL1eMFOwPs1jHW1mbV6+nlgF0k7Z0RagT2rjD3IJc09rUBQB3mMLQjCbnkl7JZP8mi3rlzB+gxw\nsZktlfT/s3fvUXZUZf7/3x8yiQiBhBAIkDEX4ncwcUhMAiIjQiOgw8yAiIAgN10sQC6CXBwZvBAE\nEUX4fqP+wOEyMUIECQNMEDMkQDq6IBFyJyQEgTgQmXARgmQGQejP74/aB6tP17l006G7Os9rrV59\nTtVTVc+uXZ2cffbeVTcApwNXVVZK2hm4HJhI9uF1bmqkrAOG2x6f4ioftGeQfds/S9IAutYY7Ae0\n5RfY3isdZxxwIbC37ZclDc6F7WT7o5LGArOA24E/AYfZ3ihpe2BhWgcwBviM7VWSFgFHp+0PTcc4\nHPgacJ/tkyQNAh6SdK/tBcCClNNk4FTbp1QXpJJ31bKT03YjgNnANEkCvk/WW3JQwTl53vZkSacB\n5wOnAN8ANuTqYFCtYxbkcETaZihwLbCP7aerzmfFVOAq2w9Keh9wj+1xku4EPg1Ml/Rh4He2X5A0\nozoeGFd1/CsrryWdmi3ytdUHTvv9N2A08LkmynVO2m4IWb1/IL3fNhdWdJ1AQY+d7Rsk7QPcZfv2\ntK8/2p6UXud7sK4luw6eTHlfAxxgO//3VLOsuZgjgCW2/5xb3EZTf/dTcq9biOFnIYQQQtjctba2\nNtXg684G1tO2F6bXNwFfItfAAvYE5tl+CSB9eN4XuBQYLWkq8EtgjqSBwC62ZwHYfqOzyaSGxoSU\nS5GPAzNtv5yOsSG37s60bLWkHSu7BL6jbF5OG7BLbt1a26vS60eBe9PrR4BR6fUngEMkfSW9HwCM\nANZUDmp7MVmDpzPl3BKYCZxpe52kM4C7bT+bnQKq77d9R/q9mKxRA3Ag8Pa8HNvN9KhEg1wQAAAg\nAElEQVRV+wgw3/bTaR8bCmIOBMamugEYKGkr4Fbgm8B04Gj+MnytVnwh2/9aZ91DwN9K
2g24R9I8\n239solyvAK9Juh64G/hFbl3RdfKOSNoa+DtgZq7c/avj6pU17eeDwHfo2Mh+EdhB0uAadZRMaT7p\n0Eu09HQCoQvKOLcgRL2VVdRbOfWmemtpaWmXz8UXX1wYtynnYBXNu+nwcB3bGyRNAD4JfBE4Evhy\nUWy7HUmnAyen4/xDZVhUWrcF8BTwOtmH4s56vSDnY4GhwMQ0l2ctsGVBfFvufb63QGS9XL/tQj71\nXAPcZnteer83sE86P9uQDd181faFVbm+Rfc/B63Rw5ME7FXVowKwQNmQ0KFk8+a+VS9e7+AZTbbX\nSHoS+D9kjcxG8W+lXqQDyK7NM9NrKL5O3qR9b2uHobINbAG8XOnZ6gpJf03Wm3a87d/l19l+TdIt\nwFOSPmu7w1DOEEIIIYTQdd05B2ukpMqQss8Bv65a/xCwr6QhkvoBxwDz03C7frbvAL4OTLK9EXhG\n0qcAJA2Q9N78zmxfbXui7Un5xlVa12Z7FLCIXM9MlfuBI9MQMCRtVyOu8sF5ENnwujZld2YbWRBT\nzz3AWW9vIH2oiW3qSr1VA21fUVlm+zjbo2zvSjYE8Ke5xlUtc4EzcvvtMLxP2fyznevsYyHwMUkj\nU3zR+ZwDnJ3b54TcujvIejxX5XpW6sU3TdKodM2R8ns/8Nv0frrSPLka224NDLb9n8C5wPhaoen3\nfwHjJPVP5/GAGvH5bd5m+1VgbRreV8mh1jGL8h1E1sv21VyPcn79YLK/ieHRuOprWns6gdAFZZxb\nEKLeyirqrZzKWG/d2cB6DDhD0iqym0v8OC03QGoEXUD2KWAp8LDtu4DhQKukpcCNKQbgBLIbMiwH\nHgCGdSGnx4HC25KnIX3fJmvkLQUqc3lq9cTNAPZM+RwHrC6IKdq+4hKy3qQVym6z/q3qAEmTJdWc\nU1PgPGB3ZTe5WCKp0fDCWrldCgxRdsvwpVSNNUpD1cYAL9Xcsf0i2fDGO9I+bikIOxvYQ9kNNlYC\np+bW3UrWS3hLk/EdSDq1xjnYB1guaUk6zim54YHjgWfr7HYb4Bep3n8FnJOWF14nttelY6xMZVlS\nHVPnfcVxwEnKbqqxEji0OqBOWc8kq6tv5q6Lobn1g4DnbNe9WUYIIYQQQuga2T19B+1NJ8132r7q\nNu2hk9J8ni/YPr+nc+lOkrYBrre92TwXKg13nGq76I6MlRj3/J31Q+grRF/+fzaEEDZnkrDdYURS\nX29gjQF+Amy0fXAPpxNCj5J0Hlkv4RW2b64T13f/UQjhXTZs2EjWr/9dT6cRQghhE9gsG1ghhM6T\n5Ph3oXxaW1t71Z2WQnOi3sop6q2cot7KqTfXW60GVnfOwQohhBBCCCGEzVr0YIUQ2okerBBCCCGE\nxqIHK4QQQgghhBA2sWhghRBCH1DG54SEqLeyinorp6i3cipjvUUDK4QQQgghhBC6SczBCiG0E3Ow\nQgghhBAaqzUH6696IpkQQu8mdfi3IoQQQgibyLDhw1i/bn1PpxG6SfRghRDakWSm9HQWodPWAqN7\nOonQaVFv5RT1Vk69ud6mQHwmLxbPwQohhBBCCCGEzVi3NLAkjZT0SI118yRN6o7jdJakEZKWSJqd\nW7a2J3KpRdJ+kqY1Ebc2/a55rqvit5H0jKQf5PchaUgncmt4rlL9jqiz/kRJP2z2mE3mdWKlXJIu\nknRCg/g9JS1NP8slfbaJY0yTtG+D9Yd3Mu99JK1M1+R7JF0h6RFJ303lOLfB9g3LmuL+RdJvJa2W\n9Inc8vMkPdZM+UMJ9dZvZUN9UW/lFPVWTlFvpdRbe6/q6c4erN7Yr3kYMMf2wbllvTHPZnJyjde1\nXALM78Jx3kn8pt5PVz0CTLY9Efgk8P9J6tcDeRwLXGZ7ku3XgZOB8ba/2l0HkDQWOAoYCxwMXK00\nocr2lcCJwBnddbwQQgghhNBedzaw+ku6SdIqSbdK2rI6QNIxklakn8vTsi1Sb8CK1Ltwdlo+RtJc\nScskLZLUle8dBgPPVy17IZfPCemYSyVNT8umSZoq6QFJT1R6KSRtLenelMtySYem5SNTT8E0SWsk\nzZB0UNp+jaQ9UtxWkm6QtFDSYkmHpDTeAF5poiwvVC+QdF2uZ+Z5Sd9IyycDOwJzqjcBzkrHXy7p\nb3Jl+7dUB8skfbrWMQv8AXgr7efv076XSZpbkO9QSbdJ+k362VuZtZK2zcU9LmmHoviC428EXquX\noO0/2W5Lb98LvGL7rQbl2kBWN0i6PPU8LZP0vVzMfgXXyX6S7sqV5YfpOjuJrOFziaQbJf0HMBBY\nLOnIqvO0q6TZkh6WNL9ST8CrjcoKfAq4xfabtn8H/Bb4cG79emBQg32EMupVffOhaVFv5RT1Vk5R\nb6VUxudgdeddBHcDvmB7oaQbgNOBqyorJe0MXA5MJPvwOjc1UtYBw22PT3GVD9ozyL7tnyVpAF1r\nDPYD2vILbO+VjjMOuBDY2/bLkgbnwnay/dHUGzALuB34E3CY7Y2StgcWpnUAY4DP2F4laRFwdNr+\n0HSMw4GvAffZPknSIOAhSffaXgAsSDlNBk61fUp1QSp5Vy07OW03ApgNTEu9Fd8n6y05qOCcPG97\nsqTTgPOBU4BvABtydTCo1jELcjgibTMUuBbYx/bTVeezYipwle0HJb0PuMf2OEl3Ap8Gpkv6MPA7\n2y9ImlEdD4yrOv6VldeSTs0W+drqA6f9/hvZAIHPNVGuc9J2Q8jq/QPp/ba5sKLrBAp67GzfIGkf\n4C7bt6d9/dH2pPT6olz4tWTXwZMp72uAA2zn/55qlXU46XpKfp+WVbTRzN/9vNzrUcSwihBCCCFs\n9lpbW5tq8HVnA+tp2wvT65uAL5FrYAF7AvNsvwSQPjzvC1wKjJY0FfglMEfSQGAX27MAbL/R2WRS\nQ2NCyqXIx4GZtl9Ox9iQW3dnWrZa0o6VXQLfUTYvpw3YJbdure1V6fWjwL3p9SNkH08BPgEcIukr\n6f0AYASwpnJQ24vJGjydKeeWwEzgTNvrJJ0B3G372ewUUH1nkzvS78VkjRqAA4G35+XYbqZHrdpH\ngPm2n0772FAQcyAwNtUNwEBJWwG3At8EpgNHAz9vEF/I9r/WWfcQ8LeSdgPukTTP9h+bKNcrwGuS\nrgfuBn6RW1d0nbwjkrYG/g6YmSt3/+q4emVt4EVgB0mDa9RRZv8u7j30nGgEl1PUWzlFvZVT1Fsp\n9aY5WC0tLe3yufjiiwvjurOBVf2tfdG8mw63MbS9QdIEsrkxXwSOBL5cFNtuR9LpZHNYDPyD7fW5\ndVsATwGvk30o7qzXC3I+FhgKTLTdpuwGEFsWxLfl3ud7C0TWy/XbLuRTzzXAbbYrfQ57A/uk87MN\n2dDNV21fWJXrW3T/c9AaPTxJwF62/1y1fIGyIaFDyebNfatevN7BM5psr5H0JPB/yBqZjeLfSr1I\nB5Bdm2em11B8nbxJ+97WDkNlG9gCeLnSs9U
Fvwfel3v/12kZALZfk3QL8JSkz9ruMJQzhBBCCCF0\nXXfOwRopqTKk7HPAr6vWPwTsK2mIshsMHAPMT8Pt+tm+A/g6MMn2RuAZSZ8CkDRA0nvzO7N9te2J\n6YYB66vWtdkeBSwi1zNT5X7gyDQEDEnb1YirfHAeRDa8rk3S/sDIgph67gHOensD6UNNbFNX6q0a\naPuKyjLbx9keZXtXsiGAP801rmqZS+7GB0XD+5TNP9u5zj4WAh+TNDLFF53POcDZuX1OyK27g6zH\nc1WuZ6VefNMkjUrXHCm/95PNTULSdKV5cjW23RoYbPs/gXOB8bVC0+//AsZJ6p/O4wE14vPbvM32\nq8BaSUfkcqh1zCKzgKPT38xosrI+lNvXYLK/ieHRuOpjYm5BOUW9lVPUWzlFvZVSGedgdWcD6zHg\nDEmryG4u8eO03ACpEXQB0AosBR62fRfZ/JBWSUuBG1MMwAlkN2RYDjwADOtCTo8DhbclT0P6vk3W\nyFsKVOby1OqJmwHsmfI5DlhdEFO0fcUlZL1JK5TdZv1b1QGSJkvqMH+ojvOA3ZXd5GKJpEbDC2vl\ndikwRNktw5cCLVV5iWye2Us1d2y/SDa88Y60j1sKws4G9lB2g42VwKm5dbeS9RLe0mR8B5JOrXEO\n9gGWS1qSjnNKbnjgeODZOrvdBvhFqvdfAeek5YXXie116RgrU1mWVMfUeV9xHHCSsptqrAQOrQ6o\nVdZ0Xd8KrCIbcnu62z+5cBDwnO1GN8sIIYQQQghdoL781Og032l72xc0DA41Sfog2Q1Mzu/pXLqT\npG2A621vNs+FSsMdp9ouuiNjJcZMefdyCiGEEDZ7U6AvfybvqyRhu8OIpL7ewBoD/ATYWPUsrBA2\nO5LOI+slvML2zXXi+u4/CiGEEEIvNGz4MNavW984MPQqm2UDK4TQeZIc/y6UT2tra6+601JoTtRb\nOUW9lVPUWzn15nqr1cDqzjlYIYQQQgghhLBZix6sEEI70YMVQgghhNBY9GCFEEIIIYQQwiYWDawQ\nQugDyvickBD1VlZRb+UU9VZOZay3aGCFEEIIIYQQQjeJOVghhHZiDlYIIYQQQmO15mD9VU8kE0Lo\n3aQO/1aEsFkaNmwk69f/rqfTCCGEUCIxRDCEUMDxU7qfeb0gh77389xz/8WmVMa5BSHqrayi3sqp\njPUWDawQQgghhBBC6Cbd0sCSNFLSIzXWzZM0qTuO01mSRkhaIml2btnansilFkn7SZrWRNza9Lvm\nua6K30bSM5J+kN+HpCGdyK3huUr1O6LO+hMl/bDZYzaZ14mVckm6SNIJDeKHSLpf0qv589Fgm2mS\n9m2w/vBO5r2PpJXpmnyPpCskPSLpu6kc5zbYvpmyHihpkaTlkh6WtH9u3XmSHpP02c7kHcqipacT\nCF3Q0tLS0ymELoh6K6eot3IqY711Zw+Wu3Ff3eUwYI7tg3PLemOezeTkGq9ruQSY34XjvJP4Tb2f\nrvoT8HXgvB7O41jgMtuTbL8OnAyMt/3VbjzGC8A/2Z4AfB64sbLC9pXAicAZ3Xi8EEIIIYSQ050N\nrP6SbpK0StKtkrasDpB0jKQV6efytGyL1BuwIn3rfnZaPkbSXEnL0jfyo7uQ02Dg+aplL+TyOSEd\nc6mk6WnZNElTJT0g6YlKL4WkrSXdm+sdODQtHylpddpujaQZkg5K26+RtEeK20rSDZIWSlos6ZCU\nxhvAK02U5YXqBZKuS7kvlfS8pG+k5ZOBHYE51ZsAZ6XjL5f0N7my/Vuqg2WSPl3rmAX+ALyV9vP3\nad/LJM0tyHeopNsk/Sb97K3MWknb5uIel7RDUXzB8TcCr9VL0Pb/2n4QeL2J8lRsIKsbJF2eep6W\nSfpeLma/gutkP0l35cryw3SdnQQcBVwi6UZJ/wEMBBZLOrLqPO0qaXbqgZpfqSfg1SbKutz2+vT6\nUWBLSf1zIeuBQZ04D6E0Wns6gdAFZZxbEKLeyirqrZzKWG/deRfB3YAv2F4o6QbgdOCqykpJOwOX\nAxPJPrzOTY2UdcBw2+NTXOWD9gyyb/tnSRpA1xqD/YC2/ALbe6XjjAMuBPa2/bKkwbmwnWx/VNJY\nYBZwO1kvyGG2N0raHliY1gGMAT5je5WkRcDRaftD0zEOB74G3Gf7JEmDgIck3Wt7AbAg5TQZONX2\nKdUFqeRdtezktN0IYDYwTZKA75P1lhxUcE6etz1Z0mnA+cApwDeADbk6GFTrmAU5HJG2GQpcC+xj\n++mq81kxFbjK9oOS3gfcY3ucpDuBTwPTJX0Y+J3tFyTNqI4HxlUd/8rKa0mnZot8baO8myjXOWmf\nQ8jq/QPp/ba5sKLrBAp67GzfIGkf4C7bt6d9/dH2pPT6olz4tWTXwZPpfFwDHGA7//fUsKySjgCW\n2P5zbnEbTf3dT8m9biGGn4UQQghhc9fa2tpUg687G1hP216YXt8EfIlcAwvYE5hn+yWA9OF5X+BS\nYLSkqcAvgTmSBgK72J4FYPuNziaTGhoTUi5FPg7MtP1yOsaG3Lo707LVknas7BL4jrJ5OW3ALrl1\na22vSq8fBe5Nrx8BRqXXnwAOkfSV9H4AMAJYUzmo7cVkDZ7OlHNLYCZwpu11ks4A7rb9bHYKqL7f\n9h3p92KyRg3AgcDb83JsN9OjVu0jwHzbT6d9bCiIORAYm+oGYKCkrYBbgW8C04GjgZ83iC9k+1+7\nkHcjrwCvSboeuBv4RW5d0XXyjkjaGvg7YGau3P2r4xqVVdIHge/QsZH9IrCDpME16iiZ0nzSoZdo\n6ekEQheUcW5BiHorq6i3cupN9dbS0tIun4svvrgwrjsbWNXf2hfNu+nwcB3bGyRNAD4JfBE4Evhy\nUWy7HUmnk81hMfAPlWFRad0WwFNkQ8Lu7kQZKvJDySp5HAsMBSbablN2A4gtC+Lbcu/zvQUi6+X6\nbRfyqeca4Dbb89L7vYF90vnZhmzo5qu2L6zK9S26/zlojR6eJGCvqh4VgAXKhoQOJZs396168XoX\nn9Fk+63Ui3QA2bV5ZnoNxdfJm7Tvbe0wVLaBLYCXKz1bXSHpr8l60463/bv8OtuvSboFeErSZ213\nGMoZQgghhBC6rjvnYI2UVBlS9jng11XrHwL2VXZHt37AMcD8NNyun+07yG5EMMn2RuAZSZ8CkDRA\n0nvzO7N9te2J6YYB66vWtdkeBSwi1zNT5X7gyDQEDEnb1YirfHAeRDa8rk3ZndlGFsTUcw9w1tsb\nSB9qYpu6Um/VQNtXVJbZPs72KNu7kg0B/GmucVXLXHI3Piga3qds/tnOdfaxEPiYpJEpvuh8zgHO\nzu1zQm7dHWQ9nqtyPSv14ruqXV1Jmq40T64wOOtRGmz7P4FzgfEN9vtfwDhJ/dN5PKBGfIdcAGy/\nCqxNw/sqOdQ6ZlG+g8h62b6a61HOrx9M9jcxPBpXfU1rTycQuqCMcwtC1FtZRb2VUxnrrTsbWI8B\nZ0
haRXZziR+n5QZIjaALyD4FLAUetn0XMBxolbSU7I5nF6TtTiC7IcNy4AFgWBdyehwovC15GtL3\nbbJG3lKgMpenVk/cDGDPlM9xwOqCmKLtKy4h601aoew269+qDpA0WVJn5g+dB+yu7CYXSyQ1Gl5Y\nK7dLgSHKbhm+lKqxRmmo2hjgpZo7tl8kG954R9rHLQVhZwN7KLvBxkrg1Ny6W8l6CW9pMr4DSafW\nOgepx/FK4ERJT0v6QFo1Hni2zm63AX6R6v1XwDlpeeF1YntdKsvKVJYl1TF13lccB5yk7KYaK4FD\nC8pTq6xnktXVN3PXxdDc+kHAc7br3iwjhBBCCCF0jeyevoP2ppPmO21v+4KGwaGmNJ/nC7bP7+lc\nupOkbYDrbW82z4VKwx2n2i66I2Mlxj1/Z/0QegvRl/+fDCGE0HWSsN1hRFJfb2CNAX4CbKx6FlYI\nmx1J55H1El5h++Y6cdHACuFt0cAKIYRQrFYDqzuHCPY6tp+0/bFoXIWQ3dI+zVms2bj6C8VP/MQP\nYtiw/HTb7lfGuQUh6q2sot7KqYz11t13kQsh9AHxjX35tLa29qpb2YYQQgibqz49RDCE0HmSHP8u\nhBBCCCHUt1kOEQwhhBBCCCGEd1M0sEIIoQ8o4xj1EPVWVlFv5RT1Vk5lrLdoYIUQQgghhBBCN4k5\nWCGEdmIOVgghhBBCYzEHK4QQQgghhBA2sbhNewihA6nDlzEhhBBCqQwbPoz169a//T4eZ1FOZay3\naGCFEDqa0tMJhE5bC4zu6SRCp0W9lVPUWyk8N+W5nk4hbKZiDlYIoR1JjgZWCCGE0psC8Tk3bEqb\ndA6WpJGSHqmxbp6kSd1xnM6SNELSEkmzc8vW9kQutUjaT9K0JuLWpt81z3VV/DaSnpH0g/w+JA3p\nRG4Nz1Wq3xF11p8o6YfNHrPJvE6slEvSRZJOaBA/RNL9kl7Nn48G20yTtG+D9Yd3Mu99JK1M1+R7\nJF0h6RFJ303lOLfB9g3LmuL+RdJvJa2W9Inc8vMkPSbps53JO4QQQgghNK87b3LRG78iOAyYY/vg\n3LLemGczObnG61ouAeZ34TjvJH5T76er/gR8HTivh/M4FrjM9iTbrwMnA+Ntf7W7DiBpLHAUMBY4\nGLhaaUKV7SuBE4Ezuut4oRfpVV8dhaZFvZVT1FsplfF5SqGc9dadDaz+km6StErSrZK2rA6QdIyk\nFenn8rRsi9QbsELScklnp+VjJM2VtEzSIkldGe08GHi+atkLuXxOSMdcKml6WjZN0lRJD0h6otJL\nIWlrSfemXJZLOjQtH5l6CqZJWiNphqSD0vZrJO2R4raSdIOkhZIWSzokpfEG8EoTZXmheoGk61Lu\nSyU9L+kbaflkYEdgTvUmwFnp+Msl/U2ubP+W6mCZpE/XOmaBPwBvpf38fdr3MklzC/IdKuk2Sb9J\nP3srs1bStrm4xyXtUBRfcPyNwGv1ErT9v7YfBF5vojwVG8jqBkmXp56nZZK+l4vZr+A62U/SXbmy\n/DBdZyeRNXwukXSjpP8ABgKLJR1ZdZ52lTRb0sOS5lfqCXi1UVmBTwG32H7T9u+A3wIfzq1fDwzq\nxHkIIYQQQgid0J03udgN+ILthZJuAE4HrqqslLQzcDkwkezD69zUSFkHDLc9PsVVPmjPIPu2f5ak\nAXStMdgPaMsvsL1XOs444EJgb9svSxqcC9vJ9kdTb8As4HayXpDDbG+UtD2wMK0DGAN8xvYqSYuA\no9P2h6ZjHA58DbjP9kmSBgEPSbrX9gJgQcppMnCq7VOqC1LJu2rZyWm7EcBsYFrqrfg+WW/JQQXn\n5HnbkyWdBpwPnAJ8A9iQq4NBtY5ZkMMRaZuhwLXAPrafrjqfFVOBq2w/KOl9wD22x0m6E/g0MF3S\nh4Hf2X5B0ozqeGBc1fGvrLyWdGq2yNc2yruJcp2T9jmErN4/kN5vmwsruk6goMfO9g2S9gHusn17\n2tcfbU9Kry/KhV9Ldh08mc7HNcABtvN/T7XKOpx0PSW/T8sq2mjm735e7vUoYjJ3GUQdlVPUWzlF\nvZVS2e5EFzK9qd5aW1ub6lHrzgbW07YXptc3AV8i18AC9gTm2X4JIH143he4FBgtaSrwS2COpIHA\nLrZnAdh+o7PJpIbGhJRLkY8DM22/nI6xIbfuzrRstaQdK7sEvqNsXk4bsEtu3Vrbq9LrR4F70+tH\nyD6eAnwCOETSV9L7AcAIYE3loLYXkzV4OlPOLYGZwJm210k6A7jb9rPZKaB64t0d6fdiskYNwIHA\n2/NybDfTo1btI8B820+nfWwoiDkQGJvqBmCgpK2AW4FvAtOBo4GfN4gvZPtfu5B3I68Ar0m6Hrgb\n+EVuXdF18o5I2hr4O2Bmrtz9q+PeQVlfBHaQNLhGHWX27+LeQwghhBD6qJaWlnYNvosvvrgwblPO\nwSqad9PhLhvpQ94EoBX4InBdrdh2O5JOT0PjlkjaqWrdFmQjpMeSfSjurPxQskoexwJDgYm2J5IN\nPdyyIL4t9z7fWyCyXq6J6We07TW8c9cAt9mu9DnsDZwp6SmynqzjJV1WULa36P7b9Dd6eJKAvXLn\nYEQavrcAGJN6wQ4D/r1efDfnXJftt8iG2N0G/BPwn7nVRdfJm7T/u+owVLaBLYCX0zytSrn/thPb\n/x54X+79X6dlANh+DbgFeEpSUQ9nKKuYE1JOUW/lFPVWSmWcyxPKWW/d2cAaKakypOxzwK+r1j8E\n7Kvsjm79gGOA+Wm4XT/bd5DdiGCS7Y3AM5I+BSBpgKT35ndm++r04XOS7fVV69psjwIWkeuZqXI/\ncGQaAoak7WrEVT44DyIbXtcmaX9gZEFMPfcAZ729gfShJrapK/VWDbR9RWWZ7eNsj7K9K9kQwJ/a\nvrDBruaSu/FB0fA+ZfPPdq6zj4XAxySNTPFF53MOcHZunxNy6+4g6/FcletZqRffVe3qStJ0pXly\nhcFZj9Jg2/8JnAuMb7Df/wLGSeqfzuMBzeYCYPtVYK2kI3I51DpmkVnA0elvZjTwfrK/vcq+BpP9\nTQy33WGeXAghhBBCeGe6s4H1GHCGpFVkN5f4cVpugNQIuoCsp2op8LDtu8jmh7RKWgrcmGIATiC7\nIcNy4AFgWBdyehwovC15GtL3bbJG3lKgMpenVk/cDGDPlM9xwOqCmKLtKy4huxHICmW3Wf9WdYCk\nyZI6M3/oPGD3XE9eo+GFtXK7FBii7JbhS4GWqrxENs/spZo7tl8kG954R9rHLQVhZwN7KLvBxkrg\n1Ny6W8l6CW9pMr4DSafWOgfKbjl/JXCipKclfSCtGg88W2e32wC/SPX+K+CctLzwOrG9LpVlZSrL\nkuqYOu8rjgNOUnZTjZXAoQXlKSxruq5vBVaRDbk93e0fAjIIeC71ZIW+JOaElFPUWzlFvZVSb5rL\nE5pXxnrr0w8aTvOdtrd9QcPgUJOkD5LdwOT8ns6lO0naBrj
e9mbzXKh004yptovuyFiJiQcNhxBC\nKL8p8aDhsGmpxoOG+3oDawzwE2Bj1bOwQtjsSDqPrJfwCts314nru/8ohBBC2GwMGz6M9ev+Mouk\ntbW1lL0hm7veXG+1GljdfZODXsX2k8DHejqPEHqDdEv7KxsGEt/4lVFv/g8o1Bb1Vk5RbyGEevp0\nD1YIofMkOf5dCCGEEEKor1YPVnfe5CKEEEIIIYQQNmvRwAohhD6gjM8JCVFvZRX1Vk5Rb+VUxnqL\nBlYIIYQQQgghdJOYgxVCaCfmYIUQQgghNBZzsEIIIYQQQghhE4sGVgihA0nxU9KfnXYa1dOXT+iE\nMs4tCFFvZRX1Vk5lrLc+/RysEEJXxRDB8mkFWnjuuQ4jFUIIIYTwLoo5WCGEdiQ5GlhlpnhQdAgh\nhPAu6FVzsCSNlPRIjXXzJE16t3NKxx4haYmk2blla3sil1ok7SdpWhNxa9Pvmh8kNx0AACAASURB\nVOe6Kn4bSc9I+kFu2TxJIxpsN03Svg3yvavR8Tsjv09JJ0q6qIltvivpEUkrJB3VRPxFkk5osP7c\nTua9m6SlkhZLGi3pLEmrJN2YyvHDBts3LKukCZIeTGVdli+rpGMkPSbpnM7kHUIIIYQQmteTc7B6\n41eshwFzbB+cW9Yb82wmJ9d4XcslwPyupdOpXDbFPuvuX9I/AB8CxgMfAc6XNHAT5NTIYcBM25Nt\nrwVOAw60fXxa39l6LfI/wPG2dwcOBv6fpG0BbN8M7AdEA6tPau3pBEIXlHFuQYh6K6uot3IqY731\nZAOrv6Sb0jf4t0rasjogfeO+Iv1cnpZtkXpNVkhaLunstHyMpLnpW/tFkkZ3IafBwPNVy17I5XNC\nOuZSSdPTsmmSpkp6QNITkg5Py7eWdG/KZbmkQ9PykZJWp+3WSJoh6aC0/RpJe6S4rSTdIGlh6vE4\nJKXxBvBKE2V5oXqBpOtS7kslPS/pG2n5ZGBHYE7VJn8A3mpwnA0pJyTtmcqxLOW9ddXxC8skaYGk\nsbm4eZIm1TkHea8BGxvkOA74lTP/C6wA/r7BNq+mfZN6mh5N5fpZLuaDKdcnJH0pxbbrMZR0Xurt\nOhj4MnCapPskXQPsCsyuXMO5bYZKuk3Sb9LP3s2W1fYTtp9Mr/+b7HreIbf+OWBQg7KHEEIIIYQu\n6smbXOwGfMH2Qkk3AKcDV1VWStoZuByYSPYhfm5qpKwDhtsen+K2TZvMAC6zPUvSALrWeOwHtOUX\n2N4rHWcccCGwt+2XJQ3Ohe1k+6OpkTALuB34E3CY7Y2StgcWpnUAY4DP2F4laRFwdNr+0HSMw4Gv\nAffZPknSIOAhSffaXgAsSDlNBk61fUp1QSp5Vy07OW03ApgNTJMk4PvAscBBVfFHNDphts9J++wP\n3AIcaXtJ6iF6rSq8sExpu88CUyTtlM7nEknfrhGfP/6tldepATbZ9pSq4y4HvinpKmBrYH/g0Qbl\nuir39qvAKNt/zl1vkF3DLWQNljWSrq5s3nF3ni3px8CrlX1L+iTQkq6nE3PxU4GrbD8o6X3APcC4\nJstKLubDQP9Kgyunib+N/G5b0k/o3Vp6OoHQBS0tLT2dQuiCqLdyinorp95Ub62trU31qPVkA+tp\n2wvT65uAL5FrYAF7AvNsvwQgaQawL3ApMFrSVOCXwJz0YX4X27MAbL/R2WRSQ2NCyqXIx8mGd72c\njrEht+7OtGy1pB0ruwS+o2x+UhuwS27dWtur0utHgUqj4RFgVHr9CeAQSV9J7wcAI4A1lYPaXgx0\naFw1KOeWwEzgTNvrJJ0B3G372ewU0NVbkO0GPGt7ScptYzpePqZWmWaS9Z5NAY4CbmsQX8j2XUCH\n+V6250raE3iQrEfnQRr3zOUtB34m6U5SXSd3234T+IOk54BhndgnZOe66HwfCIzVX07eQElbpd43\noHZZ395x9gXFT4HjC1a/JGlMQcMrZ0rD5EMIIYQQNictLS3tGnwXX3xxYVxvmoNVNLekw4fP1LCZ\nQDbh4IvAdbVi2+1IOj0NjVuSekny67YA1gJjgbubyr691wtyPhYYCky0PZHsg/2WBfFtufdt/KXR\nK7JeronpZ7TtNbxz1wC32Z6X3u8NnCnpKbKerOMlXdbFfTdqnBWWyfazwIuSdifryfp5bptuOQe2\nL0v7+CTZdf94Jzb/R+BHwCTg4XS9QMd6/CvgTbKe0IoOQ1+bIGCvXLlH5BtXDTeWtgF+AfyL7YcL\nQqYCyyR9vgu5hV6rtacTCF1QxrkFIeqtrKLeyqmM9daTDayRkirD2D4H/Lpq/UPAvpKGSOoHHAPM\nT8Pt+tm+A/g6MCn1ljwj6VMAkgZIem9+Z7avTh9WJ9leX7WuzfYoYBHZB/wi9wNHShqSjrFdjbhK\nI2MQ8LztNkn7AyMLYuq5Bzjr7Q2kDzWxTV2pt2qg7Ssqy2wfZ3uU7V2B84Gf2r6wYNvpSvPDalgD\n7JSGLSJpYKq3vHpl+jnwz8C2tlc2Ed80ZfP2KvU2HtidNN9M0mWV66bGtgJG2J4PXABsC9S7QcZz\nwA6StpP0HuCfupDyHODteVmSJjS7YRqqeScwPf2NFLkQeL/tn3QhtxBCCCGEUEdPNrAeA86QtIrs\n5hI/TssNkBpBF5B9LbsUeDgNixoOtEpaCtyYYgBOAM6StBx4gM4P14KsV2NI0Yo0pO/bZI28pcCV\n+Xzzoen3DGDPlM9xwOqCmKLtKy4huxHIinTThG9VB0iaLOnaOuWpdh6we64nrzPDC8cDz9ZaafvP\nZI3TH0laRtZIeE9VWL0y/Tsde68urRPfgaRDJE0pWNUf+LWklWTX2XG2K3PtdgfWF2xT0Q+4KdXj\nYmCq7T8WxFWu2zdTng+TNRBXF8S226bA2cAeym6OshI4tTqgTlmPAvYBPp+r5/FVMQPSzS5Cn9LS\n0wmELuhNcwtC86LeyinqrZzKWG/xoOGcNNdne9sXNAzejKQhZ9fbrtW7V1qSZlfdlr9PS/MAl9ve\nuU5MPGi41OJBwyGEEMK7Qb3pQcO92O3AR5V70HAA26/2xcYVwGbWuDqGrGfxe01Ex09Jf4YNy49G\nDr1dGecWhKi3sop6K6cy1ltP3kWw10l3VftYT+cRwqaQHjR8c5Oxmzib0N1aW1tLOYwihBBC6Gti\niGAIoR1Jjn8XQgghhBDqiyGCIYQQQgghhLCJRQMrhBD6gDKOUQ9Rb2UV9VZOUW/lVMZ6iwZWCCGE\nEEIIIXSTmIMVQmgn5mCFEEIIITQWc7BCCCGEEEIIYROL27SHEDqQOnwZE0IouWHDh7F+3fqeTqNP\niMcilFPUWzmVsd6igRVC6GhKTycQOm0tMLqnkwid9i7W23NTnnt3DhRCCJu5mIMVQmhHkqOBFUIf\nNCUeIh5CCN2pV83BkjRS0iM11s2TNOndzikde4SkJZJm55at7YlcapG0n6RpTcStTb9rnuuq+G0k\nPSPpB7ll8ySNaL
DdNEn7Nsj3rkbH74z8PiWdKOmiJrb5rqRHJK2QdFQT8RdJOqHB+nM7mfdukpZK\nWixptKSzJK2SdGMqxw8bbN9sWU+U9LikNfkySDpG0mOSzulM3iGEEEIIoXk9eZOL3vg12mHAHNsH\n55b1xjybyck1XtdyCTC/a+l0KpdNsc+6+5f0D8CHgPHAR4DzJQ3cBDk1chgw0/Zk22uB04ADbR+f\n1ne2XjuQtB3wTWBPYC/gIkmDAGzfDOwHRAOrL+pVXwWFpkW9lVIZn8sTot7Kqoz11pMNrP6Sbkrf\n4N8qacvqgPSN+4r0c3latkXqNVkhabmks9PyMZLmSlomaZGkroxqHww8X7XshVw+J6RjLpU0PS2b\nJmmqpAckPSHp8LR8a0n3plyWSzo0LR8paXXabo2kGZIOStuvkbRHittK0g2SFqYej0NSGm8ArzRR\nlheqF0i6LuW+VNLzkr6Rlk8GdgTmVG3yB+CtBsfZkHJC0p6pHMtS3ltXHb+wTJIWSBqbi5snaVKd\nc5D3GrCxQY7jgF8587/ACuDvG2zzato3qafp0VSun+ViPphyfULSl1Jsux5DSeel3q6DgS8Dp0m6\nT9I1wK7A7Mo1nNtmqKTbJP0m/ezdibJ+kuxLgldsbyCr07fLavs5YFCDfYQQQgghhC7qyZtc7AZ8\nwfZCSTcApwNXVVZK2hm4HJhI9iF+bmqkrAOG2x6f4rZNm8wALrM9S9IAutZ47Ae05RfY3isdZxxw\nIbC37ZclDc6F7WT7o6mRMAu4HfgTcJjtjZK2BxamdQBjgM/YXiVpEXB02v7QdIzDga8B99k+KfVA\nPCTpXtsLgAUpp8nAqbZPqS5IJe+qZSen7UYAs4FpkgR8HzgWOKgq/ohGJ8z2OWmf/YFbgCNtL0k9\nRK9VhReWKW33WWCKpJ3S+Vwi6ds14vPHv7XyOjXAJtueUnXc5cA3JV0FbA3sDzzaoFxX5d5+FRhl\n+8+56w2ya7iFrMGyRtLVlc077s6zJf0YeLWyb0mfBFrS9XRiLn4qcJXtByW9D7gHGNdkWYcDz+Te\n/z4ty2v8tzEv93oUcfOEMog6Kqeot1Iq2x3NQibqrZx6U721trY21aPWkw2sp20vTK9vAr5EroFF\nNsRpnu2XACTNAPYFLgVGS5oK/BKYkz7M72J7FoDtNzqbTGpoTEi5FPk42fCul9MxNuTW3ZmWrZa0\nY2WXwHeUzU9qA3bJrVtre1V6/ShQaTQ8QvZxFuATwCGSvpLeDwBGAGsqB7W9GOjQuGpQzi2BmcCZ\nttdJOgO42/az2Smgq/fn3g141vaSlNvGdLx8TK0yzSTraZkCHAXc1iC+kO27gA7zvWzPlbQn8CBZ\nD+WDNO6Zy1sO/EzSnaS6Tu62/SbwB0nPAcM6sU/IznXR+T4QGKu/nLyBkrZKvW9A7bI26SVJY2w/\nWTNi/y7uOYQQQgihj2ppaWnX4Lv44osL43rTHKyiuSUdPnymhs0EoBX4InBdrdh2O5JOT0PjlqRe\nkvy6LchGwo8F7m4q+/ZeL8j5WGAoMNH2RLIP9lsWxLfl3rfxl0avyHq5Jqaf0bbX8M5dA9xmu9JH\nsTdwpqSnyHqyjpd0WRf33ahxVlgm288CL0ranawn6+e5bbrlHNi+LO3jk2TX/eOd2PwfgR8Bk4CH\n0/UCHevxr4A3yXpCKzoMfW2CgL1y5R6Rb1w18HvaN0L/Oi3Lmwosk/T5LuQWequYy1NOUW+lVMY5\nISHqrazKWG892cAaKakyjO1zwK+r1j8E7CtpiKR+wDHA/DTcrp/tO4CvA5NSb8kzkj4FIGmApPfm\nd2b76vRhdZLt9VXr2myPAhaRfcAvcj9wpKQh6Rjb1YirNDIGAc/bbpO0PzCyIKaee4Cz3t5A+lAT\n29SVeqsG2r6issz2cbZH2d4VOB/4qe0LC7adrjQ/rIY1wE5p2CKSBqZ6y6tXpp8D/wxsa3tlE/FN\nUzZvr1Jv44HdSfPNJF1WuW5qbCtghO35wAXAtkC9G2Q8B+wgaTtJ7wH+qQspzwHenpclaUIntr0H\nOEjSoHSNHpSW5V0IvN/2T7qQWwghhBBCqKMnG1iPAWdIWkV2c4kfp+UGSI2gC8h6qpYCD6dhUcOB\nVklLgRtTDMAJwFmSlgMP0PnhWpD1agwpWpGG9H2brJG3FLgyn28+NP2eAeyZ8jkOWF0QU7R9xSVk\nNwJZkW6a8K3qAEmTJV1bpzzVzgN2z/XkdWZ44Xjg2Vorbf+ZrHH6I0nLyBoJ76kKq1emf6dj79Wl\ndeI7kHSIpCkFq/oDv5a0kuw6O852Za7d7sD6gm0q+gE3pXpcDEy1/ceCuMp1+2bK82Gyhs3qgth2\n2xQ4G9hD2c1RVgKnVgfUKmsawnoJ2ZcFvwEurhrOCjAg3ewi9CUxl6ecot5KqTfNCQnNi3orpzLW\nWzxoOCfN9dne9gUNgzcjkrYBrrddq3evtCTNrrotf5+W5gEut71znZh40HAIfdGUeNBwCCF0J9V4\n0HA0sHIkjQF+AmzcnD50h82DpGPI7og43fb/rRMX/yiE0AcNGz6M9evqddiHZrW2tpbyW/XNXdRb\nOfXmeqvVwOrJuwj2Oumuah/r6TxC2BTSg4ZvbjJ2E2cTultv/g8o1Bb1FkIIfU/0YIUQ2pHk+Hch\nhBBCCKG+Wj1YPXmTixBCCCGEEELoU6KBFUIIfUAZnxMSot7KKuqtnKLeyqmM9RYNrBBCCCGEEELo\nJjEHK4TQTszBCiGEEEJoLOZghRBCCCGEEMImFrdpDyF0IHX4MiaEsAlst90wXnopnk1VNnF7/XKK\neiunMtZbNLBCCAViiGD5tAItPZxD6KyXX44vM0IIoa+JOVghhHYkORpYIbxbFA/2DiGEkupVc7Ak\njZT0SI118yRNerdzSsceIWmJpNm5ZWt7IpdaJO0naVoTcWvT75rnuip+G0nPSPpBbtk8SSMabDdN\n0r4N8r2r0fE7I79PSSdKuqiJbd5KdbtU0p1NxF8k6YQG68/tZN67peMvljRa0lmSVkm6MZXjhw22\nb1hWSRMkPSjpEUnLJB2VW3eMpMckndOZvEMIIYQQQvN68iYXvfEru8OAObYPzi3rjXk2k5NrvK7l\nEmB+19LpVC6bYp/N7P9/bE+yPdH2YZsgn2YcBsy0Pdn2WuA04EDbx6f1na3XIv8DHG97d+Bg4P9J\n2hbA9s3AfkA0sPqk1p5OIITNRhmfyxOi3sqqjPXWkw2s/pJuSt/g3yppy+qA9I37ivRzeVq2Reo1\nWSFpuaSz0/Ixkuamb+0XSRrdhZwGA89XLXshl88J6ZhLJU1Py6ZJmirpAUlPSDo8Ld9a0r0pl+WS\nDk3LR0panbZbI2mGpIPS9msk7ZHitpJ0g6SFqcfjkJTGG8ArTZTlheoFkq5LuS+V9Lykb6Tlk4Ed\ngTlVm/wBeKvBcTaknJC0ZyrHspT31lXHLyyTpAWSxubi5kmaVOcc5L0GbGy
QI0BnJzq8mvZN6ml6\nNJXrZ7mYD6Zcn5D0pRTbrsdQ0nmpt+tg4MvAaZLuk3QNsCswu3IN57YZKuk2Sb9JP3s3W1bbT9h+\nMr3+b7LreYfc+ueAQZ08FyGEEEIIoUk9eZOL3YAv2F4o6QbgdOCqykpJOwOXAxPJPsTPTY2UdcBw\n2+NT3LZpkxnAZbZnSRpA1xqP/YC2/ALbe6XjjAMuBPa2/bKkwbmwnWx/NDUSZgG3A38CDrO9UdL2\nwMK0DmAM8BnbqyQtAo5O2x+ajnE48DXgPtsnSRoEPCTpXtsLgAUpp8nAqbZPqS5IJe+qZSen7UYA\ns4FpkgR8HzgWOKgq/ohGJ8z2OWmf/YFbgCNtL5E0kNRAySksU9rus8AUSTul87lE0rdrxOePf2vl\ndWqATbY9pSDV96Rz/QbwXdv/0aBcV+XefhUYZfvPuesNsmu4hazBskbS1ZXNO+7OsyX9GHi1sm9J\nnwRa0vV0Yi5+KnCV7QclvQ+4BxjXibJWYj4M9K80uHKa+NvI77aFuHlCGbT0dAIhbDbKdkezkIl6\nK6feVG+tra1N9aj1ZAPradsL0+ubgC+Ra2ABewLzbL8EIGkGsC9wKTBa0lTgl8Cc9GF+F9uzAGy/\n0dlkUkNjQsqlyMfJhne9nI6xIbfuzrRstaQdK7sEvqNsflIbsEtu3Vrbq9LrR4FKo+ERYFR6/Qng\nEElfSe8HACOANZWD2l4MdGhcNSjnlsBM4Ezb6ySdAdxt+9nsFHS6p6diN+BZ20tSbhvT8fIxtco0\nk6z3bApwFHBbg/hCtu8Cas33Gmn7v1PP5v2SVqRhes1YDvxM2dyt/Pytu22/CfxB0nPAsCb3VyGK\nz/eBwFj95eQNlLSV7f+tBDQoa+ULip8CxxesfknSmIKGV86UhsmHEEIIIWxOWlpa2jX4Lr744sK4\n3jQHq2huSYcPn6lhM4FswsEXgetqxbbbkXR6Ghq3JPWS5NdtAawFxgJ3N5V9e68X5HwsMBSYaHsi\n2VCtLQvi23Lv2/hLo1dkvVwT089o22t4564BbrM9L73fGzhT0lNkPVnHS7qsi/tu1DgrLJPtZ4EX\nJe1O1pP189w23XIO0nA5UqOqlaxntFn/CPwImAQ8nK4X6FiPfwW8SdYTWtFh6GsTBOyVK/eIfOOq\n4cbSNsAvgH+x/XBByFRgmaTPdyG30Gu19nQCIWw2yjgnJES9lVUZ660nG1gjJVWGsX0O+HXV+oeA\nfSUNkdQPOAaYn4bb9bN9B/B1YFLqLXlG0qcAJA2Q9N78zmxfnT6sTrK9vmpdm+1RwCKyD/hF7geO\nlDQkHWO7GnGVRsYg4HnbbZL2B0YWxNRzD3DW2xtIH2pim7pSb9VA21dUltk+zvYo27sC5wM/tX1h\nwbbTleaH1bAG2CkNW0TSwFRvefXK9HPgn4Ftba9sIr5pkganYaNIGgp8FFiV3l9WuW5qbCtghO35\nwAXAtsDAOod7DthB0naS3gP8UxdSngO8PS9L0oRmN0xDNe8Epqe/kSIXAu+3/ZMu5BZCCCGEEOro\nyQbWY8AZklaR3Vzix2m5AVIj6AKyr2WXAg+nYVHDgVZJS4EbUwzACcBZkpYDD9D54VoAjwNDilak\nIX3fJmvkLQWuzOebD02/ZwB7pnyOA1YXxBRtX3EJ2Y1AVqSbJnyrOkDSZEnX1ilPtfOA3XM9eZ0Z\nXjgeeLbWStt/Jmuc/kjSMrJGwnuqwuqV6d/p2Ht1aZ34DiQdImlKwaqxwKJUb/eRzdV7LK3bHVhf\nsE1FP+CmVI+Lgam2/1gQV7lu30x5PkzWQFxdENtumwJnA3souznKSuDU6oA6ZT0K2Af4fK6ex1fF\nDEg3uwh9SktPJxDCZqM3zQkJzYt6K6cy1ls8aDgnzfXZ3vYFDYM3I2nI2fW2a/XulZak2VW35e/T\n0jzA5bZ3rhMTDxoO4V0TDxoOIYSyUm960HAvdjvwUeUeNBzA9qt9sXEFsJk1ro4h61n8Xk/nEjaF\n1p5OIITNRhnnhISot7IqY7315F0Ee510V7WP9XQeIWwK6UHDNzcX3dWbSYYQOmO77boymj2EEEJv\nFkMEQwjtSHL8uxBCCCGEUF8MEQwhhBBCCCGETSwaWCGE0AeUcYx6iHorq6i3cop6K6cy1ls0sEII\nIYQQQgihm8QcrBBCOzEHK4QQQgihsZiDFUIIIYQQQgibWDSwQgihDyjjGPUQ9VZWUW/lFPVWTmWs\nt3gOVgihAymegxVCCGHzNWz4MNavW9/TaYSSijlYIYR2JJkpPZ1FCCGE0IOmQHxGDo3EHKwQQggh\nhBBC2MR6pIElaaSkR2qsmydp0rudUzr2CElLJM3OLVvbE7nUImk/SdOaiFubftc811Xx20h6RtIP\ncsvmSRrRYLtpkvZtkO9djY7fGfl9SjpR0kVNbPNWqtulku5sIv4iSSc0WH9uJ/PeLR1/saTRks6S\ntErSjakcP2ywfbNlPVHS45LW5Msg6RhJj0k6pzN5h5LoVf9ShaZFvZVT1Fs5Rb2VUszB6pze2O96\nGDDH9gW5Zb0xz2Zyco3XtVwCzO9aOp3KZVPss5n9/4/tHmm45xwGzLR9GYCk04ADbD8r6UQ6X68d\nSNoO+CYwCRCwWNJ/2H7F9s2S7gceBv7vOylICCGEEEIo1pNDBPtLuil9g3+rpC2rA9I37ivSz+Vp\n2Rap12SFpOWSzk7Lx0iaK2mZpEWSRnchp8HA81XLXsjlc0I65lJJ09OyaZKmSnpA0hOSDk/Lt5Z0\nb8pluaRD0/KRklan7dZImiHpoLT9Gkl7pLitJN0gaWHq8TgkpfEG8EoTZXmheoGk61LuSyU9L+kb\naflkYEdgTtUmfwDeanCcDSknJO2ZyrEs5b111fELyyRpgaSxubh5kibVOQd5rwEbG+QIWWOjM15N\n+yb1ND2ayvWzXMwHU65PSPr/2Tv3cC2rMv9/vpCIipKHBGUGNKbL0UlN0J85Org1D6l5GJOMNK3x\nMhtNOziao05hmodKr/E3jlrpoCNoanlAiUSMTaYiChvQQLKiPA1ooxSa5/39/fGsF5797vf82/ju\nB+7Pde1rP+9a91rrXut+2Dz3c99rvacn2R4RQ0lnpmjXIcBXgH+W9ICka4APAtNL93CuzVaSfizp\n0fSzVxNzPZjsJcGfbK8ks+nHS5W2VwBDm1yLoAi08hcvaD9ht2ISdismYbdC0tHR0W4VmqadEawd\ngM/bniPpeuBU4IpSpaRtgEuB3cge4u9PTspzwAjbuyS5zVKTKcDFtqdKGkRrzuNAoDtfYHvPNM5O\nwLnAXrZfkfT+nNhw23snJ2EqcAfwBnCU7VclbQnMSXUAo4FP2l4s6XHg06n9EWmMo4HzgAdsnyRp\nKDBX0kzbjwCPJJ3GAqfY/kL5REp6l5WdnNqNBKYDkyQJ+B5wHHBgmfwx9RbM9ldTnxsAPwLG254v\naQjJQclRcU6p3bHAREnD03rOl/
TtKvL58W8rXScHbKztiRVU3TCt9VvAZbbvrjOvK3Ifvw5sZ/vt\n3P0G2T3cQeawLJV0dal57+48XdK1wKpS35IOBjrS/XRiTv5K4ArbD0v6a+A+YKcG5zoCeDb3+flU\nlqf+v41ZuevtiP+UgiAIgiBY7+ns7GwoZbGdDtYztuek68nA6eQcLGAPYJbtlwEkTQHGARcB20u6\nEvgpMCM9zG9reyqA7beaVSY5GrsmXSqxP1l61ytpjJW5urtS2RJJW5e6BC5Rtj+pG9g2V7fM9uJ0\n/Sug5DQ8QfY4C3AQcLiks9LnQcBIYGlpUNvzgF7OVZ15DgZuB75k+zlJpwHTUppaSe9W2AF4wfb8\npNuraby8TLU53U4WaZkIfAr4cR35iti+B6i232uU7f9Jkc2fS1pku9Fs7IXAzcr2buX3b02z/Q7w\nv5JWAMMa7K+EqLzeBwA7as3iDZG0se2/lATqzLUeL0sabfu3VSX2a7HnoH0sIxzhIhJ2KyZht2IS\ndisknZ2d/SaK1dHR0UOXCy64oKJcf9qDVWlvSa+HT9srJe1Klgr1RWA8WepVTcdA0qnAyWmcQ20v\nz9UNAH4HvAlMa2IOJd6soPNxwFbAbra7lR06MbiCfHfuczdrbCKyKNfTLehTi2uAH9suxSj2AvZJ\n67MpWermKtvnttB3Pees6pwk/VHSzmSRrFNyVb3kU5SrKWz/T/q9TFInWWS0UQfrMDLn/gjgPEkf\nTuXldnwf8A5ZJLREr9TXBhCwp+23W2j7PFlUrcRf0TMeBVmEbIGk023f0MIYQRAEQRAEQRXauQdr\nlKRSGttngAfL6ucC4yRtIWkgMAGYndLtBtq+EzgfGJOiJc9KOhJA0iBJG+U7s3217d1sj8k7V6mu\n2/Z2wONkD/iV+DkwXtIWaYzNq8iVnIyhwIvJudoPGFVBphb3AWesbiB9pIE2NUnRqiG2v1sqs328\n7e1sfxD4F+C/KzlXkm5U2h9WhaXA8JS2iKQhyW55as3pVuBsYDPbTzYg70Ac1gAAIABJREFU3zCS\n3p/SRpG0FbA3sDh9vrh031RpK2Ck7dnAOcBmwJAaw60APiBpc0kbAp9oQeUZwOp9WemFQqPcBxwo\naWi6Rw9MZXnOBf4mnKt1jHgrW0zCbsUk7FZMwm6FpL9Er5qhnQ7WU8BpkhaTHS5xbSo3QHKCzgE6\ngS7gsZQWNQLolNQF3JRkAE4AzpC0EHiI5tO1AH4NbFGpIqX0fZvMyesCLs/rmxdNv6cAeyR9jgeW\nVJCp1L7EhWTRpEXp0IRvlQtIGivpBzXmU86ZwM7KDrmYL6mZ9MJdgBeqVaZoy7HAVZIWkDkJG5aJ\n1ZrTT1L7W3NlF9WQ74WkwyVNrFC1I/B4stsDZHv1nkp1OwO1vqp9IDA52XEecKXtP1eQK9237yQ9\nHyNzbJZUkO3RpgJfBnZXdjjKk/SM6AHV55pSWC8ke1nwKHBBWTorwKB02EUQBEEQBEHQxyi+pXoN\naa/PlmXHtK/3SNoUuM52teheYZE03fYh7dbjvSLtA1xoe5saMmbie6dT0EfE3oJiEnYrJmG3YtKM\n3SZCPCP3D/rTHqxyJGG7V2ZaO/dg9UfuAG5Y3x6662F7FdVTJwvN+mRnSRPITkT8Tl3hiWtbmyAI\ngiDovwwb0UoiVBBkRAQrCIIeSHL8XQiCIAiCIKhNtQhWO/dgBUEQBEEQBEEQrFOEgxUEQbAO0MgX\nHwb9j7BbMQm7FZOwWzEpot3CwQqCIAiCIAiCIOgjYg9WEAQ9iD1YQRAEQRAE9Yk9WEEQBEEQBEEQ\nBGuZcLCCIAjWAYqYox6E3YpK2K2YhN2KSRHtFt+DFQRBL6Re0e4gCNrIsGGjWL789+1WIwiCIGiA\n2IMVBEEPJBni70IQ9C9E/H8dBEHQv4g9WEEQBEEQBEEQBGuZtjhYkkZJeqJK3SxJY95rndLYIyXN\nlzQ9V7asHbpUQ9K+kiY1ILcs/a661mXym0p6VtL/zZXNkjSyTrtJksbV0feeeuM3Q75PSSdK+mYD\nbaZLekXS1AbH+KakE+rUf61xrUHSDpK6JM2TtL2kMyQtlnRTmsd/1Glfd66SdpX0sKQnJC2Q9Klc\n3QRJT0n6ajN6B0Whs90KBC3R2W4FghYo4p6QIOxWVIpot3ZGsPpjrsNRwAzbh+TK+qOejejkKtfV\nuBCY3Zo6TemyNvpspP/vAMevBT2a4SjgdttjbS8D/hk4wPZnU32zdq3Ea8Bnbe8MHAL8u6TNAGzf\nAuwLhIMVBEEQBEGwlming7WBpMnpDf5tkgaXC6Q37ovSz6WpbECKmiyStFDSl1P5aEn3p7f2j0va\nvgWd3g+8WFb2Uk6fE9KYXZJuTGWTJF0p6SFJv5F0dCrfRNLMpMtCSUek8lGSlqR2SyVNkXRgar9U\n0u5JbmNJ10uakyIehyc13gL+1MBcXiovkPTDpHuXpBcl/VsqHwtsDcwoa/K/wLt1xlmZdELSHmke\nC5Lem5SNX3FOkh6RtGNObpakMTXWIM/rwKt1dMT2rEbkcqxKfZMiTb9K87o5J/N3SdffSDo9yfaI\nGEo6M0W7DgG+AvyzpAckXQN8EJheuodzbbaS9GNJj6afvRqdq+3f2P5tuv4fsvv5A7n6FcDQJtYh\nKAwd7VYgaImOdisQtEBHR0e7VQhaIOxWTIpot3aeIrgD8HnbcyRdD5wKXFGqlLQNcCmwG9lD/P3J\nSXkOGGF7lyS3WWoyBbjY9lRJg2jNeRwIdOcLbO+ZxtkJOBfYy/Yrkt6fExtue+/kJEwF7gDeAI6y\n/aqkLYE5qQ5gNPBJ24slPQ58OrU/Io1xNHAe8IDtkyQNBeZKmmn7EeCRpNNY4BTbXyifSEnvsrKT\nU7uRwHRgkiQB3wOOAw4skz+m3oLZ/mrqcwPgR8B42/MlDSE5KDkqzim1OxaYKGl4Ws/5kr5dRT4/\n/m2l6+SAjbU9sZ7eDczritzHrwPb2X47d79Bdg93kDksSyVdXWreuztPl3QtsKrUt6SDgY50P52Y\nk78SuML2w5L+GrgP2KnZuUr6P8AGJYcrRwP/NvLddhAPgUEQBEEQrO90dnY2lLLYTgfrGdtz0vVk\n4HRyDhawBzDL9ssAkqYA44CLgO0lXQn8FJiRHua3tT0VwPZbzSqTHI1dky6V2J8sveuVNMbKXN1d\nqWyJpK1LXQKXKNuf1A1sm6tbZntxuv4VUHIangC2S9cHAYdLOit9HgSMBJaWBrU9D+jlXNWZ52Dg\nduBLtp+TdBowzfYL2RLQ6vncOwAv2J6fdHs1jZeXqTan28miZxOBTwE/riNfEdv3AH263yuxELhZ\n0l0kWyem2X4H+F9JK4BhTfYrKq/3AcCOWrN4QyRtbPsvJYF6c00vKP4b+GyF6pclja7geOWYWFf5\noL/RSTjCRaSTsFvx6OzsLORb9fWdsFsx6U926+jo6KHLBRdcUFGunQ5Wr7f8FWR6PXzaXil
pV+Bg\n4IvAeLLUq5qOgaRTgZPTOIfaXp6rGwD8DngTmNbEHEq8WUHn44CtgN1sdys7dGJwBfnu3Odu1thE\nZFGup1vQpxbXAD9OKXMAewH7pPXZlCx1c5Xtc1vou55zVnVOkv4oaWeySNYpuape8inK9V5yGJlz\nfwRwnqQPp/JyO74PeIcsElqiV+prAwjY0/bbLbRF0qbAvcC/2n6sgsiVwAJJp9u+oZUxgiAIgiAI\ngsq0cw/WKEmlNLbPAA+W1c8FxknaQtJAYAIwO6XbDbR9J3A+MCZFS56VdCSApEGSNsp3Zvtq27vZ\nHpN3rlJdt+3tgMfJHvAr8XNgvKQt0hibV5ErORlDgReTc7UfMKqCTC3uA85Y3UD6SANtapKiVUNs\nf7dUZvt429vZ/iDwL8B/V3KuJN2otD+sCkuB4SltEUlDkt3y1JrTrcDZwGa2n2xAvhV6RYwkXVy6\nbyo2yKJII23PBs4BNgOG1BhjBfABSZtL2hD4RAt6zgBW78tKLxQaIqVq3gXcmP6NVOJc4G/CuVrX\n6Gi3AkFLdLRbgaAF+svb9KA5wm7FpIh2a6eD9RRwmqTFZIdLXJvKDZCcoHPI8ie6gMdSWtQIoFNS\nF3BTkgE4AThD0kLgIZpP1wL4NbBFpYqU0vdtMievC7g8r29eNP2eAuyR9DkeWFJBplL7EheSRZMW\npUMTvlUuIGmspB/UmE85ZwI7KzvkYr6kZtILdwFeqFaZoi3HAldJWkDmJGxYJlZrTj9J7W/NlV1U\nQ74Xkg6XNLFK3S9S3/tLekZSab/ZzsDySm0SA4HJyY7zgCtt/7mCXOm+fSfp+RiZg7ikgmyPNhX4\nMrC7ssNRnqRnRK80n2pz/RSwD/C5nJ13KZMZlA67CIIgCIIgCPoYxTfDryHt9dnS9jl1hdcjUsrZ\ndbarRfcKi6TpZcfyr9OkfYALbW9TQ8b989sJgtp0EtGQItJJY3YT8f91/6E/7QkJGifsVkz6s90k\nYbtXZlo792D1R+4AbljfHrrrYXsV1VMnC836ZGdJE8hORPxOA9JrW50gCJpg2LBR9YWCIAiCfkFE\nsIIg6IEkx9+FIAiCIAiC2lSLYLVzD1YQBEEQBEEQBME6RThYQRAE6wCNfPFh0P8IuxWTsFsxCbsV\nkyLaLRysIAiCIAiCIAiCPiL2YAVB0IPYgxUEQRAEQVCf2IMVBEEQBEEQBEGwlgkHKwiCYB2giDnq\nQditqITdiknYrZgU0W7xPVhBEPRCiu/BCoIgCIK+ZNiIYSx/bnm71QjeA2IPVhAEPZBkJrZbiyAI\ngiBYx5gI8dy9bhF7sIIgCIIgCIIgCNYybXGwJI2S9ESVulmSxrzXOqWxR0qaL2l6rmxZO3SphqR9\nJU1qQG5Z+l11rcvkN5X0rKT/myubJWlknXaTJI2ro+899cZvhnyfkk6U9M0G2kyX9IqkqQ2O8U1J\nJ9Sp/1rjWoOkHSR1SZonaXtJZ0haLOmmNI//qNO+0bmeKOnXkpbm5yBpgqSnJH21Gb2DgtCv/lIF\nDRN2KyZht2ISdiskRdyD1c4IVn+MkR4FzLB9SK6sP+rZiE6ucl2NC4HZranTlC5ro89G+v8OcPxa\n0KMZjgJutz3W9jLgn4EDbH821Tdr115I2hz4BrAHsCfwTUlDAWzfAuwLhIMVBEEQBEGwlming7WB\npMnpDf5tkgaXC6Q37ovSz6WpbECKmiyStFDSl1P5aEn3S1og6XFJ27eg0/uBF8vKXsrpc0Ias0vS\njalskqQrJT0k6TeSjk7lm0iamXRZKOmIVD5K0pLUbqmkKZIOTO2XSto9yW0s6XpJc1LE4/CkxlvA\nnxqYy0vlBZJ+mHTvkvSipH9L5WOBrYEZZU3+F3i3zjgrk05I2iPNY0HSe5Oy8SvOSdIjknbMyc2S\nNKbGGuR5HXi1jo7YntWIXI5VqW9SpOlXaV4352T+Lun6G0mnJ9keEUNJZ6Zo1yHAV4B/lvSApGuA\nDwLTS/dwrs1Wkn4s6dH0s1cTcz2Y7CXBn2yvJLPpx3PrsAIY2sQ6BEWhlb94QfsJuxWTsFsxCbsV\nko6Ojnar0DTtPEVwB+DztudIuh44FbiiVClpG+BSYDeyh/j7k5PyHDDC9i5JbrPUZApwse2pkgbR\nmvM4EOjOF9jeM42zE3AusJftVyS9Pyc23PbeyUmYCtwBvAEcZftVSVsCc1IdwGjgk7YXS3oc+HRq\nf0Qa42jgPOAB2yelCMRcSTNtPwI8knQaC5xi+wvlEynpXVZ2cmo3EpgOTJIk4HvAccCBZfLH1Fsw\n219NfW4A/AgYb3u+pCEkByVHxTmldscCEyUNT+s5X9K3q8jnx7+tdJ0csLG2J9bTu4F5XZH7+HVg\nO9tv5+43yO7hDjKHZamkq0vNe3fn6ZKuBVaV+pZ0MNCR7qcTc/JXAlfYfljSXwP3ATs1ONcRwLO5\nz8+nsjz1/23Myl1vR/ynFARBEATBek9nZ2dDKYvtdLCesT0nXU8GTifnYJGlOM2y/TKApCnAOOAi\nYHtJVwI/BWakh/ltbU8FsP1Ws8okR2PXpEsl9idL73oljbEyV3dXKlsiaetSl8AlyvYndQPb5uqW\n2V6crn8FlJyGJ8geZwEOAg6XdFb6PAgYCSwtDWp7HtDLuaozz8HA7cCXbD8n6TRgmu0XsiWg1fO5\ndwBesD0/6fZqGi8vU21Ot5NFWiYCnwJ+XEe+IrbvAfp0v1diIXCzpLtItk5Ms/0O8L+SVgDDmuxX\nVF7vA4AdtWbxhkja2PZfSgL/n3N9WdJo27+tKrFfiz0H7WMZ4QgXkbBbMQm7FZOwWyHp7OzsN1Gs\njo6OHrpccMEFFeXa6WD1estfQabXw6ftlZJ2JUuF+iIwniz1qqZjIOlU4OQ0zqG2l+fqBgC/A94E\npjUxhxJvVtD5OGArYDfb3coOnRhcQb4797mbNTYRWZTr6Rb0qcU1wI9TyhzAXsA+aX02JUvdXGX7\n3Bb6ruecVZ2TpD9K2pksknVKrqqXfIpyvZccRubcHwGcJ+nDqbzcju8D3iGLhJbolfraAAL2tP12\nC22fJ4uqlfgresajIIuQLZB0uu0bWhgjCIIgCIIgqEI792CNklRKY/sM8GBZ/VxgnKQtJA0EJgCz\nU7rdQNt3AucDY1K05FlJRwJIGiRpo3xntq+2vZvtMXnnKtV1294OeJzsAb8SPwfGS9oijbF5FbmS\nkzEUeDE5V/sBoyrI1OI+4IzVDaSPNNCmJilaNcT2d0tlto+3vZ3tDwL/Avx3JedK0o1K+8OqsBQY\nntIWkTQk2S1PrTndCpwNbGb7yQbkW6FXxEjSxaX7pmKDLIo00vZs4BxgM2BIjTFWAB+QtLmkDYFP\ntKDnDGD1vqz0QqFR7gMOlDQ03aMHprI85wJ/E87VOka8lS0mYbdiEnYrJmG3QtJfolfN0E4H6yng\nNEmLyQ6XuDaVGyA5QecAnUAX8FhKixoBdErqAm
5KMgAnAGdIWgg8RPPpWgC/BraoVJFS+r5N5uR1\nAZfn9c2Lpt9TgD2SPscDSyrIVGpf4kKyaNKidGjCt8oFJI2V9IMa8ynnTGBnZYdczJfUTHrhLsAL\n1SpTtOVY4CpJC8ichA3LxGrN6Sep/a25sotqyPdC0uGSJlap+0Xqe39Jz0gq7TfbGaj1teoDgcnJ\njvOAK23/uYJc6b59J+n5GJljs6SCbI82FfgysLuyw1GepGdErzSfinNNKawXkr0seBS4oCydFWBQ\nOuwiCIIgCIIg6GMU3yi9hrTXZ0vb59QVXo+QtClwne1q0b3CIml62bH86zRpH+BC29vUkDET3zud\ngj4i9hYUk7BbMQm7FZN2220ixHN38/SnPVjlSMJ2r8y0du7B6o/cAdywvj1018P2KqqnThaa9cnO\nkiaQnYj4nbrCE9e2NkEQBEGwfjFsRCvJVUERiQhWEAQ9kOT4uxAEQRAEQVCbahGsdu7BCoIgCIIg\nCIIgWKcIBysIgmAdoJEvPgz6H2G3YhJ2KyZht2JSRLuFgxUEQRAEQRAEQdBHxB6sIAh6EHuwgiAI\ngiAI6hN7sIIgCIIgCIIgCNYy4WAFQRCsAxQxRz0IuxWVsFsxCbsVkyLaLb4HKwiCXki9ot3BOsaw\nYaNYvvz37VYjCIIgCNY5Yg9WEAQ9kGSIvwvrPiL+/gdBEARB68QerCAIgiAIgiAIgrVMQw6WpFGS\nnqhSN0vSmL5VqzEkjZQ0X9L0XNmyduhSDUn7SprUgNyynPw91WQkbdGHelUcJ1dfU+90X8yqI9Pn\n90e+z0bsLWkXSQ9LWijpbklDGmhTs19JqxrXeHWb70p6QtJlkraSNEfSPEn7NGLbBuf6HUlLJC2Q\n9BNJm+XqfiFprqStm9U9KAKd7VYgaIEi7i0Iwm5FJexWTIpot2YiWP0xl+QoYIbtQ3Jl/VHPRnRy\nletm+2mGev01q3c7aGT864Czbe8K3Amc3Qf9tjLvk4FdbH8dOABYZHus7V822F8jMjOAv7P9EeBp\n4F9XN7bHAfOAw5rWPAiCIAiCIGiIZhysDSRNlrRY0m2SBpcLSJogaVH6uTSVDZA0KZUtlPTlVD5a\n0v3pTfvjkrZvQf/3Ay+Wlb2U0+eENGaXpBtT2SRJV0p6SNJvJB2dyjeRNDPpslDSEal8VIoITJK0\nVNIUSQem9ksl7Z7kNpZ0fS4qcXhS4y3gTw3M5aXc9VBJ90p6StLVufLVOZ6SvpaiIYtya7pxateV\nysen8j2SvguSfpvkB5Y0LUUCuyStlPTZBvV+F3g59TEgF6FZIOm0cuG0bg+nNb416XuwpNtyMqsj\na5IOKpevs27V+FByYgBmAp9soM1LSYfhkman9Vkkae81quqiNNeHJX0gFU4q3VPp86r0+25gCDBP\n0tnAZcBRqd/B9LTtcZIeTXXXSKtPnKg7V9szbXenj3OAvyoTWU727yZY5+hotwJBC3R0dLRbhaAF\nwm7FJOxWTIpot2ZOEdwB+LztOZKuB04FrihVStoGuBTYDVgJ3J+clOeAEbZ3SXKllKUpwMW2p0oa\nRGv7wQYC3fkC23umcXYCzgX2sv2KpPxD5XDbe0vaEZgK3AG8ARxl+1VJW5I9nE5N8qOBT9peLOlx\n4NOp/RFpjKOB84AHbJ8kaSgwV9JM248AjySdxgKn2P5C+URKeif2AHYEngHuk3S07TtKlcrS405M\ncgOBRyV1Jj2ft/2JJLeppA2AHwHjbc9Xlh73etnYh+X6/S/gLturSnpXw/ZzwDHp4xeAUWQRGpet\nN2lNzwc+Zvv15GR8DbgE+L6kjWy/DhwL3Jzkz6sgf1G1dZM0DTjJ9vIyVX8l6QjbU4FP0dvpqDS3\nUr+fAX5m+5Lk6JScvE2Ah22fL+kysujUxZW6Sv0dKenPtkupjSuAsbbPSJ9Lc/jbtAZ/b/tdSf8J\nHAdMbnCuef6JzPZ5usnumTpMzF13EA/vQRAEQRCs73R2djaUstiMU/OM7TnpejKwT1n9HsAs2y+n\nN+hTgHHA74DtU9ToYGBVesjfNj3wYvst2280oQvpYXdXMgeuEvsDt9t+JY2xMld3VypbApT2owi4\nRNJCsijHtlqzV2WZ7cXp+lepHuAJYLt0fRBwjqQuss0Qg4CReYVsz6vkXFVgru0/ODvi6xZ6r/U+\nwJ2237D9GpmD+A9JnwMlXSJpn+Qk7QC8YHt+0uHVXIRjNZK2Am4CJqR2zXIA8P2kc/l6A3wU2Al4\nKK3RCcBI2+8CPwMOlzSQLH1tajX5WgrYPqyKw/FPwGmSHiNzjN5qYl6PAZ+X9A0y5/G1VP6m7Z+m\n63msuQ/KafS881L638eAMcBjad77Ax/sJVx9rtmg0nnA27ZvLqt6HtilvjoTcz8d9cWDfkBnuxUI\nWqCIewuCsFtRCbsVk/5kt46ODiZOnLj6pxrNRLDK939U2g/S62HS9kpJuwIHA18ExgNfqSTboyPp\nVLKogIFD8w+TkgaQOW5vAtOamEOJNyvofBywFbCb7W5lBwoMriDfnfvczZo1FFmU6+kW9CmnkbXu\n3ch+OkWhDgUulPQAmTNZb60HkDlyE5PTuTYQ2X654yrU3Qp8CXgFeMz2a8mBribfFLZ/TXb/IelD\nNLEHyfaDksalNjdIutz2ZODtnNi7rLkP3iG9uEhz2KBJdQXcaPu8Jtut6UD6HNk9sH+F6juAb0ha\nbHunVscIgiAIgiAIKtNMBGuUpHza1INl9XOBcZK2SJGICcDslOo10PadZCliY2y/Cjwr6UgASYMk\nbZTvzPbVtnezPab8Tb3tbtvbAY+TpVNV4ufAeKWT2SRtXkWu5HwMBV5MztV+ZOlu5TK1uA84Y3UD\n6SMNtKnGnsr2fg0gm1/5Wj9Itn9nsLL9VP8IPJjSNF9PUYvvkUVClgLDU3oikoYk++S5DFho+/ZK\nyijbw3VjHZ3vB04p9V1hvecAe0saneo3Ts4OwOyk68msSWmrJd8Uuf1RA8juwWvT520lzazTdiTZ\nfXE92WEZpRMRq90Tvwd2T9dH0tPBqnUfleoeAI7J6bx50qEhJH0cOAs4wvabFUROAKaHc7Uu0tFu\nBYIWKOLegiDsVlTCbsWkiHZrxsF6iizNajHZJvlrU3kpJWw5cA5ZnkoXWSTiHmAE0JnSnW5KMpA9\n6J2RUvIeAoa1oP+vgYpHW6eUvm+TOXldwOV5ffOi6fcUYI+kz/HAkgoyldqXuJDsIJBFyo60/1a5\ngKSxkn5QYz4l5gJXkaUj/tb2XfmxbXcBN5Clrz0C/MD2QmBnsr1fXcA3gItsv03mpF0laQHZKXMb\nlo13JnCQskMu5kv6RFn9SOAvdXS+DngWWJTGn1Cm8x+BzwG3pDV+mCx9kZSyeC/w8fS7pjxVbKDs\nsI7hFaomSFoKLCbbo3ZDKt+GnpGoSnQACyXNJ9u/9e+1dAB+COyb1uCjwGu5ulqRyNI6LSFzAmek\nec8Ae
s2pxlz/g+wwjfuTLa8uq9+c7HTBIAiCIAiCYC2gtGWmkEg6C9jS9jl1hYOWSYc43GT7yXbr\n0pcoO+nwD7bvbbcu7xXp0IxFtr9fQ8btP30/aJ5OmotiiSL//V9X6OzsLOTb2fWdsFsxCbsVk/5s\nN0nY7pWh1MwerP7IHWT7YqaXfRdW0Iek721a57D9n+3W4b1E0myyfYOVTjsMgiAIgiAI+oBCR7CC\nIOh7sghWsK4zbNgoli//fbvVCIIgCILCsq5GsIIgWAvEi5cgCIIgCILWaOXLfYMgCIJ+Rn/6npCg\nccJuxSTsVkzCbsWkiHYLBysIgiAIgiAIgqCPiD1YQRD0QJLj70IQBEEQBEFtqu3BighWEARBEARB\nEARBHxEOVhAEwTpAEXPUg7BbUQm7FZOwWzEpot3CwQqCIAiCIAiCIOgjYg9WEAQ9iO/BCoKgGsNG\nDGP5c8vbrUYQBEG/oNoerHCwgiDogSQzsd1aBEHQL5kY35MXBEFQIg65CIIgWJdZ1m4FgpYIuxWS\nIu4JCcJuRaWIdmvIwZI0StITVepmSRrTt2o1hqSRkuZLmp4r61f/XUnaV9KkBuSW5eTvqSYjaYs+\n1KviOLn6mnqn+2JWHZk+vz/yfTZib0m7SHpY0kJJd0sa0kCbmv1KWtW4xqvbfFfSE5Iuk7SVpDmS\n5knapxHbNjjXzSXNkLRU0n2ShubqfiFprqStm9U9CIIgCIIgaIxmIlj9MSfgKGCG7UNyZf1Rz0Z0\ncpXrZvtphnr9Nat3O2hk/OuAs23vCtwJnN0H/bYy75OBXWx/HTgAWGR7rO1fNthfIzLnADNt7wD8\nHPjX1Y3tccA84LCmNQ/6P9u3W4GgJcJuhaSjo6PdKgQtEHYrJkW0WzMO1gaSJktaLOk2SYPLBSRN\nkLQo/VyaygZImpTKFkr6ciofLel+SQskPS6plf9m3g+8WFb2Uk6fE9KYXZJuTGWTJF0p6SFJv5F0\ndCrfRNLMpMtCSUek8lGSlqR2SyVNkXRgar9U0u5JbmNJ1+eiEocnNd4C/tTAXF7KXQ+VdK+kpyRd\nnStfneMp6WspGrIot6Ybp3ZdqXx8Kt8j6bsg6bdJfmBJ01IksEvSSkmfbVDvd4GXUx8DchGaBZJO\nKxdO6/ZwWuNbk74HS7otJ7M6sibpoHL5OutWjQ8lJwZgJvDJBtq8lHQYLml2Wp9FkvZeo6ouSnN9\nWNIHUuGk0j2VPq9Kv+8GhgDzJJ0NXAYclfodTE/bHifp0VR3jaRSXSNzPRK4MV3fSPYSIs9ysn83\nQRAEQRAEwVrgfU3I7gB83vYcSdcDpwJXlColbQNcCuwGrATuT07Kc8AI27skuc1SkynAxbanShpE\na/vBBgLd+QLbe6ZxdgLOBfay/Yqk/EPlcNt7S9oRmArcAbwBHGX7VUlbAnNSHcBo4JO2F0t6HPh0\nan9EGuNo4DzgAdsnpbSsuZJm2n4EeCTpNBY4xfYXyidS0juxB7Aj8Axwn6Sjbd9RqlSWHndikhsI\nPCqpM+n5vO1PJLlNJW0A/AgYb3u+svS418vGPizX738Bd9leVdKW2VSCAAAgAElEQVS7GrafA45J\nH78AjCKL0LhsvUlrej7wMduvJyfja8AlwPclbWT7deBY4OYkf14F+YuqrZukacBJtsuPuPqVpCNs\nTwU+BfxVrXmV9fsZ4Ge2L0mOTsnJ2wR42Pb5ki4ji05dXKmr1N+Rkv5su5TauAIYa/uM9Lk0h79N\na/D3tt+V9J/AccDkBue6te0Vaczl6p0O2E12z9Qmn/i5HfGWvQgsI+xURMJuhaSzs7OQb9XXd8Ju\nxaQ/2a2zs7OhPWHNOFjP2J6TricDp5NzsMge9mfZLkU0pgDjyB6It5d0JfBTYEZ6yN82PfBi+60m\n9CD1L2DXpEsl9gdut/1KGmNlru6uVLYk9wAq4BJJ48geQrfN1S2zvThd/4osCgLwBNnjJ8BBwOGS\nzkqfBwEjgaWlQW3PI3NE6jHX9h/SPG8B9iFzAkvsA9xp+40kcwfwD8B9wPckXQJMs/1LSR8GXrA9\nP+nwamrTY0BJWwE3Acck56pZDgCucTpeqmy9AT4K7AQ8lGy3AZmD8q6kn5Gt3U/I0tfOAjoqyddS\noOQoVuCfgP+Q9G9kTnMz99tjwPXJUb3b9sJU/qbtn6breWTzr0Svk2WqUEr/+xgwBngszXswsKKX\ncPW5Vuu3xPNka1ub/RrsPQiCIAiCYD2ho6Ojh7N3wQUXVJRrxsEqf1CrtB+k18Ok7ZWSdgUOBr4I\njAe+Ukm2R0fSqWRRAQOH5t/USxoA/A54E5jWxBxKvFlB5+OArYDdbHcrO1BgcAX57tznbtasocii\nXE+3oE85jax170b20ykKdShwoaQHyJzJems9ALgFmGh7SQv6NoLI9ssdV6HuVuBLwCvAY7ZfS85F\nNfmmsP1rsvsPSR+iiT1Ith9MTvdhwA2SLrc9GXg7J/Yua+6Dd0jR2Jxj2AwCbrR9XpPtSqyQNMz2\nCknD6Z1CewfwDUmLbe/U4hhBfySiIMUk7FZI+svb9KA5wm7FpIh2ayYtb5SkfNrUg2X1c4FxkraQ\nNBCYAMxOqV4Dbd9JliI2JkVRnpV0JICkQZI2yndm+2rbu9keU54GZbvb9nbA42TpVJX4OTBe6WQ2\nSZtXkSs5H0OBF5NztR9Zulu5TC3uA85Y3UD6SANtqrGnsr1fA8jmV77WD5Lt3xmsbD/VPwIPpjTN\n123fDHyPLBKyFBie0hORNCTZJ89lwELbt1dSRtkerhsr1eW4Hzil1HeF9Z4D7C1pdKrfODk7ALOT\nrieTpTPWk2+K3P6oAWT34LXp87aSZtZpO5Lsvrie7LCM0omI1e6J3wO7p+sj6elg1bqPSnUPAMfk\ndN486dAoU4HPpesTgbvL6k8ApodzFQRBEARBsHZoxsF6CjhN0mKyTfLXpvJSSthyshPMOoEuskjE\nPcAIoFNSF1kK2jmp3QnAGZIWAg8Bw1rQ/9dAxaOtU0rft8mcvC7g8ry+edH0ewqwR9LneGBJBZlK\n7UtcSHYQyCJlR9p/q1xA0lhJP6gxnxJzgavI0hF/a/uu/Ni2u4AbyNLXHgF+kFLXdibb+9UFfAO4\nyPbbZE7aVZIWADOADcvGOxM4SNkhF/MlfaKsfiTwlzo6Xwc8CyxK408o0/mPZA/+t6Q1fphsXx+2\nu4F7gY+n3zXlqWIDZYd1DK9QNUHSUmAx2R61G1L5NvSMRFWiA1goaT7Z/q1/r6UD8ENg37QGHwVe\ny9XVikSW1mkJmRM4I817BtBrTjXmehlwYJrvx8j2RebZHOiLKGvQ3+hXX1ARNEzYrZAU8Xt5grBb\nUSmi3VTkb2RP+522tH1OXeGgZdIhDjfZfrLduvQlyk46/IPte9uty3tFOjRjke3v15AxE987nYI+\nIg5LKCZFs9tEKPJzQ1/RnzbdB40Tdism/dlukrDdK0Op6A7WaL
[... base64-encoded PNG data omitted: bar chart titled 'Decompression speed', x-axis 'speed (Mb/s)', one horizontal bar per compressor configuration, blue = bit shuffle, green = no shuffle ...]\n",
"text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "co_t, de_t = compression_decompression_times()\n", "\n",
"fig = plt.figure(figsize=(12, len(compression_configs)*.3))\n", "fig.suptitle('Decompression speed', fontsize=14, y=1.01)\n", "\n", "\n",
"ax = fig.add_subplot(1, 1, 1)\n", "\n",
"y = [i for i, (c, o) in enumerate(compression_configs) if c == 'blosc' and o['shuffle'] == 2]\n",
"x = (nbytes / 1000000) / np.array([de_t[i] for i in y])\n",
"ax.barh(bottom=np.array(y)+.2, width=x.max(axis=1), height=.6, label='bit shuffle', color='b')\n", "\n",
"y = [i for i, (c, o) in enumerate(compression_configs) if c != 'blosc' or o['shuffle'] == 0]\n",
"x = (nbytes / 1000000) / np.array([de_t[i] for i in y])\n",
"ax.barh(bottom=np.array(y)+.2, width=x.max(axis=1), height=.6, label='no shuffle', color='g')\n", "\n",
"ax.set_yticks(np.arange(len(labels))+.5)\n", "ax.set_yticklabels(labels, rotation=0)\n", "\n",
"xlim = (0, np.max((nbytes / 1000000) / np.array(de_t)) + 100)\n", "ax.set_xlim(*xlim)\n", "ax.set_ylim(0, len(de_t))\n",
"ax.set_xlabel('speed (Mb/s)')\n", "ax.grid(axis='x')\n", "ax.legend(loc='upper right')\n", "\n",
"fig.tight_layout();" ] }, { "cell_type": "code", "execution_count": 61, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import cpuinfo" ] }, { "cell_type": "code", "execution_count": 63, "metadata": {
"collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Vendor ID: GenuineIntel\n", "Hardware Raw: \n", "Brand: Intel(R) Xeon(R) CPU E3-1505M v5 @ 2.80GHz\n", "Hz Advertised: 2.8000 GHz\n", "Hz Actual: 1.1000 GHz\n", "Hz Advertised Raw: (2800000000, 0)\n", "Hz Actual Raw: (1100000000, 0)\n", "Arch: X86_64\n", "Bits: 64\n", "Count: 8\n", "Raw Arch String: x86_64\n", "L2 Cache Size: 8192 KB\n", "L2 Cache Line Size: 0\n", "L2 Cache Associativity: 0\n", "Stepping: 3\n", "Model: 94\n", "Family: 6\n", "Processor Type: 0\n", "Extended Model: 0\n", "Extended Family: 0\n", "Flags: 3dnowprefetch, abm, acpi, adx, aes, aperfmperf, apic, arat, arch_perfmon, avx, avx2, bmi1, bmi2, bts, clflush, clflushopt, cmov, constant_tsc, cx16, cx8, de, ds_cpl, dtes64, dtherm, dts, eagerfpu, epb, ept, erms, est, f16c, flexpriority, fma, fpu, fsgsbase, fxsr, hle, ht, hwp, hwp_act_window, hwp_epp, hwp_noitfy, ida, invpcid, lahf_lm, lm, mca, mce, mmx, monitor, movbe, mpx, msr, mtrr, nonstop_tsc, nopl, nx, pae, pat, pbe, pcid, pclmulqdq, pdcm, pdpe1gb, pebs, pge, pln, pni, popcnt, pse, pse36, pts, rdrand, rdseed, rdtscp, rep_good, rtm, sep, smap, smep, smx, ss, sse, sse2, sse4_1, sse4_2, ssse3, syscall, tm, tm2, tpr_shadow, tsc, tsc_adjust, tsc_deadline_timer, vme, vmx, vnmi, vpid, x2apic, xgetbv1, xsave, xsavec, xsaveopt, xtopology, xtpr\n" ] } ], "source": [ "cpuinfo.main()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.2" } }, "nbformat": 4, "nbformat_minor": 0 } zarr-python-3.0.6/notebooks/object_arrays.ipynb000066400000000000000000000732121476711733500217350ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Object arrays\n", "\n", "See [#212](https://github.com/alimanfoo/zarr/pull/212) for more information." 
] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'2.2.0a2.dev82+dirty'" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import zarr\n", "zarr.__version__" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'0.5.0'" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import numcodecs\n", "numcodecs.__version__" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## API changes in Zarr version 2.2" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Creation of an object array requires providing new ``object_codec`` argument:" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z = zarr.empty(10, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())\n", "z" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To maintain backwards compatibility with previously-created data, the object codec is treated as a filter and inserted as the first filter in the chain:" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Type : zarr.core.Array
Data type : object
Shape : (10,)
Chunk shape : (5,)
Order : C
Read-only : False
Filter [0] : MsgPack(encoding='utf-8')
Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store type : builtins.dict
No. bytes : 80
No. bytes stored : 396
Storage ratio : 0.2
Chunks initialized : 0/2
" ], "text/plain": [ "Type : zarr.core.Array\n", "Data type : object\n", "Shape : (10,)\n", "Chunk shape : (5,)\n", "Order : C\n", "Read-only : False\n", "Filter [0] : MsgPack(encoding='utf-8')\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 80\n", "No. bytes stored : 396\n", "Storage ratio : 0.2\n", "Chunks initialized : 0/2" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z.info" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array(['foo', 'bar', 1, list([2, 4, 6, 'baz']), {'a': 'b', 'c': 'd'}, None,\n", " None, None, None, None], dtype=object)" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z[0] = 'foo'\n", "z[1] = b'bar' # msgpack doesn't support bytes objects correctly\n", "z[2] = 1\n", "z[3] = [2, 4, 6, 'baz']\n", "z[4] = {'a': 'b', 'c': 'd'}\n", "a = z[:]\n", "a" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If no ``object_codec`` is provided, a ``ValueError`` is raised:" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "ename": "ValueError", "evalue": "missing object_codec for object array", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mz\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mzarr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mempty\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/creation.py\u001b[0m in \u001b[0;36mempty\u001b[0;34m(shape, **kwargs)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \"\"\"\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcreate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/creation.py\u001b[0m in \u001b[0;36mcreate\u001b[0;34m(shape, chunks, dtype, compressor, fill_value, order, store, synchronizer, overwrite, path, chunk_store, filters, cache_metadata, read_only, object_codec, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor,\n\u001b[1;32m 113\u001b[0m \u001b[0mfill_value\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfill_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0morder\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0morder\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moverwrite\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moverwrite\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 114\u001b[0;31m chunk_store=chunk_store, filters=filters, object_codec=object_codec)\n\u001b[0m\u001b[1;32m 115\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;31m# instantiate array\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/storage.py\u001b[0m in \u001b[0;36minit_array\u001b[0;34m(store, shape, chunks, dtype, compressor, fill_value, order, overwrite, path, chunk_store, filters, object_codec)\u001b[0m\n\u001b[1;32m 289\u001b[0m \u001b[0morder\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0morder\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moverwrite\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moverwrite\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 290\u001b[0m \u001b[0mchunk_store\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mchunk_store\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilters\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfilters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 291\u001b[0;31m object_codec=object_codec)\n\u001b[0m\u001b[1;32m 292\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 293\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/storage.py\u001b[0m in \u001b[0;36m_init_array_metadata\u001b[0;34m(store, shape, chunks, dtype, compressor, fill_value, order, overwrite, path, chunk_store, filters, object_codec)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mfilters\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# there are no filters so we can be sure there is no object codec\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'missing object_codec for object array'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0;31m# one of the filters may be an object codec, issue a warning rather\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mValueError\u001b[0m: missing object_codec for object array" ] } ], "source": [ "z = zarr.empty(10, chunks=5, dtype=object)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For API backward-compatibility, if object codec is provided via filters, issue a warning but don't raise an error." 
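, "\n", "\n", "(An illustrative sketch contrasting the two spellings, with hypothetical variable names; the next cell demonstrates the warning the legacy form now triggers.)\n", "\n", "```python\n", "# equivalent in effect: explicit argument (new) vs. first filter entry (legacy)\n", "z_new = zarr.empty(10, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())\n", "z_old = zarr.empty(10, chunks=5, dtype=object, filters=[numcodecs.MsgPack()])  # warns\n", "```"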
] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/aliman/src/github/alimanfoo/zarr/zarr/storage.py:353: FutureWarning: missing object_codec for object array; this will raise a ValueError in version 3.0\n", " 'ValueError in version 3.0', FutureWarning)\n" ] } ], "source": [ "z = zarr.empty(10, chunks=5, dtype=object, filters=[numcodecs.MsgPack()])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "If a user tries to subvert the system and create an object array with no object codec, a runtime check is added to ensure no object arrays are passed down to the compressor (which could lead to nasty errors and/or segfaults):" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "z = zarr.empty(10, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())\n", "z._filters = None # try to live dangerously, manually wipe filters" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "ename": "RuntimeError", "evalue": "cannot write object array without object codec", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mz\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'foo'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m__setitem__\u001b[0;34m(self, selection, value)\u001b[0m\n\u001b[1;32m 1094\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1095\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpop_fields\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1096\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1097\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mset_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36mset_basic_selection\u001b[0;34m(self, selection, value, fields)\u001b[0m\n\u001b[1;32m 1189\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_basic_selection_zd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1190\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 
1191\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_basic_selection_nd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1192\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1193\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mset_orthogonal_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_set_basic_selection_nd\u001b[0;34m(self, selection, value, fields)\u001b[0m\n\u001b[1;32m 1480\u001b[0m \u001b[0mindexer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBasicIndexer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1481\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1482\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_set_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindexer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1483\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1484\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_set_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mindexer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_set_selection\u001b[0;34m(self, indexer, value, fields)\u001b[0m\n\u001b[1;32m 1528\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1529\u001b[0m \u001b[0;31m# put data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1530\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_chunk_setitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mchunk_coords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunk_selection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunk_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1531\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1532\u001b[0m def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_chunk_setitem\u001b[0;34m(self, chunk_coords, chunk_selection, value, fields)\u001b[0m\n\u001b[1;32m 1633\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mlock\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1634\u001b[0m self._chunk_setitem_nosync(chunk_coords, chunk_selection, value,\n\u001b[0;32m-> 1635\u001b[0;31m fields=fields)\n\u001b[0m\u001b[1;32m 1636\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 
1637\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_chunk_setitem_nosync\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunk_coords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchunk_selection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_chunk_setitem_nosync\u001b[0;34m(self, chunk_coords, chunk_selection, value, fields)\u001b[0m\n\u001b[1;32m 1707\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1708\u001b[0m \u001b[0;31m# encode chunk\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1709\u001b[0;31m \u001b[0mcdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_encode_chunk\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mchunk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1710\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1711\u001b[0m \u001b[0;31m# store\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m_encode_chunk\u001b[0;34m(self, chunk)\u001b[0m\n\u001b[1;32m 1753\u001b[0m \u001b[0;31m# check object encoding\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1754\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mchunk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mchunk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mobject\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1755\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'cannot write object array without object codec'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1756\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1757\u001b[0m \u001b[0;31m# compress\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mRuntimeError\u001b[0m: cannot write object array without object codec" ] } ], "source": [ "z[0] = 'foo'" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Here is another way to subvert the system, wiping filters **after** storing some data. To cover this case a runtime check is added to ensure no object arrays are handled inappropriately during decoding (which could lead to nasty errors and/or segfaults)." 
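, "\n", "\n", "(A simplified sketch of that decode-time guard, based on the traceback below rather than a verbatim copy of ``zarr.core.Array._decode_chunk``.)\n", "\n", "```python\n", "# after decompressing and applying any filters in reverse, a chunk that\n", "# should be object-dtype but has no object codec left to decode it is an error\n", "if self._dtype == object:\n", "    raise RuntimeError('cannot read object array without object codec')\n", "```"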
] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array(['¡Hola mundo!', 'Hej Världen!', 'Servus Woid!', 'Hei maailma!',\n", " 'Xin chào thế giới', 'Njatjeta Botë!', 'Γεια σου κόσμε!', 'こんにちは世界',\n", " '世界,你好!', 'Helló, világ!', 'Zdravo svete!', 'เฮลโลเวิลด์'], dtype=object)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from numcodecs.tests.common import greetings\n", "z = zarr.array(greetings, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())\n", "z[:]" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "ename": "RuntimeError", "evalue": "cannot read object array without object codec", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mz\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_filters\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;31m# try to live dangerously, manually wipe filters\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mz\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, selection)\u001b[0m\n\u001b[1;32m 551\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 552\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpop_fields\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 553\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfields\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 554\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 555\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_basic_selection\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mEllipsis\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/src/github/alimanfoo/zarr/zarr/core.py\u001b[0m in \u001b[0;36mget_basic_selection\u001b[0;34m(self, selection, out, fields)\u001b[0m\n\u001b[1;32m 677\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 678\u001b[0m return self._get_basic_selection_nd(selection=selection, out=out,\n\u001b[0;32m--> 679\u001b[0;31m fields=fields)\n\u001b[0m\u001b[1;32m 680\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 681\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_basic_selection_zd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mselection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m 
fields=None):\n", "~/src/github/alimanfoo/zarr/zarr/core.py in _get_basic_selection_nd(self, selection, out, fields)\n 719 indexer = BasicIndexer(selection, self)\n 720 \n--> 721 return self._get_selection(indexer=indexer, out=out, fields=fields)\n 722 \n 723 def get_orthogonal_selection(self, selection, out=None, fields=None):\n", "~/src/github/alimanfoo/zarr/zarr/core.py in _get_selection(self, indexer, out, fields)\n 1007 # load chunk selection into output array\n 1008 self._chunk_getitem(chunk_coords, chunk_selection, out, out_selection,\n--> 1009 drop_axes=indexer.drop_axes, fields=fields)\n 1010 \n 1011 if out.shape:\n", "~/src/github/alimanfoo/zarr/zarr/core.py in _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection, drop_axes, fields)\n 1597 \n 1598 # decode chunk\n--> 1599 chunk = self._decode_chunk(cdata)\n 1600 \n 1601 # select data from chunk\n", "~/src/github/alimanfoo/zarr/zarr/core.py in _decode_chunk(self, cdata)\n 1733 chunk = chunk.astype(self._dtype)\n 1734
else:\n--> 1735 raise RuntimeError('cannot read object array without object codec')\n 1736 elif isinstance(chunk, np.ndarray):\n 1737 chunk = chunk.view(self._dtype)\n", "RuntimeError: cannot read object array without object codec" ] } ], "source": [ "z._filters = [] # try to live dangerously, manually wipe filters\n", "z[:]" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.1" } }, "nbformat": 4, "nbformat_minor": 2 } zarr-python-3.0.6/notebooks/repr_info.ipynb000066400000000000000000000543001476711733500210660ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import zarr" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "root = zarr.group()\n", "root" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /
Type : zarr.hierarchy.Group
Read-only : False
Store type : zarr.storage.DictStore
No. members : 0
No. arrays : 0
No. groups : 0
" ], "text/plain": [ "Name : /\n", "Type : zarr.hierarchy.Group\n", "Read-only : False\n", "Store type : zarr.storage.DictStore\n", "No. members : 0\n", "No. arrays : 0\n", "No. groups : 0" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "root.info" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "z = root.zeros('foo/bar/baz', shape=1000000)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /foo/bar/baz
Type : zarr.core.Array
Data type : float64
Shape : (1000000,)
Chunk shape : (15625,)
Order : C
Read-only : False
Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store type : zarr.storage.DictStore
No. bytes : 8000000 (7.6M)
No. bytes stored : 321
Storage ratio : 24922.1
Chunks initialized : 0/64
" ], "text/plain": [ "Name : /foo/bar/baz\n", "Type : zarr.core.Array\n", "Data type : float64\n", "Shape : (1000000,)\n", "Chunk shape : (15625,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : zarr.storage.DictStore\n", "No. bytes : 8000000 (7.6M)\n", "No. bytes stored : 321\n", "Storage ratio : 24922.1\n", "Chunks initialized : 0/64" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z.info" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "z[:] = 42" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /foo/bar/baz
Type : zarr.core.Array
Data type : float64
Shape : (1000000,)
Chunk shape : (15625,)
Order : C
Read-only : False
Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store type : zarr.storage.DictStore
No. bytes : 8000000 (7.6M)
No. bytes stored : 39553 (38.6K)
Storage ratio : 202.3
Chunks initialized : 64/64
" ], "text/plain": [ "Name : /foo/bar/baz\n", "Type : zarr.core.Array\n", "Data type : float64\n", "Shape : (1000000,)\n", "Chunk shape : (15625,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : zarr.storage.DictStore\n", "No. bytes : 8000000 (7.6M)\n", "No. bytes stored : 39553 (38.6K)\n", "Storage ratio : 202.3\n", "Chunks initialized : 64/64" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "z.info" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "for i in range(1000):\n", " root.create_group(i)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "root" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /
Type : zarr.hierarchy.Group
Read-only : False
Store type : zarr.storage.DictStore
No. members : 1001
No. arrays : 0
No. groups : 1001
Groups0, 1, 10, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 11, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 12, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 13, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 14, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 15, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 16, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 17, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 18, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 19, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 2, 20, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 21, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 22, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 23, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 24, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 25, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 26, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 27, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 28, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 29, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 3, 30, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 31, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 32, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 33, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 34, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 35, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 36, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 37, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 38, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 39, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 4, 40, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 41, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 42, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 43, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 44, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 45, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 46, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 47, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 48, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 49, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 5, 50, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 51, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 52, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 53, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 54, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 55, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 56, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 57, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 58, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 59, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 6, 60, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 61, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 62, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 63, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 64, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 65, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 66, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 67, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 68, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 69, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 7, 70, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 71, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 72, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 73, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 74, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 75, 750, 751, 
752, 753, 754, 755, 756, 757, 758, 759, 76, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 77, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 78, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 79, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 8, 80, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 81, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 82, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 83, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 84, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 85, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 86, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 87, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 88, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 89, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 9, 90, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 91, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 92, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 93, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 94, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 95, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 96, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 97, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 98, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 99, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, foo
" ], "text/plain": [ "Name : /\n", "Type : zarr.hierarchy.Group\n", "Read-only : False\n", "Store type : zarr.storage.DictStore\n", "No. members : 1001\n", "No. arrays : 0\n", "No. groups : 1001\n", "Groups : 0, 1, 10, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 11,\n", " : 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 12, 120, 121,\n", " : 122, 123, 124, 125, 126, 127, 128, 129, 13, 130, 131, 132, 133,\n", " : 134, 135, 136, 137, 138, 139, 14, 140, 141, 142, 143, 144, 145,\n", " : 146, 147, 148, 149, 15, 150, 151, 152, 153, 154, 155, 156, 157,\n", " : 158, 159, 16, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,\n", " : 17, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 18, 180,\n", " : 181, 182, 183, 184, 185, 186, 187, 188, 189, 19, 190, 191, 192,\n", " : 193, 194, 195, 196, 197, 198, 199, 2, 20, 200, 201, 202, 203, 204,\n", " : 205, 206, 207, 208, 209, 21, 210, 211, 212, 213, 214, 215, 216,\n", " : 217, 218, 219, 22, 220, 221, 222, 223, 224, 225, 226, 227, 228,\n", " : 229, 23, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 24,\n", " : 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 25, 250, 251,\n", " : 252, 253, 254, 255, 256, 257, 258, 259, 26, 260, 261, 262, 263,\n", " : 264, 265, 266, 267, 268, 269, 27, 270, 271, 272, 273, 274, 275,\n", " : 276, 277, 278, 279, 28, 280, 281, 282, 283, 284, 285, 286, 287,\n", " : 288, 289, 29, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 3,\n", " : 30, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 31, 310,\n", " : 311, 312, 313, 314, 315, 316, 317, 318, 319, 32, 320, 321, 322,\n", " : 323, 324, 325, 326, 327, 328, 329, 33, 330, 331, 332, 333, 334,\n", " : 335, 336, 337, 338, 339, 34, 340, 341, 342, 343, 344, 345, 346,\n", " : 347, 348, 349, 35, 350, 351, 352, 353, 354, 355, 356, 357, 358,\n", " : 359, 36, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 37,\n", " : 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 38, 380, 381,\n", " : 382, 383, 384, 385, 386, 387, 388, 389, 39, 390, 391, 392, 393,\n", " : 394, 395, 396, 397, 398, 399, 4, 40, 400, 401, 402, 403, 404, 405,\n", " : 406, 407, 408, 409, 41, 410, 411, 412, 413, 414, 415, 416, 417,\n", " : 418, 419, 42, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429,\n", " : 43, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 44, 440,\n", " : 441, 442, 443, 444, 445, 446, 447, 448, 449, 45, 450, 451, 452,\n", " : 453, 454, 455, 456, 457, 458, 459, 46, 460, 461, 462, 463, 464,\n", " : 465, 466, 467, 468, 469, 47, 470, 471, 472, 473, 474, 475, 476,\n", " : 477, 478, 479, 48, 480, 481, 482, 483, 484, 485, 486, 487, 488,\n", " : 489, 49, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 5, 50,\n", " : 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 51, 510, 511,\n", " : 512, 513, 514, 515, 516, 517, 518, 519, 52, 520, 521, 522, 523,\n", " : 524, 525, 526, 527, 528, 529, 53, 530, 531, 532, 533, 534, 535,\n", " : 536, 537, 538, 539, 54, 540, 541, 542, 543, 544, 545, 546, 547,\n", " : 548, 549, 55, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559,\n", " : 56, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 57, 570,\n", " : 571, 572, 573, 574, 575, 576, 577, 578, 579, 58, 580, 581, 582,\n", " : 583, 584, 585, 586, 587, 588, 589, 59, 590, 591, 592, 593, 594,\n", " : 595, 596, 597, 598, 599, 6, 60, 600, 601, 602, 603, 604, 605, 606,\n", " : 607, 608, 609, 61, 610, 611, 612, 613, 614, 615, 616, 617, 618,\n", " : 619, 62, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 63,\n", " : 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 64, 640, 641,\n", " : 642, 643, 644, 645, 646, 
647, 648, 649, 65, 650, 651, 652, 653,\n", " : 654, 655, 656, 657, 658, 659, 66, 660, 661, 662, 663, 664, 665,\n", " : 666, 667, 668, 669, 67, 670, 671, 672, 673, 674, 675, 676, 677,\n", " : 678, 679, 68, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,\n", " : 69, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 7, 70, 700,\n", " : 701, 702, 703, 704, 705, 706, 707, 708, 709, 71, 710, 711, 712,\n", " : 713, 714, 715, 716, 717, 718, 719, 72, 720, 721, 722, 723, 724,\n", " : 725, 726, 727, 728, 729, 73, 730, 731, 732, 733, 734, 735, 736,\n", " : 737, 738, 739, 74, 740, 741, 742, 743, 744, 745, 746, 747, 748,\n", " : 749, 75, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 76,\n", " : 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 77, 770, 771,\n", " : 772, 773, 774, 775, 776, 777, 778, 779, 78, 780, 781, 782, 783,\n", " : 784, 785, 786, 787, 788, 789, 79, 790, 791, 792, 793, 794, 795,\n", " : 796, 797, 798, 799, 8, 80, 800, 801, 802, 803, 804, 805, 806, 807,\n", " : 808, 809, 81, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819,\n", " : 82, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 83, 830,\n", " : 831, 832, 833, 834, 835, 836, 837, 838, 839, 84, 840, 841, 842,\n", " : 843, 844, 845, 846, 847, 848, 849, 85, 850, 851, 852, 853, 854,\n", " : 855, 856, 857, 858, 859, 86, 860, 861, 862, 863, 864, 865, 866,\n", " : 867, 868, 869, 87, 870, 871, 872, 873, 874, 875, 876, 877, 878,\n", " : 879, 88, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 89,\n", " : 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 9, 90, 900, 901,\n", " : 902, 903, 904, 905, 906, 907, 908, 909, 91, 910, 911, 912, 913,\n", " : 914, 915, 916, 917, 918, 919, 92, 920, 921, 922, 923, 924, 925,\n", " : 926, 927, 928, 929, 93, 930, 931, 932, 933, 934, 935, 936, 937,\n", " : 938, 939, 94, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949,\n", " : 95, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 96, 960,\n", " : 961, 962, 963, 964, 965, 966, 967, 968, 969, 97, 970, 971, 972,\n", " : 973, 974, 975, 976, 977, 978, 979, 98, 980, 981, 982, 983, 984,\n", " : 985, 986, 987, 988, 989, 99, 990, 991, 992, 993, 994, 995, 996,\n", " : 997, 998, 999, foo" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "root.info" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /foo/bar
Type : zarr.hierarchy.Group
Read-only : False
Store type : zarr.storage.DictStore
No. members : 1
No. arrays : 1
No. groups : 0
Arrays : baz
" ], "text/plain": [ "Name : /foo/bar\n", "Type : zarr.hierarchy.Group\n", "Read-only : False\n", "Store type : zarr.storage.DictStore\n", "No. members : 1\n", "No. arrays : 1\n", "No. groups : 0\n", "Arrays : baz" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "root['foo/bar'].info" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.1" } }, "nbformat": 4, "nbformat_minor": 2 } zarr-python-3.0.6/notebooks/store_benchmark.ipynb000066400000000000000000000700731476711733500222560ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "There are lies, damn lies and benchmarks..." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'2.2.0a2.dev22+dirty'" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import zarr\n", "zarr.__version__" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'6.2.5'" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import bsddb3\n", "bsddb3.__version__" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'0.93'" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import lmdb\n", "lmdb.__version__" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import numpy as np" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "import dbm.gnu\n", "import dbm.ndbm" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "import os\n", "import shutil\n", "bench_dir = '../data/bench'\n", "\n", "\n", "def clean():\n", " if os.path.isdir(bench_dir):\n", " shutil.rmtree(bench_dir)\n", " os.makedirs(bench_dir)\n", "\n", " \n", "def setup(a, name='foo/bar'):\n", " global fdict_z, hdict_z, lmdb_z, gdbm_z, ndbm_z, bdbm_btree_z, bdbm_hash_z, zip_z, dir_z\n", " \n", " clean()\n", " fdict_root = zarr.group(store=dict())\n", " hdict_root = zarr.group(store=zarr.DictStore())\n", " lmdb_root = zarr.group(store=zarr.LMDBStore(os.path.join(bench_dir, 'lmdb')))\n", " gdbm_root = zarr.group(store=zarr.DBMStore(os.path.join(bench_dir, 'gdbm'), open=dbm.gnu.open))\n", " ndbm_root = zarr.group(store=zarr.DBMStore(os.path.join(bench_dir, 'ndbm'), open=dbm.ndbm.open))\n", " bdbm_btree_root = zarr.group(store=zarr.DBMStore(os.path.join(bench_dir, 'bdbm_btree'), open=bsddb3.btopen))\n", " bdbm_hash_root = zarr.group(store=zarr.DBMStore(os.path.join(bench_dir, 'bdbm_hash'), open=bsddb3.hashopen))\n", " zip_root = zarr.group(store=zarr.ZipStore(os.path.join(bench_dir, 'zip'), mode='w'))\n", " dir_root = zarr.group(store=zarr.DirectoryStore(os.path.join(bench_dir, 'dir')))\n", "\n", " fdict_z = fdict_root.empty_like(name, a)\n", " hdict_z = hdict_root.empty_like(name, a)\n", " lmdb_z = lmdb_root.empty_like(name, a)\n", " gdbm_z = gdbm_root.empty_like(name, a)\n", " 
ndbm_z = ndbm_root.empty_like(name, a)\n", " bdbm_btree_z = bdbm_btree_root.empty_like(name, a)\n", " bdbm_hash_z = bdbm_hash_root.empty_like(name, a)\n", " zip_z = zip_root.empty_like(name, a)\n", " dir_z = dir_root.empty_like(name, a)\n", "\n", " # check compression ratio\n", " fdict_z[:] = a\n", " return fdict_z.info\n", " \n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Main benchmarks" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def save(a, z):\n", " if isinstance(z.store, zarr.ZipStore):\n", " # needed for zip benchmarks to avoid duplicate entries\n", " z.store.clear()\n", " z[:] = a\n", " if hasattr(z.store, 'flush'):\n", " z.store.flush()\n", " \n", " \n", "def load(z, a):\n", " z.get_basic_selection(out=a)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## arange" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /foo/bar
Type : zarr.core.Array
Data type : int64
Shape : (500000000,)
Chunk shape : (488282,)
Order : C
Read-only : False
Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store type : builtins.dict
No. bytes : 4000000000 (3.7G)
No. bytes stored : 59269657 (56.5M)
Storage ratio : 67.5
Chunks initialized : 1024/1024
" ], "text/plain": [ "Name : /foo/bar\n", "Type : zarr.core.Array\n", "Data type : int64\n", "Shape : (500000000,)\n", "Chunk shape : (488282,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 4000000000 (3.7G)\n", "No. bytes stored : 59269657 (56.5M)\n", "Storage ratio : 67.5\n", "Chunks initialized : 1024/1024" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "a = np.arange(500000000)\n", "setup(a)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### save" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "324 ms ± 60.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, fdict_z)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "302 ms ± 11.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, hdict_z)" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "316 ms ± 12.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, lmdb_z)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "938 ms ± 111 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, gdbm_z)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "406 ms ± 8.93 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, ndbm_z)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.43 s ± 156 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, bdbm_btree_z)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.24 s ± 260 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, bdbm_hash_z)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "519 ms ± 59.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, zip_z)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "609 ms ± 48.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit save(a, dir_z)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### load" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "454 ms ± 56.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(fdict_z, a)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "428 ms ± 13.1 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(hdict_z, a)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "429 ms ± 19.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(lmdb_z, a)" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "459 ms ± 10 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(gdbm_z, a)" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "473 ms ± 5.71 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(ndbm_z, a)" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "504 ms ± 8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(bdbm_btree_z, a)" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "519 ms ± 9.59 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(bdbm_hash_z, a)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "575 ms ± 12.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(zip_z, a)" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "494 ms ± 10.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], "source": [ "%timeit load(dir_z, a)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## randint" ] }, { "cell_type": "code", "execution_count": 28, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Name : /foo/bar
Type : zarr.core.Array
Data type : int64
Shape : (500000000,)
Chunk shape : (488282,)
Order : C
Read-only : False
Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
Store type : builtins.dict
No. bytes : 4000000000 (3.7G)
No. bytes stored : 2020785466 (1.9G)
Storage ratio : 2.0
Chunks initialized : 1024/1024
" ], "text/plain": [ "Name : /foo/bar\n", "Type : zarr.core.Array\n", "Data type : int64\n", "Shape : (500000000,)\n", "Chunk shape : (488282,)\n", "Order : C\n", "Read-only : False\n", "Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n", "Store type : builtins.dict\n", "No. bytes : 4000000000 (3.7G)\n", "No. bytes stored : 2020785466 (1.9G)\n", "Storage ratio : 2.0\n", "Chunks initialized : 1024/1024" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "np.random.seed(42)\n", "a = np.random.randint(0, 2**30, size=500000000)\n", "setup(a)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### save" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "670 ms ± 78.1 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, fdict_z)" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "611 ms ± 6.11 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, hdict_z)" ] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "846 ms ± 24 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, lmdb_z)" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "6.35 s ± 785 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, gdbm_z)" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "4.62 s ± 1.09 s per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, ndbm_z)" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "7.84 s ± 1.66 s per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, bdbm_btree_z)" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "6.49 s ± 808 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, bdbm_hash_z)" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3.68 s ± 441 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, zip_z)" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "3.55 s ± 1.24 s per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 save(a, dir_z)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### load" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "566 ms ± 72.8 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(fdict_z, a)" ] }, { "cell_type": "code", "execution_count": 40, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "521 ms ± 16.1 ms per loop (mean ± std. dev. 
of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(hdict_z, a)" ] }, { "cell_type": "code", "execution_count": 41, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "532 ms ± 16.1 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(lmdb_z, a)" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.2 s ± 10.9 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(gdbm_z, a)" ] }, { "cell_type": "code", "execution_count": 43, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.18 s ± 13.2 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(ndbm_z, a)" ] }, { "cell_type": "code", "execution_count": 44, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.59 s ± 16.7 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(bdbm_btree_z, a)" ] }, { "cell_type": "code", "execution_count": 45, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1.61 s ± 7.31 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(bdbm_hash_z, a)" ] }, { "cell_type": "code", "execution_count": 46, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2.33 s ± 19.8 ms per loop (mean ± std. dev. of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(zip_z, a)" ] }, { "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "790 ms ± 56 ms per loop (mean ± std. dev. 
of 3 runs, 1 loop each)\n" ] } ], "source": [ "%timeit -r3 load(dir_z, a)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### dask" ] }, { "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [], "source": [ "import dask.array as da" ] }, { "cell_type": "code", "execution_count": 50, "metadata": {}, "outputs": [], "source": [ "def dask_op(source, sink, chunks=None):\n", " if isinstance(sink.store, zarr.ZipStore):\n", " sink.store.clear()\n", " if chunks is None:\n", " try:\n", " chunks = sink.chunks\n", " except AttributeError:\n", " chunks = source.chunks\n", " d = da.from_array(source, chunks=chunks, asarray=False, fancy=False, lock=False)\n", " result = (d // 2) * 2\n", " da.store(result, sink, lock=False)\n", " if hasattr(sink.store, 'flush'):\n", " sink.store.flush()\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Compare sources" ] }, { "cell_type": "code", "execution_count": 76, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.6 s, sys: 1.8 s, total: 17.4 s\n", "Wall time: 3.07 s\n" ] } ], "source": [ "%time dask_op(fdict_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 77, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 16.5 s, sys: 104 ms, total: 16.6 s\n", "Wall time: 2.59 s\n" ] } ], "source": [ "%time dask_op(hdict_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 78, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.1 s, sys: 524 ms, total: 15.6 s\n", "Wall time: 3.02 s\n" ] } ], "source": [ "%time dask_op(lmdb_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 79, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 16.5 s, sys: 712 ms, total: 17.2 s\n", "Wall time: 3.13 s\n" ] } ], "source": [ "%time dask_op(gdbm_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 16.3 s, sys: 604 ms, total: 16.9 s\n", "Wall time: 3.22 s\n" ] } ], "source": [ "%time dask_op(ndbm_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 81, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 19.6 s, sys: 1.24 s, total: 20.9 s\n", "Wall time: 3.27 s\n" ] } ], "source": [ "%time dask_op(bdbm_btree_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 82, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 20.3 s, sys: 1.08 s, total: 21.4 s\n", "Wall time: 3.53 s\n" ] } ], "source": [ "%time dask_op(bdbm_hash_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 83, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.7 s, sys: 700 ms, total: 16.4 s\n", "Wall time: 3.12 s\n" ] } ], "source": [ "%time dask_op(zip_z, fdict_z)" ] }, { "cell_type": "code", "execution_count": 84, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 17.4 s, sys: 1.08 s, total: 18.5 s\n", "Wall time: 2.91 s\n" ] } ], "source": [ "%time dask_op(dir_z, fdict_z)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Compare sinks" ] }, { "cell_type": "code", "execution_count": 51, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.8 s, sys: 1.4 s, 
total: 17.2 s\n", "Wall time: 3.04 s\n" ] } ], "source": [ "%time dask_op(fdict_z, hdict_z)" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 16.2 s, sys: 1.6 s, total: 17.8 s\n", "Wall time: 2.71 s\n" ] } ], "source": [ "%time dask_op(fdict_z, lmdb_z)" ] }, { "cell_type": "code", "execution_count": 59, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 16.8 s, sys: 3.05 s, total: 19.8 s\n", "Wall time: 8.01 s\n" ] } ], "source": [ "%time dask_op(fdict_z, gdbm_z)" ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 17.9 s, sys: 3.01 s, total: 20.9 s\n", "Wall time: 5.46 s\n" ] } ], "source": [ "%time dask_op(fdict_z, ndbm_z)" ] }, { "cell_type": "code", "execution_count": 61, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 13.8 s, sys: 3.39 s, total: 17.2 s\n", "Wall time: 7.87 s\n" ] } ], "source": [ "%time dask_op(fdict_z, bdbm_btree_z)" ] }, { "cell_type": "code", "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 13.9 s, sys: 3.27 s, total: 17.2 s\n", "Wall time: 6.73 s\n" ] } ], "source": [ "%time dask_op(fdict_z, bdbm_hash_z)" ] }, { "cell_type": "code", "execution_count": 57, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 13.9 s, sys: 2.5 s, total: 16.4 s\n", "Wall time: 3.8 s\n" ] } ], "source": [ "%time dask_op(fdict_z, zip_z)" ] }, { "cell_type": "code", "execution_count": 58, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 15.7 s, sys: 3.72 s, total: 19.4 s\n", "Wall time: 3.1 s\n" ] } ], "source": [ "%time dask_op(fdict_z, dir_z)" ] }, { "cell_type": "code", "execution_count": 62, "metadata": {}, "outputs": [], "source": [ "lmdb_z.store.close()\n", "gdbm_z.store.close()\n", "ndbm_z.store.close()\n", "bdbm_btree_z.store.close()\n", "bdbm_hash_z.store.close()\n", "zip_z.store.close()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.1" } }, "nbformat": 4, "nbformat_minor": 2 } zarr-python-3.0.6/notebooks/zip_benchmark.ipynb000066400000000000000000000254611476711733500217250ustar00rootroot00000000000000{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "'2.0.2.dev0+dirty'" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import sys\n", "sys.path.insert(0, '..')\n", "import zarr\n", "zarr.__version__" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "Array(/3L/calldata/genotype, (7449486, 773, 2), int8, chunks=(13107, 40, 2), order=C)\n", " nbytes: 10.7G; nbytes_stored: 193.5M; ratio: 56.7; initialized: 11380/11380\n", " compressor: Blosc(cname='zstd', clevel=1, shuffle=2)\n", " store: ZipStore" ] }, "execution_count": 2, 
"metadata": {}, "output_type": "execute_result" } ], "source": [ "store = zarr.ZipStore('/data/coluzzi/ag1000g/data/phase1/release/AR3.1/haplotypes/main/zarr2/zstd/ag1000g.phase1.ar3.1.haplotypes.zip',\n", " mode='r')\n", "grp = zarr.Group(store)\n", "z = grp['3L/calldata/genotype']\n", "z" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " 1832 function calls in 0.024 seconds\n", "\n", " Ordered by: cumulative time\n", "\n", " ncalls tottime percall cumtime percall filename:lineno(function)\n", " 1 0.000 0.000 0.024 0.024 {built-in method builtins.exec}\n", " 1 0.000 0.000 0.024 0.024 :1()\n", " 1 0.000 0.000 0.024 0.024 core.py:292(__getitem__)\n", " 20 0.000 0.000 0.023 0.001 core.py:539(_chunk_getitem)\n", " 20 0.000 0.000 0.020 0.001 core.py:679(_decode_chunk)\n", " 20 0.000 0.000 0.020 0.001 codecs.py:355(decode)\n", " 20 0.020 0.001 0.020 0.001 {zarr.blosc.decompress}\n", " 20 0.000 0.000 0.002 0.000 storage.py:766(__getitem__)\n", " 20 0.000 0.000 0.001 0.000 zipfile.py:1235(open)\n", " 20 0.000 0.000 0.001 0.000 zipfile.py:821(read)\n", " 20 0.000 0.000 0.001 0.000 zipfile.py:901(_read1)\n", " 80 0.000 0.000 0.001 0.000 zipfile.py:660(read)\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:854(_update_crc)\n", " 40 0.000 0.000 0.000 0.000 {built-in method zlib.crc32}\n", " 80 0.000 0.000 0.000 0.000 {method 'read' of '_io.BufferedReader' objects}\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:937(_read2)\n", " 80 0.000 0.000 0.000 0.000 core.py:390()\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:953(close)\n", " 20 0.000 0.000 0.000 0.000 {method 'reshape' of 'numpy.ndarray' objects}\n", " 20 0.000 0.000 0.000 0.000 util.py:106(is_total_slice)\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:708(__init__)\n", " 20 0.000 0.000 0.000 0.000 {method 'decode' of 'bytes' objects}\n", " 20 0.000 0.000 0.000 0.000 core.py:676(_chunk_key)\n", " 80 0.000 0.000 0.000 0.000 {method 'seek' of '_io.BufferedReader' objects}\n", " 20 0.000 0.000 0.000 0.000 {built-in method numpy.core.multiarray.frombuffer}\n", " 80 0.000 0.000 0.000 0.000 core.py:398()\n", " 20 0.000 0.000 0.000 0.000 {method 'join' of 'str' objects}\n", " 20 0.000 0.000 0.000 0.000 core.py:386()\n", " 20 0.000 0.000 0.000 0.000 {built-in method builtins.all}\n", " 40 0.000 0.000 0.000 0.000 util.py:121()\n", " 231 0.000 0.000 0.000 0.000 {built-in method builtins.isinstance}\n", " 20 0.000 0.000 0.000 0.000 cp437.py:14(decode)\n", " 80 0.000 0.000 0.000 0.000 {method 'tell' of '_io.BufferedReader' objects}\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:667(close)\n", " 20 0.000 0.000 0.000 0.000 {built-in method _struct.unpack}\n", " 140 0.000 0.000 0.000 0.000 {built-in method builtins.max}\n", " 20 0.000 0.000 0.000 0.000 {function ZipExtFile.close at 0x7f8cd5ca2048}\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:1194(getinfo)\n", " 140 0.000 0.000 0.000 0.000 {built-in method builtins.min}\n", " 20 0.000 0.000 0.000 0.000 threading.py:1224(current_thread)\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:654(__init__)\n", " 1 0.000 0.000 0.000 0.000 util.py:195(get_chunk_range)\n", " 20 0.000 0.000 0.000 0.000 {built-in method _codecs.charmap_decode}\n", " 1 0.000 0.000 0.000 0.000 util.py:166(normalize_array_selection)\n", " 1 0.000 0.000 0.000 0.000 util.py:198()\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:1715(_fpclose)\n", " 20 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects}\n", " 63 0.000 0.000 0.000 0.000 {built-in method 
builtins.len}\n", " 1 0.000 0.000 0.000 0.000 {built-in method numpy.core.multiarray.empty}\n", " 2 0.000 0.000 0.000 0.000 util.py:182()\n", " 20 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n", " 20 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident}\n", " 1 0.000 0.000 0.000 0.000 util.py:130(normalize_axis_selection)\n", " 20 0.000 0.000 0.000 0.000 zipfile.py:636(_get_decompressor)\n", " 20 0.000 0.000 0.000 0.000 threading.py:1298(main_thread)\n", " 4 0.000 0.000 0.000 0.000 core.py:373()\n", " 3 0.000 0.000 0.000 0.000 util.py:187()\n", " 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n", "\n", "\n" ] } ], "source": [ "import cProfile\n", "cProfile.run('z[:10]', sort='cumtime')" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "'0.11.0'" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import dask\n", "import dask.array as da\n", "dask.__version__" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "dask.array" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "d = da.from_array(z, chunks=z.chunks)\n", "d" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 3min 35s, sys: 4.36 s, total: 3min 40s\n", "Wall time: 29.5 s\n" ] }, { "data": { "text/plain": [ "array([[3, 0],\n", " [1, 0],\n", " [2, 0],\n", " ..., \n", " [2, 8],\n", " [8, 8],\n", " [0, 1]])" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time d.sum(axis=1).compute()" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "Array(/3L/calldata/genotype, (7449486, 773, 2), int8, chunks=(13107, 40, 2), order=C)\n", " nbytes: 10.7G; nbytes_stored: 193.5M; ratio: 56.7; initialized: 11380/11380\n", " compressor: Blosc(cname='zstd', clevel=1, shuffle=2)\n", " store: DirectoryStore" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# compare with same data via directory store\n", "store_dir = zarr.DirectoryStore('/data/coluzzi/ag1000g/data/phase1/release/AR3.1/haplotypes/main/zarr2/zstd/ag1000g.phase1.ar3.1.haplotypes')\n", "grp_dir = zarr.Group(store_dir)\n", "z_dir = grp_dir['3L/calldata/genotype']\n", "z_dir" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "dask.array" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "d_dir = da.from_array(z_dir, chunks=z_dir.chunks)\n", "d_dir" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 3min 39s, sys: 4.91 s, total: 3min 44s\n", "Wall time: 31.1 s\n" ] }, { "data": { "text/plain": [ "array([[3, 0],\n", " [1, 0],\n", " [2, 0],\n", " ..., \n", " [2, 8],\n", " [8, 8],\n", " [0, 1]])" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "%time d_dir.sum(axis=1).compute()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", 
"language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.1" } }, "nbformat": 4, "nbformat_minor": 1 } zarr-python-3.0.6/pyproject.toml000066400000000000000000000276521476711733500167630ustar00rootroot00000000000000[build-system] requires = ["hatchling", "hatch-vcs"] build-backend = "hatchling.build" [tool.hatch.build.targets.sdist] exclude = [ "/.github", "/bench", "/docs", "/notebooks" ] [project] name = "zarr" description = "An implementation of chunked, compressed, N-dimensional arrays for Python" readme = { file = "README.md", content-type = "text/markdown" } authors = [ { name = "Alistair Miles", email = "alimanfoo@googlemail.com" }, ] maintainers = [ { name = "Davis Bennett", email = "davis.v.bennett@gmail.com" }, { name = "jakirkham" }, { name = "Josh Moore", email = "josh@openmicroscopy.org" }, { name = "Joe Hamman", email = "joe@earthmover.io" }, { name = "Juan Nunez-Iglesias", email = "juan.nunez-iglesias@monash.edu" }, { name = "Martin Durant", email = "mdurant@anaconda.com" }, { name = "Norman Rzepka" }, { name = "Ryan Abernathey" }, { name = "David Stansby" }, { name = "Tom Augspurger", email = "tom.w.augspurger@gmail.com" }, { name = "Deepak Cherian" } ] requires-python = ">=3.11" # If you add a new dependency here, please also add it to .pre-commit-config.yml dependencies = [ 'packaging>=22.0', 'numpy>=1.25', 'numcodecs[crc32c]>=0.14', 'typing_extensions>=4.9', 'donfig>=0.8', ] dynamic = [ "version", ] classifiers = [ 'Development Status :: 6 - Mature', 'Intended Audience :: Developers', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', 'Operating System :: Unix', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', ] license = {text = "MIT License"} keywords = ["Python", "compressed", "ndimensional-arrays", "zarr"] [project.optional-dependencies] # User extras remote = [ "fsspec>=2023.10.0", ] gpu = [ "cupy-cuda12x", ] # Development extras test = [ "coverage", "pytest", "pytest-cov", 'zarr[remote]', "botocore", "s3fs", "moto[s3,server]", "pytest-asyncio", "pytest-accept", "requests", "rich", "mypy", "hypothesis", "universal-pathlib", ] optional = ["rich", "universal-pathlib"] docs = [ # Doc building 'sphinx==8.1.3', 'sphinx-autobuild>=2021.3.14', 'sphinx-autoapi==3.4.0', 'sphinx_design', 'sphinx-issues', 'sphinx-copybutton', 'sphinx-reredirects', 'pydata-sphinx-theme', 'numpydoc', # Changelog generation 'towncrier', # Optional dependencies to run examples 'numcodecs[msgpack]', 'rich', 's3fs', 'astroid<4' ] [project.urls] "Bug Tracker" = "https://github.com/zarr-developers/zarr-python/issues" Changelog = "https://zarr.readthedocs.io/en/stable/release.html" Discussions = "https://github.com/zarr-developers/zarr-python/discussions" Documentation = "https://zarr.readthedocs.io/" Homepage = "https://github.com/zarr-developers/zarr-python" [tool.coverage.report] exclude_lines = [ "pragma: no cover", "if TYPE_CHECKING:", "pragma: ${PY_MAJOR_VERSION} no cover", '.*\.\.\.' # Ignore "..." 
lines ] [tool.coverage.run] omit = [ "bench/compress_normal.py", ] [tool.hatch] version.source = "vcs" build.hooks.vcs.version-file = "src/zarr/_version.py" [tool.hatch.envs.test] dependencies = [ "numpy~={matrix:numpy}", "universal_pathlib", ] features = ["test"] [[tool.hatch.envs.test.matrix]] python = ["3.11", "3.12", "3.13"] numpy = ["1.25", "2.1"] version = ["minimal"] [[tool.hatch.envs.test.matrix]] python = ["3.11", "3.12", "3.13"] numpy = ["1.25", "2.1"] features = ["optional"] [[tool.hatch.envs.test.matrix]] python = ["3.11", "3.12", "3.13"] numpy = ["1.25", "2.1"] features = ["gpu"] [tool.hatch.envs.test.scripts] run-coverage = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-gpu = "pip install cupy-cuda12x && pytest -m gpu --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-html = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report html --cov=src" run = "run-coverage --no-cov" run-pytest = "run" run-verbose = "run-coverage --verbose" run-mypy = "mypy src" run-hypothesis = "run-coverage --hypothesis-profile ci --run-slow-hypothesis tests/test_properties.py tests/test_store/test_stateful*" list-env = "pip list" [tool.hatch.envs.doctest] features = ["test", "optional"] description = "Test environment for doctests" [tool.hatch.envs.doctest.scripts] run = "rm -r data/; pytest docs/user-guide --doctest-glob='*.rst'" fix = "rm -r data/; pytest docs/user-guide --doctest-glob='*.rst' --accept" list-env = "pip list" [tool.hatch.envs.gputest] dependencies = [ "numpy~={matrix:numpy}", "universal_pathlib", ] features = ["test", "gpu"] [[tool.hatch.envs.gputest.matrix]] python = ["3.11", "3.12", "3.13"] numpy = ["1.25", "2.1"] version = ["minimal"] [tool.hatch.envs.gputest.scripts] run-coverage = "pytest -m gpu --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run = "run-coverage --no-cov" run-verbose = "run-coverage --verbose" run-mypy = "mypy src" run-hypothesis = "pytest --hypothesis-profile ci tests/test_properties.py tests/test_store/test_stateful*" list-env = "pip list" [tool.hatch.envs.docs] features = ['docs'] [tool.hatch.envs.docs.scripts] build = "cd docs && make html" serve = "sphinx-autobuild docs docs/_build --host 0.0.0.0" [tool.hatch.envs.upstream] python = "3.13" dependencies = [ 'packaging @ git+https://github.com/pypa/packaging', 'numpy', # from scientific-python-nightly-wheels 'numcodecs @ git+https://github.com/zarr-developers/numcodecs', 'fsspec @ git+https://github.com/fsspec/filesystem_spec', 's3fs @ git+https://github.com/fsspec/s3fs', 'universal_pathlib @ git+https://github.com/fsspec/universal_pathlib', 'typing_extensions @ git+https://github.com/python/typing_extensions', 'donfig @ git+https://github.com/pytroll/donfig', # test deps 'zarr[test]', ] [tool.hatch.envs.upstream.env-vars] PIP_INDEX_URL = "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple/" PIP_EXTRA_INDEX_URL = "https://pypi.org/simple/" PIP_PRE = "1" [tool.hatch.envs.upstream.scripts] run = "pytest --verbose" run-mypy = "mypy src" run-hypothesis = "pytest --hypothesis-profile ci tests/test_properties.py tests/test_store/test_stateful*" run-coverage = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-gpu = "pip install cupy-cuda12x && pytest -m gpu --cov-config=pyproject.toml --cov=pkg 
--cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-html = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report html --cov=src" list-env = "pip list" [tool.hatch.envs.min_deps] description = """Test environment for minimum supported dependencies See Spec 0000 for details and drop schedule: https://scientific-python.org/specs/spec-0000/ """ python = "3.11" dependencies = [ 'zarr[remote]', 'packaging==22.*', 'numpy==1.25.*', 'numcodecs==0.14.*', # 0.14 needed for zarr3 codecs 'fsspec==2023.10.0', 's3fs==2023.10.0', 'universal_pathlib==0.0.22', 'typing_extensions==4.9.*', 'donfig==0.8.*', # test deps 'zarr[test]', ] [tool.hatch.envs.min_deps.scripts] run = "pytest --verbose" run-hypothesis = "pytest --hypothesis-profile ci tests/test_properties.py tests/test_store/test_stateful*" list-env = "pip list" run-coverage = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-gpu = "pip install cupy-cuda12x && pytest -m gpu --cov-config=pyproject.toml --cov=pkg --cov-report xml --cov=src --junitxml=junit.xml -o junit_family=legacy" run-coverage-html = "pytest --cov-config=pyproject.toml --cov=pkg --cov-report html --cov=src" [tool.ruff] line-length = 100 force-exclude = true extend-exclude = [ ".bzr", ".direnv", ".eggs", ".git", ".mypy_cache", ".nox", ".pants.d", ".ruff_cache", ".venv", "__pypackages__", "_build", "buck-out", "build", "dist", "notebooks", # temporary, until we achieve compatibility with ruff ≥ 0.6 "venv", "docs", "src/zarr/v2/", "tests/v2/", ] [tool.ruff.lint] extend-select = [ "ANN", # flake8-annotations "B", # flake8-bugbear "EXE", # flake8-executable "C4", # flake8-comprehensions "FA", # flake8-future-annotations "FLY", # flynt "FURB", # refurb "G", # flake8-logging-format "I", # isort "ISC", # flake8-implicit-str-concat "LOG", # flake8-logging "PERF", # Perflint "PIE", # flake8-pie "PGH", # pygrep-hooks "PT", # flake8-pytest-style "PYI", # flake8-pyi "RET", # flake8-return "RSE", # flake8-raise "RUF", "SIM", # flake8-simplify "SLOT", # flake8-slots "TCH", # flake8-type-checking "TRY", # tryceratops "UP", # pyupgrade "W", # pycodestyle warnings ] ignore = [ "ANN401", "PT011", # TODO: apply this rule "PT012", # TODO: apply this rule "RET505", "RET506", "RUF005", "SIM108", "TRY003", "UP038", # https://github.com/astral-sh/ruff/issues/7871 # https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules "W191", "E111", "E114", "E117", "D206", "D300", "Q000", "Q001", "Q002", "Q003", "COM812", "COM819", ] [tool.ruff.lint.extend-per-file-ignores] "tests/**" = ["ANN001", "ANN201", "RUF029", "SIM117", "SIM300"] [tool.mypy] python_version = "3.11" ignore_missing_imports = true namespace_packages = false strict = true warn_unreachable = true enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] [[tool.mypy.overrides]] module = [ "zarr.v2.*", ] ignore_errors = true [[tool.mypy.overrides]] module = [ "zarr.testing.stateful", # lots of hypothesis decorator errors "tests.package_with_entrypoint.*", "tests.test_codecs.test_codecs", "tests.test_codecs.test_transpose", "tests.test_metadata.*", "tests.test_store.*", "tests.test_config", "tests.test_group", "tests.test_indexing", "tests.test_properties", "tests.test_sync", "tests.test_v2", ] ignore_errors = true [tool.pytest.ini_options] minversion = "7" testpaths = ["tests", "docs/user-guide"] log_cli_level = "INFO" xfail_strict = true asyncio_mode = "auto" doctest_optionflags = [ "NORMALIZE_WHITESPACE", 
"ELLIPSIS", "IGNORE_EXCEPTION_DETAIL", ] addopts = [ "--durations=10", "-ra", "--strict-config", "--strict-markers", ] filterwarnings = [ "error:::zarr.*", "ignore:PY_SSIZE_T_CLEAN will be required.*:DeprecationWarning", "ignore:The loop argument is deprecated since Python 3.8.*:DeprecationWarning", "ignore:Creating a zarr.buffer.gpu.*:UserWarning", "ignore:Duplicate name:UserWarning", # from ZipFile "ignore:.*is currently not part in the Zarr format 3 specification.*:UserWarning", ] markers = [ "gpu: mark a test as requiring CuPy and GPU", "slow_hypothesis: slow hypothesis tests", ] [tool.repo-review] ignore = [ "PC111", # fix Python code in documentation - enable later "PC180", # for JavaScript - not interested ] [tool.numpydoc_validation] # See https://numpydoc.readthedocs.io/en/latest/validation.html#built-in-validation-checks for list of checks checks = [ "GL06", "GL07", # Currently broken; see https://github.com/numpy/numpydoc/issues/573 # "GL09", "GL10", "SS02", "SS04", "PR02", "PR03", "PR05", "PR06", ] [tool.towncrier] directory = 'changes' filename = "docs/release-notes.rst" underlines = ["-", "~", "^"] issue_format = ":issue:`{issue}`" [tool.codespell] ignore-words-list = "astroid" zarr-python-3.0.6/src/000077500000000000000000000000001476711733500146225ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/000077500000000000000000000000001476711733500156005ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/__init__.py000066400000000000000000000024171476711733500177150ustar00rootroot00000000000000from zarr._version import version as __version__ from zarr.api.synchronous import ( array, consolidate_metadata, copy, copy_all, copy_store, create, create_array, create_group, create_hierarchy, empty, empty_like, full, full_like, group, load, ones, ones_like, open, open_array, open_consolidated, open_group, open_like, save, save_array, save_group, tree, zeros, zeros_like, ) from zarr.core.array import Array, AsyncArray from zarr.core.config import config from zarr.core.group import AsyncGroup, Group # in case setuptools scm screw up and find version to be 0.0.0 assert not __version__.startswith("0.0.0") __all__ = [ "Array", "AsyncArray", "AsyncGroup", "Group", "__version__", "array", "config", "consolidate_metadata", "copy", "copy_all", "copy_store", "create", "create_array", "create_group", "create_hierarchy", "empty", "empty_like", "full", "full_like", "group", "load", "ones", "ones_like", "open", "open_array", "open_consolidated", "open_group", "open_like", "save", "save_array", "save_group", "tree", "zeros", "zeros_like", ] zarr-python-3.0.6/src/zarr/_compat.py000066400000000000000000000044441476711733500176020ustar00rootroot00000000000000import warnings from collections.abc import Callable from functools import wraps from inspect import Parameter, signature from typing import Any, TypeVar T = TypeVar("T") # Based off https://github.com/scikit-learn/scikit-learn/blob/e87b32a81c70abed8f2e97483758eb64df8255e9/sklearn/utils/validation.py#L63 def _deprecate_positional_args( func: Callable[..., T] | None = None, *, version: str = "3.1.0" ) -> Callable[..., T]: """Decorator for methods that issues warnings for positional arguments. Using the keyword-only argument syntax in pep 3102, arguments after the * will issue a warning when passed as a positional argument. Parameters ---------- func : callable, default=None Function to check arguments on. version : callable, default="3.1.0" The version when positional arguments will result in error. 
""" def _inner_deprecate_positional_args(f: Callable[..., T]) -> Callable[..., T]: sig = signature(f) kwonly_args = [] all_args = [] for name, param in sig.parameters.items(): if param.kind == Parameter.POSITIONAL_OR_KEYWORD: all_args.append(name) elif param.kind == Parameter.KEYWORD_ONLY: kwonly_args.append(name) @wraps(f) def inner_f(*args: Any, **kwargs: Any) -> T: extra_args = len(args) - len(all_args) if extra_args <= 0: return f(*args, **kwargs) # extra_args > 0 args_msg = [ f"{name}={arg}" for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:], strict=False) ] formatted_args_msg = ", ".join(args_msg) warnings.warn( ( f"Pass {formatted_args_msg} as keyword args. From version " f"{version} passing these as positional arguments " "will result in an error" ), FutureWarning, stacklevel=2, ) kwargs.update(zip(sig.parameters, args, strict=False)) return f(**kwargs) return inner_f if func is not None: return _inner_deprecate_positional_args(func) return _inner_deprecate_positional_args # type: ignore[return-value] zarr-python-3.0.6/src/zarr/abc/000077500000000000000000000000001476711733500163255ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/abc/__init__.py000066400000000000000000000000001476711733500204240ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/abc/codec.py000066400000000000000000000327641476711733500177700ustar00rootroot00000000000000from __future__ import annotations from abc import abstractmethod from typing import TYPE_CHECKING, Any, Generic, TypeVar from zarr.abc.metadata import Metadata from zarr.core.buffer import Buffer, NDBuffer from zarr.core.common import ChunkCoords, concurrent_map from zarr.core.config import config if TYPE_CHECKING: from collections.abc import Awaitable, Callable, Iterable from typing import Self import numpy as np from zarr.abc.store import ByteGetter, ByteSetter from zarr.core.array_spec import ArraySpec from zarr.core.chunk_grids import ChunkGrid from zarr.core.indexing import SelectorTuple __all__ = [ "ArrayArrayCodec", "ArrayBytesCodec", "ArrayBytesCodecPartialDecodeMixin", "ArrayBytesCodecPartialEncodeMixin", "BaseCodec", "BytesBytesCodec", "CodecInput", "CodecOutput", "CodecPipeline", ] CodecInput = TypeVar("CodecInput", bound=NDBuffer | Buffer) CodecOutput = TypeVar("CodecOutput", bound=NDBuffer | Buffer) class BaseCodec(Metadata, Generic[CodecInput, CodecOutput]): """Generic base class for codecs. Codecs can be registered via zarr.codecs.registry. Warnings -------- This class is not intended to be directly, please use ArrayArrayCodec, ArrayBytesCodec or BytesBytesCodec for subclassing. """ is_fixed_size: bool @abstractmethod def compute_encoded_size(self, input_byte_length: int, chunk_spec: ArraySpec) -> int: """Given an input byte length, this method returns the output byte length. Raises a NotImplementedError for codecs with variable-sized outputs (e.g. compressors). Parameters ---------- input_byte_length : int chunk_spec : ArraySpec Returns ------- int """ ... def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec: """Computed the spec of the chunk after it has been encoded by the codec. This is important for codecs that change the shape, data type or fill value of a chunk. The spec will then be used for subsequent codecs in the pipeline. Parameters ---------- chunk_spec : ArraySpec Returns ------- ArraySpec """ return chunk_spec def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: """Fills in codec configuration parameters that can be automatically inferred from the array metadata. 
Parameters ---------- array_spec : ArraySpec Returns ------- Self """ return self def validate(self, *, shape: ChunkCoords, dtype: np.dtype[Any], chunk_grid: ChunkGrid) -> None: """Validates that the codec configuration is compatible with the array metadata. Raises errors when the codec configuration is not compatible. Parameters ---------- shape : ChunkCoords The array shape dtype : np.dtype[Any] The array data type chunk_grid : ChunkGrid The array chunk grid """ async def _decode_single(self, chunk_data: CodecOutput, chunk_spec: ArraySpec) -> CodecInput: raise NotImplementedError async def decode( self, chunks_and_specs: Iterable[tuple[CodecOutput | None, ArraySpec]], ) -> Iterable[CodecInput | None]: """Decodes a batch of chunks. Chunks can be None in which case they are ignored by the codec. Parameters ---------- chunks_and_specs : Iterable[tuple[CodecOutput | None, ArraySpec]] Ordered set of encoded chunks with their accompanying chunk spec. Returns ------- Iterable[CodecInput | None] """ return await _batching_helper(self._decode_single, chunks_and_specs) async def _encode_single( self, chunk_data: CodecInput, chunk_spec: ArraySpec ) -> CodecOutput | None: raise NotImplementedError async def encode( self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]], ) -> Iterable[CodecOutput | None]: """Encodes a batch of chunks. Chunks can be None in which case they are ignored by the codec. Parameters ---------- chunks_and_specs : Iterable[tuple[CodecInput | None, ArraySpec]] Ordered set of to-be-encoded chunks with their accompanying chunk spec. Returns ------- Iterable[CodecOutput | None] """ return await _batching_helper(self._encode_single, chunks_and_specs) class ArrayArrayCodec(BaseCodec[NDBuffer, NDBuffer]): """Base class for array-to-array codecs.""" class ArrayBytesCodec(BaseCodec[NDBuffer, Buffer]): """Base class for array-to-bytes codecs.""" class BytesBytesCodec(BaseCodec[Buffer, Buffer]): """Base class for bytes-to-bytes codecs.""" Codec = ArrayArrayCodec | ArrayBytesCodec | BytesBytesCodec class ArrayBytesCodecPartialDecodeMixin: """Mixin for array-to-bytes codecs that implement partial decoding.""" async def _decode_partial_single( self, byte_getter: ByteGetter, selection: SelectorTuple, chunk_spec: ArraySpec ) -> NDBuffer | None: raise NotImplementedError async def decode_partial( self, batch_info: Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]], ) -> Iterable[NDBuffer | None]: """Partially decodes a batch of chunks. This method determines parts of a chunk from the slice selection, fetches these parts from the store (via ByteGetter) and decodes them. Parameters ---------- batch_info : Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]] Ordered set of information about slices of encoded chunks. The slice selection determines which parts of the chunk will be fetched. The ByteGetter is used to fetch the necessary bytes. The chunk spec contains information about the construction of an array from the bytes. 
Returns ------- Iterable[NDBuffer | None] """ return await concurrent_map( list(batch_info), self._decode_partial_single, config.get("async.concurrency"), ) class ArrayBytesCodecPartialEncodeMixin: """Mixin for array-to-bytes codecs that implement partial encoding.""" async def _encode_partial_single( self, byte_setter: ByteSetter, chunk_array: NDBuffer, selection: SelectorTuple, chunk_spec: ArraySpec, ) -> None: raise NotImplementedError async def encode_partial( self, batch_info: Iterable[tuple[ByteSetter, NDBuffer, SelectorTuple, ArraySpec]], ) -> None: """Partially encodes a batch of chunks. This method determines parts of a chunk from the slice selection, encodes them and writes these parts to the store (via ByteSetter). If merging with existing chunk data in the store is necessary, this method will read from the store first and perform the merge. Parameters ---------- batch_info : Iterable[tuple[ByteSetter, NDBuffer, SelectorTuple, ArraySpec]] Ordered set of information about slices of to-be-encoded chunks. The slice selection determines which parts of the chunk will be encoded. The ByteSetter is used to write the necessary bytes and fetch bytes for existing chunk data. The chunk spec contains information about the chunk. """ await concurrent_map( list(batch_info), self._encode_partial_single, config.get("async.concurrency"), ) class CodecPipeline: """Base class for implementing CodecPipeline. A CodecPipeline implements the read and write paths for chunk data. On the read path, it is responsible for fetching chunks from a store (via ByteGetter), decoding them and assembling an output array. On the write path, it encodes the chunks and writes them to a store (via ByteSetter).""" @abstractmethod def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: """Fills in codec configuration parameters that can be automatically inferred from the array metadata. Parameters ---------- array_spec : ArraySpec Returns ------- Self """ ... @classmethod @abstractmethod def from_codecs(cls, codecs: Iterable[Codec]) -> Self: """Creates a codec pipeline from an iterable of codecs. Parameters ---------- codecs : Iterable[Codec] Returns ------- Self """ ... @property @abstractmethod def supports_partial_decode(self) -> bool: ... @property @abstractmethod def supports_partial_encode(self) -> bool: ... @abstractmethod def validate(self, *, shape: ChunkCoords, dtype: np.dtype[Any], chunk_grid: ChunkGrid) -> None: """Validates that all codec configurations are compatible with the array metadata. Raises errors when a codec configuration is not compatible. Parameters ---------- shape : ChunkCoords The array shape dtype : np.dtype[Any] The array data type chunk_grid : ChunkGrid The array chunk grid """ ... @abstractmethod def compute_encoded_size(self, byte_length: int, array_spec: ArraySpec) -> int: """Given an input byte length, this method returns the output byte length. Raises a NotImplementedError for codecs with variable-sized outputs (e.g. compressors). Parameters ---------- byte_length : int array_spec : ArraySpec Returns ------- int """ ... @abstractmethod async def decode( self, chunk_bytes_and_specs: Iterable[tuple[Buffer | None, ArraySpec]], ) -> Iterable[NDBuffer | None]: """Decodes a batch of chunks. Chunks can be None in which case they are ignored by the codec. Parameters ---------- chunk_bytes_and_specs : Iterable[tuple[Buffer | None, ArraySpec]] Ordered set of encoded chunks with their accompanying chunk spec. Returns ------- Iterable[NDBuffer | None] """ ... 
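    # A hedged sketch of the batch contract shared by ``decode`` and ``encode``
    # (illustrative only, not part of this ABC): ``None`` entries pass through
    # unchanged, so callers need not filter out missing chunks first. Assuming
    # ``pipeline`` is a concrete CodecPipeline, ``buf`` an encoded Buffer and
    # ``spec`` an ArraySpec:
    #
    #     decoded = await pipeline.decode([(buf, spec), (None, spec)])
    #     # -> [<NDBuffer>, None]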
@abstractmethod async def encode( self, chunk_arrays_and_specs: Iterable[tuple[NDBuffer | None, ArraySpec]], ) -> Iterable[Buffer | None]: """Encodes a batch of chunks. Chunks can be None in which case they are ignored by the codec. Parameters ---------- chunk_arrays_and_specs : Iterable[tuple[NDBuffer | None, ArraySpec]] Ordered set of to-be-encoded chunks with their accompanying chunk spec. Returns ------- Iterable[Buffer | None] """ ... @abstractmethod async def read( self, batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], out: NDBuffer, drop_axes: tuple[int, ...] = (), ) -> None: """Reads chunk data from the store, decodes it and writes it into an output array. Partial decoding may be utilized if the codecs and stores support it. Parameters ---------- batch_info : Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple]] Ordered set of information about the chunks. The first slice selection determines which parts of the chunk will be fetched. The second slice selection determines where in the output array the chunk data will be written. The ByteGetter is used to fetch the necessary bytes. The chunk spec contains information about the construction of an array from the bytes. out : NDBuffer """ ... @abstractmethod async def write( self, batch_info: Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], value: NDBuffer, drop_axes: tuple[int, ...] = (), ) -> None: """Encodes chunk data and writes it to the store. Merges with existing chunk data by reading first, if necessary. Partial encoding may be utilized if the codecs and stores support it. Parameters ---------- batch_info : Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple]] Ordered set of information about the chunks. The first slice selection determines which parts of the chunk will be encoded. The second slice selection determines where in the value array the chunk data is located. The ByteSetter is used to fetch and write the necessary bytes. The chunk spec contains information about the chunk. value : NDBuffer """ ... async def _batching_helper( func: Callable[[CodecInput, ArraySpec], Awaitable[CodecOutput | None]], batch_info: Iterable[tuple[CodecInput | None, ArraySpec]], ) -> list[CodecOutput | None]: return await concurrent_map( list(batch_info), _noop_for_none(func), config.get("async.concurrency"), ) def _noop_for_none( func: Callable[[CodecInput, ArraySpec], Awaitable[CodecOutput | None]], ) -> Callable[[CodecInput | None, ArraySpec], Awaitable[CodecOutput | None]]: async def wrap(chunk: CodecInput | None, chunk_spec: ArraySpec) -> CodecOutput | None: if chunk is None: return None return await func(chunk, chunk_spec) return wrap zarr-python-3.0.6/src/zarr/abc/metadata.py000066400000000000000000000025751476711733500204700ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Sequence from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Self from zarr.core.common import JSON from dataclasses import dataclass, fields __all__ = ["Metadata"] @dataclass(frozen=True) class Metadata: def to_dict(self) -> dict[str, JSON]: """ Recursively serialize this model to a dictionary. This method inspects the fields of self and calls `x.to_dict()` for any fields that are instances of `Metadata`. Sequences of `Metadata` are similarly recursed into, and the output of that recursion is collected in a list. 
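Examples
--------
A sketch with a hypothetical subclass (``Point`` is not part of zarr):

>>> from dataclasses import dataclass
>>> @dataclass(frozen=True)
... class Point(Metadata):
...     x: int
...     y: int
>>> Point(x=1, y=2).to_dict()
{'x': 1, 'y': 2}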
""" out_dict = {} for field in fields(self): key = field.name value = getattr(self, key) if isinstance(value, Metadata): out_dict[field.name] = getattr(self, field.name).to_dict() elif isinstance(value, str): out_dict[key] = value elif isinstance(value, Sequence): out_dict[key] = tuple(v.to_dict() if isinstance(v, Metadata) else v for v in value) else: out_dict[key] = value return out_dict @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: """ Create an instance of the model from a dictionary """ return cls(**data) zarr-python-3.0.6/src/zarr/abc/store.py000066400000000000000000000346161476711733500200450ustar00rootroot00000000000000from __future__ import annotations from abc import ABC, abstractmethod from asyncio import gather from dataclasses import dataclass from itertools import starmap from typing import TYPE_CHECKING, Protocol, runtime_checkable from zarr.core.buffer.core import default_buffer_prototype from zarr.core.common import concurrent_map from zarr.core.config import config if TYPE_CHECKING: from collections.abc import AsyncGenerator, AsyncIterator, Iterable from types import TracebackType from typing import Any, Self, TypeAlias from zarr.core.buffer import Buffer, BufferPrototype from zarr.core.common import BytesLike __all__ = ["ByteGetter", "ByteSetter", "Store", "set_or_delete"] @dataclass class RangeByteRequest: """Request a specific byte range""" start: int """The start of the byte range request (inclusive).""" end: int """The end of the byte range request (exclusive).""" @dataclass class OffsetByteRequest: """Request all bytes starting from a given byte offset""" offset: int """The byte offset for the offset range request.""" @dataclass class SuffixByteRequest: """Request up to the last `n` bytes""" suffix: int """The number of bytes from the suffix to request.""" ByteRequest: TypeAlias = RangeByteRequest | OffsetByteRequest | SuffixByteRequest class Store(ABC): """ Abstract base class for Zarr stores. """ _read_only: bool _is_open: bool def __init__(self, *, read_only: bool = False) -> None: self._is_open = False self._read_only = read_only @classmethod async def open(cls, *args: Any, **kwargs: Any) -> Self: """ Create and open the store. Parameters ---------- *args : Any Positional arguments to pass to the store constructor. **kwargs : Any Keyword arguments to pass to the store constructor. Returns ------- Store The opened store instance. """ store = cls(*args, **kwargs) await store._open() return store def __enter__(self) -> Self: """Enter a context manager that will close the store upon exiting.""" return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: """Close the store.""" self.close() async def _open(self) -> None: """ Open the store. Raises ------ ValueError If the store is already open. """ if self._is_open: raise ValueError("store is already open") self._is_open = True async def _ensure_open(self) -> None: """Open the store if it is not already open.""" if not self._is_open: await self._open() async def is_empty(self, prefix: str) -> bool: """ Check if the directory is empty. Parameters ---------- prefix : str Prefix of keys to check. Returns ------- bool True if the store is empty, False otherwise. """ if not self.supports_listing: raise NotImplementedError if prefix != "" and not prefix.endswith("/"): prefix += "/" async for _ in self.list_prefix(prefix): return False return True async def clear(self) -> None: """ Clear the store. 
Remove all keys and values from the store. """ if not self.supports_deletes: raise NotImplementedError if not self.supports_listing: raise NotImplementedError self._check_writable() await self.delete_dir("") @property def read_only(self) -> bool: """Is the store read-only?""" return self._read_only def _check_writable(self) -> None: """Raise an exception if the store is not writable.""" if self.read_only: raise ValueError("store was opened in read-only mode and does not support writing") @abstractmethod def __eq__(self, value: object) -> bool: """Equality comparison.""" ... @abstractmethod async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: """Retrieve the value associated with a given key. Parameters ---------- key : str prototype : BufferPrototype The prototype of the output buffer. Stores may support a default buffer prototype. byte_range : ByteRequest, optional ByteRequest may be one of the following. If not provided, all data associated with the key is retrieved. - RangeByteRequest(int, int): Request a specific range of bytes in the form (start, end). The end is exclusive. If the given range is zero-length or starts after the end of the object, an error will be returned. Additionally, if the range ends after the end of the object, the entire remainder of the object will be returned. Otherwise, the exact requested range will be returned. - OffsetByteRequest(int): Request all bytes starting from a given byte offset. This is equivalent to bytes={int}- as an HTTP header. - SuffixByteRequest(int): Request the last int bytes. Note that here, int is the size of the request, not the byte offset. This is equivalent to bytes=-{int} as an HTTP header. Returns ------- Buffer """ ... @abstractmethod async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: """Retrieve possibly partial values from given key_ranges. Parameters ---------- prototype : BufferPrototype The prototype of the output buffer. Stores may support a default buffer prototype. key_ranges : Iterable[tuple[str, ByteRequest | None]] Ordered set of (key, range) pairs; a key may occur multiple times with different ranges. Returns ------- List of values, in the order of the key_ranges; may contain None for missing keys. """ ... @abstractmethod async def exists(self, key: str) -> bool: """Check if a key exists in the store. Parameters ---------- key : str Returns ------- bool """ ... @property @abstractmethod def supports_writes(self) -> bool: """Does the store support writes?""" ... @abstractmethod async def set(self, key: str, value: Buffer) -> None: """Store a (key, value) pair. Parameters ---------- key : str value : Buffer """ ... async def set_if_not_exists(self, key: str, value: Buffer) -> None: """ Store ``value`` at ``key`` if the key is not already present. Parameters ---------- key : str value : Buffer """ # Note for implementers: the default implementation provided here # is not safe for concurrent writers. There's a race condition between # the `exists` check and the `set` where another writer could set some # value at `key` or delete `key`. if not await self.exists(key): await self.set(key, value) async def _set_many(self, values: Iterable[tuple[str, Buffer]]) -> None: """ Insert multiple (key, value) pairs into storage.
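Notes
-----
The default implementation issues all writes concurrently via
``asyncio.gather``; stores that need ordering or atomicity guarantees should
override it. A hedged usage sketch, assuming ``store`` is an open, writable
store and ``buf`` is a :class:`Buffer`:

>>> await store._set_many([("a/c/0", buf), ("a/c/1", buf)])  # doctest: +SKIP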
""" await gather(*starmap(self.set, values)) @property @abstractmethod def supports_deletes(self) -> bool: """Does the store support deletes?""" ... @abstractmethod async def delete(self, key: str) -> None: """Remove a key from the store Parameters ---------- key : str """ ... @property @abstractmethod def supports_partial_writes(self) -> bool: """Does the store support partial writes?""" ... @abstractmethod async def set_partial_values( self, key_start_values: Iterable[tuple[str, int, BytesLike]] ) -> None: """Store values at a given key, starting at byte range_start. Parameters ---------- key_start_values : list[tuple[str, int, BytesLike]] set of key, range_start, values triples, a key may occur multiple times with different range_starts, range_starts (considering the length of the respective values) must not specify overlapping ranges for the same key """ ... @property @abstractmethod def supports_listing(self) -> bool: """Does the store support listing?""" ... @abstractmethod def list(self) -> AsyncIterator[str]: """Retrieve all keys in the store. Returns ------- AsyncIterator[str] """ # This method should be async, like overridden methods in child classes. # However, that's not straightforward: # https://stackoverflow.com/questions/68905848 @abstractmethod def list_prefix(self, prefix: str) -> AsyncIterator[str]: """ Retrieve all keys in the store that begin with a given prefix. Keys are returned relative to the root of the store. Parameters ---------- prefix : str Returns ------- AsyncIterator[str] """ # This method should be async, like overridden methods in child classes. # However, that's not straightforward: # https://stackoverflow.com/questions/68905848 @abstractmethod def list_dir(self, prefix: str) -> AsyncIterator[str]: """ Retrieve all keys and prefixes with a given prefix and which do not contain the character “/” after the given prefix. Parameters ---------- prefix : str Returns ------- AsyncIterator[str] """ # This method should be async, like overridden methods in child classes. # However, that's not straightforward: # https://stackoverflow.com/questions/68905848 async def delete_dir(self, prefix: str) -> None: """ Remove all keys and prefixes in the store that begin with a given prefix. """ if not self.supports_deletes: raise NotImplementedError if not self.supports_listing: raise NotImplementedError self._check_writable() if prefix != "" and not prefix.endswith("/"): prefix += "/" async for key in self.list_prefix(prefix): await self.delete(key) def close(self) -> None: """Close the store.""" self._is_open = False async def _get_many( self, requests: Iterable[tuple[str, BufferPrototype, ByteRequest | None]] ) -> AsyncGenerator[tuple[str, Buffer | None], None]: """ Retrieve a collection of objects from storage. In general this method does not guarantee that objects will be retrieved in the order in which they were requested, so this method yields tuple[str, Buffer | None] instead of just Buffer | None """ for req in requests: yield (req[0], await self.get(*req)) async def getsize(self, key: str) -> int: """ Return the size, in bytes, of a value in a Store. Parameters ---------- key : str Returns ------- nbytes : int The size of the value (in bytes). Raises ------ FileNotFoundError When the given key does not exist in the store. """ # Note to implementers: this default implementation is very inefficient since # it requires reading the entire object. Many systems will have ways to get the # size of an object without reading it. 
value = await self.get(key, prototype=default_buffer_prototype()) if value is None: raise FileNotFoundError(key) return len(value) async def getsize_prefix(self, prefix: str) -> int: """ Return the size, in bytes, of all values under a prefix. Parameters ---------- prefix : str The prefix of the directory to measure. Returns ------- nbytes : int The sum of the sizes of the values in the directory (in bytes). See Also -------- zarr.Array.nbytes_stored Store.getsize Notes ----- ``getsize_prefix`` is just provided as a potentially faster alternative to listing all the keys under a prefix calling :meth:`Store.getsize` on each. In general, ``prefix`` should be the path of an Array or Group in the Store. Implementations may differ on the behavior when some other ``prefix`` is provided. """ # TODO: Overlap listing keys with getsize calls. # Currently, we load the list of keys into memory and only then move # on to getting sizes. Ideally we would overlap those two, which should # improve tail latency and might reduce memory pressure (since not all keys # would be in memory at once). keys = [(x,) async for x in self.list_prefix(prefix)] limit = config.get("async.concurrency") sizes = await concurrent_map(keys, self.getsize, limit=limit) return sum(sizes) @runtime_checkable class ByteGetter(Protocol): async def get( self, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: ... @runtime_checkable class ByteSetter(Protocol): async def get( self, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: ... async def set(self, value: Buffer, byte_range: ByteRequest | None = None) -> None: ... async def delete(self) -> None: ... async def set_if_not_exists(self, default: Buffer) -> None: ... async def set_or_delete(byte_setter: ByteSetter, value: Buffer | None) -> None: """Set or delete a value in a byte setter Parameters ---------- byte_setter : ByteSetter value : Buffer | None Notes ----- If value is None, the key will be deleted. 
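Examples
--------
A sketch, assuming ``setter`` is a :class:`ByteSetter` for some chunk key and
``buf`` is a :class:`Buffer`:

>>> await set_or_delete(setter, buf)   # writes ``buf``  # doctest: +SKIP
>>> await set_or_delete(setter, None)  # deletes the key  # doctest: +SKIP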
""" if value is None: await byte_setter.delete() else: await byte_setter.set(value) zarr-python-3.0.6/src/zarr/api/000077500000000000000000000000001476711733500163515ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/api/__init__.py000066400000000000000000000000001476711733500204500ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/api/asynchronous.py000066400000000000000000001345251476711733500214700ustar00rootroot00000000000000from __future__ import annotations import asyncio import dataclasses import warnings from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np import numpy.typing as npt from typing_extensions import deprecated from zarr.core.array import Array, AsyncArray, create_array, get_array_metadata from zarr.core.array_spec import ArrayConfig, ArrayConfigLike, ArrayConfigParams from zarr.core.buffer import NDArrayLike from zarr.core.common import ( JSON, AccessModeLiteral, ChunkCoords, MemoryOrder, ZarrFormat, _default_zarr_format, _warn_order_kwarg, _warn_write_empty_chunks_kwarg, parse_dtype, ) from zarr.core.group import ( AsyncGroup, ConsolidatedMetadata, GroupMetadata, create_hierarchy, ) from zarr.core.metadata import ArrayMetadataDict, ArrayV2Metadata, ArrayV3Metadata from zarr.core.metadata.v2 import _default_compressor, _default_filters from zarr.errors import NodeTypeValidationError from zarr.storage._common import make_store_path if TYPE_CHECKING: from collections.abc import Iterable from zarr.abc.codec import Codec from zarr.core.chunk_key_encodings import ChunkKeyEncoding from zarr.storage import StoreLike # TODO: this type could use some more thought ArrayLike = AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | Array | npt.NDArray[Any] PathLike = str __all__ = [ "array", "consolidate_metadata", "copy", "copy_all", "copy_store", "create", "create_array", "create_hierarchy", "empty", "empty_like", "full", "full_like", "group", "load", "ones", "ones_like", "open", "open_array", "open_consolidated", "open_group", "open_like", "save", "save_array", "save_group", "tree", "zeros", "zeros_like", ] _READ_MODES: tuple[AccessModeLiteral, ...] = ("r", "r+", "a") _CREATE_MODES: tuple[AccessModeLiteral, ...] = ("a", "w", "w-") _OVERWRITE_MODES: tuple[AccessModeLiteral, ...] = ("a", "r+", "w") def _infer_overwrite(mode: AccessModeLiteral) -> bool: """ Check that an ``AccessModeLiteral`` is compatible with overwriting an existing Zarr node. """ return mode in _OVERWRITE_MODES def _get_shape_chunks(a: ArrayLike | Any) -> tuple[ChunkCoords | None, ChunkCoords | None]: """Helper function to get the shape and chunks from an array-like object""" shape = None chunks = None if hasattr(a, "shape") and isinstance(a.shape, tuple): shape = a.shape if hasattr(a, "chunks") and isinstance(a.chunks, tuple) and (len(a.chunks) == len(a.shape)): chunks = a.chunks elif hasattr(a, "chunklen"): # bcolz carray chunks = (a.chunklen,) + a.shape[1:] return shape, chunks def _like_args(a: ArrayLike, kwargs: dict[str, Any]) -> dict[str, Any]: """Set default values for shape and chunks if they are not present in the array-like object""" new = kwargs.copy() shape, chunks = _get_shape_chunks(a) if shape is not None: new["shape"] = shape if chunks is not None: new["chunks"] = chunks if hasattr(a, "dtype"): new["dtype"] = a.dtype if isinstance(a, AsyncArray): new["order"] = a.order if isinstance(a.metadata, ArrayV2Metadata): new["compressor"] = a.metadata.compressor new["filters"] = a.metadata.filters else: # TODO: Remove type: ignore statement when type inference improves. 
# mypy cannot correctly infer the type of a.metadata here for some reason. new["codecs"] = a.metadata.codecs # type: ignore[unreachable] else: # TODO: set default values compressor/codecs # to do this, we may need to evaluate if this is a v2 or v3 array # new["compressor"] = "default" pass return new def _handle_zarr_version_or_format( *, zarr_version: ZarrFormat | None, zarr_format: ZarrFormat | None ) -> ZarrFormat | None: """Handle the deprecated zarr_version kwarg and return zarr_format""" if zarr_format is not None and zarr_version is not None and zarr_format != zarr_version: raise ValueError( f"zarr_format {zarr_format} does not match zarr_version {zarr_version}, please only set one" ) if zarr_version is not None: warnings.warn( "zarr_version is deprecated, use zarr_format", DeprecationWarning, stacklevel=2 ) return zarr_version return zarr_format async def consolidate_metadata( store: StoreLike, path: str | None = None, zarr_format: ZarrFormat | None = None, ) -> AsyncGroup: """ Consolidate the metadata of all nodes in a hierarchy. Upon completion, the metadata of the root node in the Zarr hierarchy will be updated to include all the metadata of child nodes. Parameters ---------- store : StoreLike The store-like object whose metadata you wish to consolidate. path : str, optional A path to a group in the store to consolidate at. Only children below that group will be consolidated. By default, the root node is used so all the metadata in the store is consolidated. zarr_format : {2, 3, None}, optional The zarr format of the hierarchy. By default the zarr format is inferred. Returns ------- group: AsyncGroup The group, with the ``consolidated_metadata`` field set to include the metadata of each child node. """ store_path = await make_store_path(store, path=path) group = await AsyncGroup.open(store_path, zarr_format=zarr_format, use_consolidated=False) group.store_path.store._check_writable() members_metadata = {k: v.metadata async for k, v in group.members(max_depth=None)} # While consolidating, we want to be explicit about when child groups # are empty by inserting an empty dict for consolidated_metadata.metadata for k, v in members_metadata.items(): if isinstance(v, GroupMetadata) and v.consolidated_metadata is None: v = dataclasses.replace(v, consolidated_metadata=ConsolidatedMetadata(metadata={})) members_metadata[k] = v if any(m.zarr_format == 3 for m in members_metadata.values()): warnings.warn( "Consolidated metadata is currently not part in the Zarr format 3 specification. It " "may not be supported by other zarr implementations and may change in the future.", category=UserWarning, stacklevel=1, ) ConsolidatedMetadata._flat_to_nested(members_metadata) consolidated_metadata = ConsolidatedMetadata(metadata=members_metadata) metadata = dataclasses.replace(group.metadata, consolidated_metadata=consolidated_metadata) group = dataclasses.replace( group, metadata=metadata, ) await group._save_metadata() return group async def copy(*args: Any, **kwargs: Any) -> tuple[int, int, int]: raise NotImplementedError async def copy_all(*args: Any, **kwargs: Any) -> tuple[int, int, int]: raise NotImplementedError async def copy_store(*args: Any, **kwargs: Any) -> tuple[int, int, int]: raise NotImplementedError async def load( *, store: StoreLike, path: str | None = None, zarr_format: ZarrFormat | None = None, zarr_version: ZarrFormat | None = None, ) -> NDArrayLike | dict[str, NDArrayLike]: """Load data from an array or group into memory. 
Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. path : str or None, optional The path within the store from which to load. Returns ------- out If the path contains an array, out will be a numpy array. If the path contains a group, out will be a dict-like object where keys are array names and values are numpy arrays. See Also -------- save, savez Notes ----- If loading data from a group of arrays, data will not be immediately loaded into memory. Rather, arrays will be loaded into memory as they are requested. """ zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) obj = await open(store=store, path=path, zarr_format=zarr_format) if isinstance(obj, AsyncArray): return await obj.getitem(slice(None)) else: raise NotImplementedError("loading groups not yet supported") async def open( *, store: StoreLike | None = None, mode: AccessModeLiteral = "a", zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: Any, # TODO: type kwargs as valid args to open_array ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup: """Convenience function to open a group or array using file-mode-like semantics. Parameters ---------- store : Store or str, optional Store or path to directory in file system or name of zip file. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional The path within the store to open. storage_options : dict If the store is backed by an fsspec-based implementation, then this dict will be passed to the Store constructor for that implementation. Ignored otherwise. **kwargs Additional parameters are passed through to :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`. Returns ------- z : array or group Return type depends on what exists in the given store. """ zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) store_path = await make_store_path(store, mode=mode, path=path, storage_options=storage_options) # TODO: the mode check below seems wrong! 
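    # With no explicit ``shape`` in kwargs, first try to interpret the node at
    # ``store_path`` as an existing array; if no array is found (missing key, or
    # metadata that describes a group), fall back to ``open_group`` below.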
if "shape" not in kwargs and mode in {"a", "r", "r+", "w"}: try: metadata_dict = await get_array_metadata(store_path, zarr_format=zarr_format) # TODO: remove this cast when we fix typing for array metadata dicts _metadata_dict = cast(ArrayMetadataDict, metadata_dict) # for v2, the above would already have raised an exception if not an array zarr_format = _metadata_dict["zarr_format"] is_v3_array = zarr_format == 3 and _metadata_dict.get("node_type") == "array" if is_v3_array or zarr_format == 2: return AsyncArray(store_path=store_path, metadata=_metadata_dict) except (AssertionError, FileNotFoundError, NodeTypeValidationError): pass return await open_group(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs) try: return await open_array(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs) except (KeyError, NodeTypeValidationError): # KeyError for a missing key # NodeTypeValidationError for failing to parse node metadata as an array when it's # actually a group return await open_group(store=store_path, zarr_format=zarr_format, mode=mode, **kwargs) async def open_consolidated( *args: Any, use_consolidated: Literal[True] = True, **kwargs: Any ) -> AsyncGroup: """ Alias for :func:`open_group` with ``use_consolidated=True``. """ if use_consolidated is not True: raise TypeError( "'use_consolidated' must be 'True' in 'open_consolidated'. Use 'open' with " "'use_consolidated=False' to bypass consolidated metadata." ) return await open_group(*args, use_consolidated=use_consolidated, **kwargs) async def save( store: StoreLike, *args: NDArrayLike, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, **kwargs: Any, # TODO: type kwargs as valid args to save ) -> None: """Convenience function to save an array or group of arrays to the local file system. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. *args : ndarray NumPy arrays with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional The path within the group where the arrays will be saved. **kwargs NumPy arrays with data to save. """ zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) if len(args) == 0 and len(kwargs) == 0: raise ValueError("at least one array must be provided") if len(args) == 1 and len(kwargs) == 0: await save_array(store, args[0], zarr_format=zarr_format, path=path) else: await save_group(store, *args, zarr_format=zarr_format, path=path, **kwargs) async def save_array( store: StoreLike, arr: NDArrayLike, *, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: Any, # TODO: type kwargs as valid args to create ) -> None: """Convenience function to save a NumPy array to the local file system, following a similar API to the NumPy save() function. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. arr : ndarray NumPy array with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving (default is 3 if not specified). path : str or None, optional The path within the store where the array will be saved. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. 
**kwargs Passed through to :func:`create`, e.g., compressor. """ zarr_format = ( _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) or _default_zarr_format() ) if not isinstance(arr, NDArrayLike): raise TypeError("arr argument must be numpy or other NDArrayLike array") mode = kwargs.pop("mode", "a") store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options) if np.isscalar(arr): arr = np.array(arr) shape = arr.shape chunks = getattr(arr, "chunks", None) # for array-likes with chunks attribute overwrite = kwargs.pop("overwrite", None) or _infer_overwrite(mode) new = await AsyncArray._create( store_path, zarr_format=zarr_format, shape=shape, dtype=arr.dtype, chunks=chunks, overwrite=overwrite, **kwargs, ) await new.setitem(slice(None), arr) async def save_group( store: StoreLike, *args: NDArrayLike, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: NDArrayLike, ) -> None: """Convenience function to save several NumPy arrays to the local file system, following a similar API to the NumPy savez()/savez_compressed() functions. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. *args : ndarray NumPy arrays with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional Path within the store where the group will be saved. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs NumPy arrays with data to save. """ store_path = await make_store_path(store, path=path, mode="w", storage_options=storage_options) zarr_format = ( _handle_zarr_version_or_format( zarr_version=zarr_version, zarr_format=zarr_format, ) or _default_zarr_format() ) for arg in args: if not isinstance(arg, NDArrayLike): raise TypeError( "All arguments must be numpy or other NDArrayLike arrays (except store, path, storage_options, and zarr_format)" ) for k, v in kwargs.items(): if not isinstance(v, NDArrayLike): raise TypeError(f"Keyword argument '{k}' must be a numpy or other NDArrayLike array") if len(args) == 0 and len(kwargs) == 0: raise ValueError("at least one array must be provided") aws = [] for i, arr in enumerate(args): _path = f"{path}/arr_{i}" if path is not None else f"arr_{i}" aws.append( save_array( store_path, arr, zarr_format=zarr_format, path=_path, storage_options=storage_options, ) ) for k, arr in kwargs.items(): aws.append(save_array(store_path, arr, zarr_format=zarr_format, path=k)) await asyncio.gather(*aws) @deprecated("Use AsyncGroup.tree instead.") async def tree(grp: AsyncGroup, expand: bool | None = None, level: int | None = None) -> Any: """Provide a rich display of the hierarchy. .. deprecated:: 3.0.0 `zarr.tree()` is deprecated and will be removed in a future release. Use `group.tree()` instead. Parameters ---------- grp : Group Zarr or h5py group. expand : bool, optional Only relevant for HTML representation. If True, tree will be fully expanded. level : int, optional Maximum depth to descend into hierarchy. Returns ------- TreeRepr A pretty-printable object displaying the hierarchy. """ return await grp.tree(expand=expand, level=level) async def array( data: npt.ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array filled with `data`. 
Parameters ---------- data : array_like The data to fill the array with. **kwargs Passed through to :func:`create`. Returns ------- array : array The new array. """ # ensure data is array-like if not hasattr(data, "shape") or not hasattr(data, "dtype"): data = np.asanyarray(data) # setup dtype kw_dtype = kwargs.get("dtype") if kw_dtype is None: kwargs["dtype"] = data.dtype else: kwargs["dtype"] = kw_dtype # setup shape and chunks data_shape, data_chunks = _get_shape_chunks(data) kwargs["shape"] = data_shape kw_chunks = kwargs.get("chunks") if kw_chunks is None: kwargs["chunks"] = data_chunks else: kwargs["chunks"] = kw_chunks read_only = kwargs.pop("read_only", False) if read_only: raise ValueError("read_only=True is no longer supported when creating new arrays") # instantiate array z = await create(**kwargs) # fill with data await z.setitem(Ellipsis, data) return z async def group( *, # Note: this is a change from v2 store: StoreLike | None = None, overwrite: bool = False, chunk_store: StoreLike | None = None, # not used cache_attrs: bool | None = None, # not used, default changed synchronizer: Any | None = None, # not used path: str | None = None, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # not used attributes: dict[str, JSON] | None = None, storage_options: dict[str, Any] | None = None, ) -> AsyncGroup: """Create a group. Parameters ---------- store : Store or str, optional Store or path to directory in file system. overwrite : bool, optional If True, delete any pre-existing data in `store` at `path` before creating the group. chunk_store : Store, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. synchronizer : object, optional Array synchronizer. path : str, optional Group path within store. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. zarr_format : {2, 3, None}, optional The zarr format to use when saving. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. Returns ------- g : group The new group. 
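Examples
--------
A minimal sketch using the default in-memory store (run inside an event loop,
hence the ``await``):

>>> grp = await group(attributes={"title": "demo"})  # doctest: +SKIP
>>> grp.metadata.zarr_format  # doctest: +SKIP
3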
""" zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) mode: AccessModeLiteral if overwrite: mode = "w" else: mode = "r+" store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options) if chunk_store is not None: warnings.warn("chunk_store is not yet implemented", RuntimeWarning, stacklevel=2) if cache_attrs is not None: warnings.warn("cache_attrs is not yet implemented", RuntimeWarning, stacklevel=2) if synchronizer is not None: warnings.warn("synchronizer is not yet implemented", RuntimeWarning, stacklevel=2) if meta_array is not None: warnings.warn("meta_array is not yet implemented", RuntimeWarning, stacklevel=2) if attributes is None: attributes = {} try: return await AsyncGroup.open(store=store_path, zarr_format=zarr_format) except (KeyError, FileNotFoundError): _zarr_format = zarr_format or _default_zarr_format() return await AsyncGroup.from_store( store=store_path, zarr_format=_zarr_format, overwrite=overwrite, attributes=attributes, ) async def create_group( *, store: StoreLike, path: str | None = None, overwrite: bool = False, zarr_format: ZarrFormat | None = None, attributes: dict[str, Any] | None = None, storage_options: dict[str, Any] | None = None, ) -> AsyncGroup: """Create a group. Parameters ---------- store : Store or str Store or path to directory in file system. path : str, optional Group path within store. overwrite : bool, optional If True, pre-existing data at ``path`` will be deleted before creating the group. zarr_format : {2, 3, None}, optional The zarr format to use when saving. If no ``zarr_format`` is provided, the default format will be used. This default can be changed by modifying the value of ``default_zarr_format`` in :mod:`zarr.core.config`. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. Returns ------- AsyncGroup The new group. """ if zarr_format is None: zarr_format = _default_zarr_format() mode: Literal["a"] = "a" store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options) return await AsyncGroup.from_store( store=store_path, zarr_format=zarr_format, overwrite=overwrite, attributes=attributes, ) async def open_group( store: StoreLike | None = None, *, # Note: this is a change from v2 mode: AccessModeLiteral = "a", cache_attrs: bool | None = None, # not used, default changed synchronizer: Any = None, # not used path: str | None = None, chunk_store: StoreLike | None = None, # not used storage_options: dict[str, Any] | None = None, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # not used attributes: dict[str, JSON] | None = None, use_consolidated: bool | str | None = None, ) -> AsyncGroup: """Open a group using file-mode-like semantics. Parameters ---------- store : Store, str, or mapping, optional Store or path to directory in file system or name of zip file. Strings are interpreted as paths on the local file system and used as the ``root`` argument to :class:`zarr.storage.LocalStore`. Dictionaries are used as the ``store_dict`` argument in :class:`zarr.storage.MemoryStore``. By default (``store=None``) a new :class:`zarr.storage.MemoryStore` is created. 
mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. synchronizer : object, optional Array synchronizer. path : str, optional Group path within store. chunk_store : Store or str, optional Store or path to directory in file system or name of zip file. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. attributes : dict A dictionary of JSON-serializable values with user-defined attributes. use_consolidated : bool or str, default None Whether to use consolidated metadata. By default, consolidated metadata is used if it's present in the store (in the ``zarr.json`` for Zarr format 3 and in the ``.zmetadata`` file for Zarr format 2). To explicitly require consolidated metadata, set ``use_consolidated=True``, which will raise an exception if consolidated metadata is not found. To explicitly *not* use consolidated metadata, set ``use_consolidated=False``, which will fall back to using the regular, non consolidated metadata. Zarr format 2 allowed configuring the key storing the consolidated metadata (``.zmetadata`` by default). Specify the custom key as ``use_consolidated`` to load consolidated metadata from a non-default key. Returns ------- g : group The new group. 
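Examples
--------
A sketch of the file-mode-like semantics (the path is hypothetical):

>>> grp = await open_group("data/example.zarr", mode="a")  # open, creating if absent  # doctest: +SKIP
>>> grp = await open_group("data/example.zarr", mode="r")  # read-only, must exist  # doctest: +SKIP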
""" zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) if cache_attrs is not None: warnings.warn("cache_attrs is not yet implemented", RuntimeWarning, stacklevel=2) if synchronizer is not None: warnings.warn("synchronizer is not yet implemented", RuntimeWarning, stacklevel=2) if meta_array is not None: warnings.warn("meta_array is not yet implemented", RuntimeWarning, stacklevel=2) if chunk_store is not None: warnings.warn("chunk_store is not yet implemented", RuntimeWarning, stacklevel=2) store_path = await make_store_path(store, mode=mode, storage_options=storage_options, path=path) if attributes is None: attributes = {} try: if mode in _READ_MODES: return await AsyncGroup.open( store_path, zarr_format=zarr_format, use_consolidated=use_consolidated ) except (KeyError, FileNotFoundError): pass if mode in _CREATE_MODES: overwrite = _infer_overwrite(mode) _zarr_format = zarr_format or _default_zarr_format() return await AsyncGroup.from_store( store_path, zarr_format=_zarr_format, overwrite=overwrite, attributes=attributes, ) raise FileNotFoundError(f"Unable to find group: {store_path}") async def create( shape: ChunkCoords | int, *, # Note: this is a change from v2 chunks: ChunkCoords | int | None = None, # TODO: v2 allowed chunks=True dtype: npt.DTypeLike | None = None, compressor: dict[str, JSON] | None = None, # TODO: default and type change fill_value: Any | None = 0, # TODO: need type order: MemoryOrder | None = None, store: str | StoreLike | None = None, synchronizer: Any | None = None, overwrite: bool = False, path: PathLike | None = None, chunk_store: StoreLike | None = None, filters: list[dict[str, JSON]] | None = None, # TODO: type has changed cache_metadata: bool | None = None, cache_attrs: bool | None = None, read_only: bool | None = None, object_codec: Codec | None = None, # TODO: type has changed dimension_separator: Literal[".", "/"] | None = None, write_empty_chunks: bool | None = None, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # TODO: need type attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ChunkCoords | int | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, config: ArrayConfigLike | None = None, **kwargs: Any, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional The shape of the array's chunks. Zarr format 2 only. Zarr format 3 arrays should use `chunk_shape` instead. If not specified, default values are guessed based on the shape and dtype. dtype : str or dtype, optional NumPy dtype. chunk_shape : int or tuple of ints, optional The shape of the Array's chunks (default is None). Zarr format 3 only. Zarr format 2 arrays should use `chunks` instead. chunk_key_encoding : ChunkKeyEncoding, optional A specification of how the chunk keys are represented in storage. Zarr format 3 only. Zarr format 2 arrays should use `dimension_separator` instead. Default is ``("default", "/")``. codecs : Sequence of Codecs or dicts, optional An iterable of Codec or dict serializations of Codecs. 
The elements of this collection specify the transformation from array values to stored bytes. Zarr format 3 only. Zarr format 2 arrays should use ``filters`` and ``compressor`` instead. If no codecs are provided, default codecs will be used: - For numeric arrays, the default is ``BytesCodec`` and ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec`` and ``ZstdCodec``. - For bytes or objects, the default is ``VLenBytesCodec`` and ``ZstdCodec``. These defaults can be changed by modifying the value of ``array.v3_default_filters``, ``array.v3_default_serializer`` and ``array.v3_default_compressors`` in :mod:`zarr.core.config`. compressor : Codec, optional Primary compressor to compress chunk data. Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead. If neither ``compressor`` nor ``filters`` are provided, a default compressor will be used: - For numeric arrays, the default is ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec``. - For bytes or objects, the default is ``VLenBytesCodec``. These defaults can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Deprecated in favor of the ``config`` keyword argument. Pass ``{'order': <value>}`` to ``create`` instead of using this parameter. Memory layout to be used within each chunk. If not specified, the ``array.order`` parameter in the global config will be used. store : Store or str Store or path to directory in file system or name of zip file. synchronizer : object, optional Array synchronizer. overwrite : bool, optional If True, delete all pre-existing data in `store` at `path` before creating the array. path : str, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence of Codecs, optional Sequence of filters to use to encode chunk data prior to compression. Zarr format 2 only. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. cache_metadata : bool, optional If True, array configuration metadata will be cached for the lifetime of the object. If False, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. read_only : bool, optional True if array should be protected against modification. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. dimension_separator : {'.', '/'}, optional Separator placed between the dimensions of a chunk. Zarr format 2 only. Zarr format 3 arrays should use ``chunk_key_encoding`` instead. Default is ".". write_empty_chunks : bool, optional Deprecated in favor of the ``config`` keyword argument. Pass ``{'write_empty_chunks': <value>}`` to ``create`` instead of using this parameter. If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing.
If a chunk is uniformly equal to the fill value, then that chunk is not stored, and the store entry for that chunk's key is deleted. zarr_format : {2, 3, None}, optional The zarr format to use when saving. Default is 3. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. config : ArrayConfig or ArrayConfigLike, optional Runtime configuration of the array. If provided, will override the default values from `zarr.config.array`. Returns ------- z : array The array. """ zarr_format = ( _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) or _default_zarr_format() ) if zarr_format == 2: if chunks is None: chunks = shape dtype = parse_dtype(dtype, zarr_format) if not filters: filters = _default_filters(dtype) if not compressor: compressor = _default_compressor(dtype) elif zarr_format == 3 and chunk_shape is None: # type: ignore[redundant-expr] if chunks is not None: chunk_shape = chunks chunks = None else: chunk_shape = shape if synchronizer is not None: warnings.warn("synchronizer is not yet implemented", RuntimeWarning, stacklevel=2) if chunk_store is not None: warnings.warn("chunk_store is not yet implemented", RuntimeWarning, stacklevel=2) if cache_metadata is not None: warnings.warn("cache_metadata is not yet implemented", RuntimeWarning, stacklevel=2) if cache_attrs is not None: warnings.warn("cache_attrs is not yet implemented", RuntimeWarning, stacklevel=2) if object_codec is not None: warnings.warn("object_codec is not yet implemented", RuntimeWarning, stacklevel=2) if read_only is not None: warnings.warn("read_only is not yet implemented", RuntimeWarning, stacklevel=2) if dimension_separator is not None and zarr_format == 3: raise ValueError( "dimension_separator is not supported for zarr format 3, use chunk_key_encoding instead" ) if order is not None: _warn_order_kwarg() if write_empty_chunks is not None: _warn_write_empty_chunks_kwarg() if meta_array is not None: warnings.warn("meta_array is not yet implemented", RuntimeWarning, stacklevel=2) mode = kwargs.pop("mode", None) if mode is None: mode = "a" store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options) config_dict: ArrayConfigParams = {} if write_empty_chunks is not None: if config is not None: msg = ( "Both write_empty_chunks and config keyword arguments are set. " "This is redundant. When both are set, write_empty_chunks will be ignored and " "config will be used." ) warnings.warn(UserWarning(msg), stacklevel=1) config_dict["write_empty_chunks"] = write_empty_chunks if order is not None: if config is not None: msg = ( "Both order and config keyword arguments are set. " "This is redundant. When both are set, order will be ignored and " "config will be used."
) warnings.warn(UserWarning(msg), stacklevel=1) config_dict["order"] = order config_parsed = ArrayConfig.from_dict(config_dict) return await AsyncArray._create( store_path, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, overwrite=overwrite, filters=filters, dimension_separator=dimension_separator, zarr_format=zarr_format, chunk_shape=chunk_shape, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, attributes=attributes, config=config_parsed, **kwargs, ) async def empty( shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an empty array with the specified shape. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return await create(shape=shape, fill_value=None, **kwargs) async def empty_like( a: ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an empty array like `a`. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ like_kwargs = _like_args(a, kwargs) return await empty(**like_kwargs) # TODO: add type annotations for fill_value and kwargs async def full( shape: ChunkCoords, fill_value: Any, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with `fill_value` being used as the default value for uninitialized portions of the array. Parameters ---------- shape : int or tuple of int Shape of the empty array. fill_value : scalar Fill value. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return await create(shape=shape, fill_value=fill_value, **kwargs) # TODO: add type annotations for kwargs async def full_like( a: ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create a filled array like `a`. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ like_kwargs = _like_args(a, kwargs) if isinstance(a, AsyncArray): like_kwargs.setdefault("fill_value", a.metadata.fill_value) return await full(**like_kwargs) async def ones( shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with one being used as the default value for uninitialized portions of the array. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. 
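Examples -------- A minimal sketch, not part of the upstream docstring; the shape and dtype are illustrative, and the coroutine is driven with ``asyncio.run``: >>> import asyncio >>> import zarr.api.asynchronous as async_api >>> arr = asyncio.run(async_api.ones(shape=(4, 4), dtype='int32')) >>> arr.shape (4, 4)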
""" return await create(shape=shape, fill_value=1, **kwargs) async def ones_like( a: ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array of ones like `a`. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ like_kwargs = _like_args(a, kwargs) return await ones(**like_kwargs) async def open_array( *, # note: this is a change from v2 store: StoreLike | None = None, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: PathLike = "", storage_options: dict[str, Any] | None = None, **kwargs: Any, # TODO: type kwargs as valid args to save ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Open an array using file-mode-like semantics. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. zarr_version : {2, 3, None}, optional The zarr format to use when saving. Deprecated in favor of zarr_format. zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str, optional Path in store to array. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs Any keyword arguments to pass to :func:`create`. Returns ------- AsyncArray The opened array. """ mode = kwargs.pop("mode", None) store_path = await make_store_path(store, path=path, mode=mode, storage_options=storage_options) zarr_format = _handle_zarr_version_or_format(zarr_version=zarr_version, zarr_format=zarr_format) if "order" in kwargs: _warn_order_kwarg() if "write_empty_chunks" in kwargs: _warn_write_empty_chunks_kwarg() try: return await AsyncArray.open(store_path, zarr_format=zarr_format) except FileNotFoundError: if not store_path.read_only and mode in _CREATE_MODES: overwrite = _infer_overwrite(mode) _zarr_format = zarr_format or _default_zarr_format() return await create( store=store_path, zarr_format=_zarr_format, overwrite=overwrite, **kwargs, ) raise async def open_like( a: ArrayLike, path: str, **kwargs: Any ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata]: """Open a persistent array like `a`. Parameters ---------- a : Array The shape and data-type of a define these same attributes of the returned array. path : str The path to the new array. **kwargs Any keyword arguments to pass to the array constructor. Returns ------- AsyncArray The opened array. """ like_kwargs = _like_args(a, kwargs) if isinstance(a, (AsyncArray | Array)): kwargs.setdefault("fill_value", a.metadata.fill_value) return await open_array(path=path, **like_kwargs) async def zeros( shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with zero being used as the default value for uninitialized portions of the array. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return await create(shape=shape, fill_value=0, **kwargs) async def zeros_like( a: ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array of zeros like `a`. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. 
Returns ------- Array The new array. """ like_kwargs = _like_args(a, kwargs) return await zeros(**like_kwargs) zarr-python-3.0.6/src/zarr/api/synchronous.py000066400000000000000000001133621476711733500213230ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal from typing_extensions import deprecated import zarr.api.asynchronous as async_api import zarr.core.array from zarr._compat import _deprecate_positional_args from zarr.core.array import Array, AsyncArray from zarr.core.group import Group from zarr.core.sync import sync from zarr.core.sync_group import create_hierarchy if TYPE_CHECKING: from collections.abc import Iterable import numpy as np import numpy.typing as npt from zarr.abc.codec import Codec from zarr.api.asynchronous import ArrayLike, PathLike from zarr.core.array import ( CompressorsLike, FiltersLike, SerializerLike, ShardsLike, ) from zarr.core.array_spec import ArrayConfigLike from zarr.core.buffer import NDArrayLike from zarr.core.chunk_key_encodings import ChunkKeyEncoding, ChunkKeyEncodingLike from zarr.core.common import ( JSON, AccessModeLiteral, ChunkCoords, MemoryOrder, ShapeLike, ZarrFormat, ) from zarr.storage import StoreLike __all__ = [ "array", "consolidate_metadata", "copy", "copy_all", "copy_store", "create", "create_array", "create_hierarchy", "empty", "empty_like", "full", "full_like", "group", "load", "ones", "ones_like", "open", "open_array", "open_consolidated", "open_group", "open_like", "save", "save_array", "save_group", "tree", "zeros", "zeros_like", ] def consolidate_metadata( store: StoreLike, path: str | None = None, zarr_format: ZarrFormat | None = None, ) -> Group: """ Consolidate the metadata of all nodes in a hierarchy. Upon completion, the metadata of the root node in the Zarr hierarchy will be updated to include all the metadata of child nodes. Parameters ---------- store : StoreLike The store-like object whose metadata you wish to consolidate. path : str, optional A path to a group in the store to consolidate at. Only children below that group will be consolidated. By default, the root node is used so all the metadata in the store is consolidated. zarr_format : {2, 3, None}, optional The zarr format of the hierarchy. By default the zarr format is inferred. Returns ------- group: Group The group, with the ``consolidated_metadata`` field set to include the metadata of each child node. """ return Group(sync(async_api.consolidate_metadata(store, path=path, zarr_format=zarr_format))) def copy(*args: Any, **kwargs: Any) -> tuple[int, int, int]: return sync(async_api.copy(*args, **kwargs)) def copy_all(*args: Any, **kwargs: Any) -> tuple[int, int, int]: return sync(async_api.copy_all(*args, **kwargs)) def copy_store(*args: Any, **kwargs: Any) -> tuple[int, int, int]: return sync(async_api.copy_store(*args, **kwargs)) def load( store: StoreLike, path: str | None = None, zarr_format: ZarrFormat | None = None, zarr_version: ZarrFormat | None = None, ) -> NDArrayLike | dict[str, NDArrayLike]: """Load data from an array or group into memory. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. path : str or None, optional The path within the store from which to load. Returns ------- out If the path contains an array, out will be a numpy array. If the path contains a group, out will be a dict-like object where keys are array names and values are numpy arrays. 
See Also -------- save, save_group Notes ----- If loading data from a group of arrays, data will not be immediately loaded into memory. Rather, arrays will be loaded into memory as they are requested. """ return sync( async_api.load(store=store, zarr_version=zarr_version, zarr_format=zarr_format, path=path) ) @_deprecate_positional_args def open( store: StoreLike | None = None, *, mode: AccessModeLiteral = "a", zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: Any, # TODO: type kwargs as valid args to async_api.open ) -> Array | Group: """Open a group or array using file-mode-like semantics. Parameters ---------- store : Store or str, optional Store or path to directory in file system or name of zip file. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional The path within the store to open. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs Additional parameters are passed through to :func:`zarr.api.asynchronous.open_array` or :func:`zarr.api.asynchronous.open_group`. Returns ------- z : array or group Return type depends on what exists in the given store. """ obj = sync( async_api.open( store=store, mode=mode, zarr_version=zarr_version, zarr_format=zarr_format, path=path, storage_options=storage_options, **kwargs, ) ) if isinstance(obj, AsyncArray): return Array(obj) else: return Group(obj) def open_consolidated(*args: Any, use_consolidated: Literal[True] = True, **kwargs: Any) -> Group: """ Alias for :func:`open_group` with ``use_consolidated=True``. """ return Group( sync(async_api.open_consolidated(*args, use_consolidated=use_consolidated, **kwargs)) ) def save( store: StoreLike, *args: NDArrayLike, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, **kwargs: Any, # TODO: type kwargs as valid args to async_api.save ) -> None: """Save an array or group of arrays to the local file system. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. *args : ndarray NumPy arrays with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional The path within the group where the arrays will be saved. **kwargs NumPy arrays with data to save. """ return sync( async_api.save( store, *args, zarr_version=zarr_version, zarr_format=zarr_format, path=path, **kwargs ) ) @_deprecate_positional_args def save_array( store: StoreLike, arr: NDArrayLike, *, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: Any, # TODO: type kwargs as valid args to async_api.save_array ) -> None: """Save a NumPy array to the local file system. Follows a similar API to the NumPy save() function. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. arr : ndarray NumPy array with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving.
path : str or None, optional The path within the store where the array will be saved. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs Passed through to :func:`create`, e.g., compressor. """ return sync( async_api.save_array( store=store, arr=arr, zarr_version=zarr_version, zarr_format=zarr_format, path=path, storage_options=storage_options, **kwargs, ) ) def save_group( store: StoreLike, *args: NDArrayLike, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, path: str | None = None, storage_options: dict[str, Any] | None = None, **kwargs: NDArrayLike, ) -> None: """Save several NumPy arrays to the local file system. Follows a similar API to the NumPy savez()/savez_compressed() functions. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. *args : ndarray NumPy arrays with data to save. zarr_format : {2, 3, None}, optional The zarr format to use when saving. path : str or None, optional Path within the store where the group will be saved. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs NumPy arrays with data to save. """ return sync( async_api.save_group( store, *args, zarr_version=zarr_version, zarr_format=zarr_format, path=path, storage_options=storage_options, **kwargs, ) ) @deprecated("Use Group.tree instead.") def tree(grp: Group, expand: bool | None = None, level: int | None = None) -> Any: """Provide a rich display of the hierarchy. .. deprecated:: 3.0.0 `zarr.tree()` is deprecated and will be removed in a future release. Use `group.tree()` instead. Parameters ---------- grp : Group Zarr or h5py group. expand : bool, optional Only relevant for HTML representation. If True, tree will be fully expanded. level : int, optional Maximum depth to descend into hierarchy. Returns ------- TreeRepr A pretty-printable object displaying the hierarchy. """ return sync(async_api.tree(grp._async_group, expand=expand, level=level)) # TODO: add type annotations for kwargs def array(data: npt.ArrayLike, **kwargs: Any) -> Array: """Create an array filled with `data`. Parameters ---------- data : array_like The data to fill the array with. **kwargs Passed through to :func:`create`. Returns ------- array : Array The new array. """ return Array(sync(async_api.array(data=data, **kwargs))) @_deprecate_positional_args def group( store: StoreLike | None = None, *, overwrite: bool = False, chunk_store: StoreLike | None = None, # not used cache_attrs: bool | None = None, # not used, default changed synchronizer: Any | None = None, # not used path: str | None = None, zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # not used attributes: dict[str, JSON] | None = None, storage_options: dict[str, Any] | None = None, ) -> Group: """Create a group. Parameters ---------- store : Store or str, optional Store or path to directory in file system. overwrite : bool, optional If True, delete any pre-existing data in `store` at `path` before creating the group. chunk_store : Store, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. 
If False, user attributes are reloaded from the store prior to all attribute read operations. synchronizer : object, optional Array synchronizer. path : str, optional Group path within store. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. zarr_format : {2, 3, None}, optional The zarr format to use when saving. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. Returns ------- g : Group The new group. """ return Group( sync( async_api.group( store=store, overwrite=overwrite, chunk_store=chunk_store, cache_attrs=cache_attrs, synchronizer=synchronizer, path=path, zarr_version=zarr_version, zarr_format=zarr_format, meta_array=meta_array, attributes=attributes, storage_options=storage_options, ) ) ) @_deprecate_positional_args def open_group( store: StoreLike | None = None, *, mode: AccessModeLiteral = "a", cache_attrs: bool | None = None, # default changed, not used in async api synchronizer: Any = None, # not used in async api path: str | None = None, chunk_store: StoreLike | None = None, # not used in async api storage_options: dict[str, Any] | None = None, # not used in async api zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # not used in async api attributes: dict[str, JSON] | None = None, use_consolidated: bool | str | None = None, ) -> Group: """Open a group using file-mode-like semantics. Parameters ---------- store : Store, str, or mapping, optional Store or path to directory in file system or name of zip file. Strings are interpreted as paths on the local file system and used as the ``root`` argument to :class:`zarr.storage.LocalStore`. Dictionaries are used as the ``store_dict`` argument in :class:`zarr.storage.MemoryStore``. By default (``store=None``) a new :class:`zarr.storage.MemoryStore` is created. mode : {'r', 'r+', 'a', 'w', 'w-'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist); 'a' means read/write (create if doesn't exist); 'w' means create (overwrite if exists); 'w-' means create (fail if exists). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. synchronizer : object, optional Array synchronizer. path : str, optional Group path within store. chunk_store : Store or str, optional Store or path to directory in file system or name of zip file. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. attributes : dict A dictionary of JSON-serializable values with user-defined attributes. use_consolidated : bool or str, default None Whether to use consolidated metadata. By default, consolidated metadata is used if it's present in the store (in the ``zarr.json`` for Zarr format 3 and in the ``.zmetadata`` file for Zarr format 2). To explicitly require consolidated metadata, set ``use_consolidated=True``, which will raise an exception if consolidated metadata is not found. 
To explicitly *not* use consolidated metadata, set ``use_consolidated=False``, which will fall back to using the regular, non-consolidated metadata. Zarr format 2 allows configuring the key storing the consolidated metadata (``.zmetadata`` by default). Specify the custom key as ``use_consolidated`` to load consolidated metadata from a non-default key. Returns ------- g : Group The new group. """ return Group( sync( async_api.open_group( store=store, mode=mode, cache_attrs=cache_attrs, synchronizer=synchronizer, path=path, chunk_store=chunk_store, storage_options=storage_options, zarr_version=zarr_version, zarr_format=zarr_format, meta_array=meta_array, attributes=attributes, use_consolidated=use_consolidated, ) ) ) def create_group( store: StoreLike, *, path: str | None = None, zarr_format: ZarrFormat | None = None, overwrite: bool = False, attributes: dict[str, Any] | None = None, storage_options: dict[str, Any] | None = None, ) -> Group: """Create a group. Parameters ---------- store : Store or str Store or path to directory in file system. path : str, optional Group path within store. overwrite : bool, optional If True, pre-existing data at ``path`` will be deleted before creating the group. zarr_format : {2, 3, None}, optional The zarr format to use when saving. If no ``zarr_format`` is provided, the default format will be used. This default can be changed by modifying the value of ``default_zarr_format`` in :mod:`zarr.core.config`. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. Returns ------- Group The new group. """ return Group( sync( async_api.create_group( store=store, path=path, overwrite=overwrite, storage_options=storage_options, zarr_format=zarr_format, attributes=attributes, ) ) ) # TODO: add type annotations for kwargs def create( shape: ChunkCoords | int, *, # Note: this is a change from v2 chunks: ChunkCoords | int | bool | None = None, dtype: npt.DTypeLike | None = None, compressor: dict[str, JSON] | None = None, # TODO: default and type change fill_value: Any | None = 0, # TODO: need type order: MemoryOrder | None = None, store: str | StoreLike | None = None, synchronizer: Any | None = None, overwrite: bool = False, path: PathLike | None = None, chunk_store: StoreLike | None = None, filters: list[dict[str, JSON]] | None = None, # TODO: type has changed cache_metadata: bool | None = None, cache_attrs: bool | None = None, read_only: bool | None = None, object_codec: Codec | None = None, # TODO: type has changed dimension_separator: Literal[".", "/"] | None = None, write_empty_chunks: bool | None = None, # TODO: default has changed zarr_version: ZarrFormat | None = None, # deprecated zarr_format: ZarrFormat | None = None, meta_array: Any | None = None, # TODO: need type attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ChunkCoords | int | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, config: ArrayConfigLike | None = None, **kwargs: Any, ) -> Array: """Create an array. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If True, will be guessed from `shape` and `dtype`. If False, will be set to `shape`, i.e., single chunk for the whole array.
If an int, the chunk size in each dimension will be given by the value of `chunks`. Default is True. dtype : str or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Deprecated in favor of the ``config`` keyword argument. Pass ``{'order': <value>}`` to ``create`` instead of using this parameter. Memory layout to be used within each chunk. If not specified, the ``array.order`` parameter in the global config will be used. store : Store or str Store or path to directory in file system or name of zip file. synchronizer : object, optional Array synchronizer. overwrite : bool, optional If True, delete all pre-existing data in `store` at `path` before creating the array. path : str, optional Path under which array is stored. chunk_store : MutableMapping, optional Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata. filters : sequence of Codecs, optional Sequence of filters to use to encode chunk data prior to compression. cache_metadata : bool, optional If True, array configuration metadata will be cached for the lifetime of the object. If False, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern). cache_attrs : bool, optional If True (default), user attributes will be cached for attribute read operations. If False, user attributes are reloaded from the store prior to all attribute read operations. read_only : bool, optional True if array should be protected against modification. object_codec : Codec, optional A codec to encode object arrays, only needed if dtype=object. dimension_separator : {'.', '/'}, optional Separator placed between the dimensions of a chunk. write_empty_chunks : bool, optional Deprecated in favor of the ``config`` keyword argument. Pass ``{'write_empty_chunks': <value>}`` to ``create`` instead of using this parameter. If True, all chunks will be stored regardless of their contents. If False, each chunk is compared to the array's fill value prior to storing. If a chunk is uniformly equal to the fill value, then that chunk is not stored, and the store entry for that chunk's key is deleted. zarr_format : {2, 3, None}, optional The zarr format to use when saving. meta_array : array-like, optional An array instance to use for determining arrays to create and return to users. Use `numpy.empty(())` by default. storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. config : ArrayConfigLike, optional Runtime configuration of the array. If provided, will override the default values from `zarr.config.array`. Returns ------- z : Array The array.
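Examples -------- A minimal sketch, not part of the upstream docstring; the shape, chunk shape, and dtype shown are illustrative only: >>> import zarr >>> z = zarr.create(shape=(10000, 10000), chunks=(1000, 1000), dtype='i4') >>> z.shape (10000, 10000)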
""" return Array( sync( async_api.create( shape=shape, chunks=chunks, dtype=dtype, compressor=compressor, fill_value=fill_value, order=order, store=store, synchronizer=synchronizer, overwrite=overwrite, path=path, chunk_store=chunk_store, filters=filters, cache_metadata=cache_metadata, cache_attrs=cache_attrs, read_only=read_only, object_codec=object_codec, dimension_separator=dimension_separator, write_empty_chunks=write_empty_chunks, zarr_version=zarr_version, zarr_format=zarr_format, meta_array=meta_array, attributes=attributes, chunk_shape=chunk_shape, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, storage_options=storage_options, config=config, **kwargs, ) ) ) def create_array( store: str | StoreLike, *, name: str | None = None, shape: ShapeLike | None = None, dtype: npt.DTypeLike | None = None, data: np.ndarray[Any, np.dtype[Any]] | None = None, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = None, order: MemoryOrder | None = None, zarr_format: ZarrFormat | None = 3, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, overwrite: bool = False, config: ArrayConfigLike | None = None, ) -> Array: """Create an array. This function wraps :func:`zarr.core.array.create_array`. Parameters ---------- store : str or Store Store or path to directory in file system or name of zip file. name : str or None, optional The name of the array within the store. If ``name`` is ``None``, the array will be located at the root of the store. shape : ChunkCoords, optional Shape of the array. Can be ``None`` if ``data`` is provided. dtype : npt.DTypeLike, optional Data type of the array. Can be ``None`` if ``data`` is provided. data : np.ndarray, optional Array-like data to use for initializing the array. If this parameter is provided, the ``shape`` and ``dtype`` parameters must be identical to ``data.shape`` and ``data.dtype``, or ``None``. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, default are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the the order if your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. compressors : Iterable[Codec], optional List of compressors to apply to the array. 
Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory layout of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. zarr_format : {2, 3}, optional The zarr format to use when saving. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncoding, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. storage_options : dict, optional If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfigLike, optional Runtime configuration for the array. Returns ------- Array The array. Examples -------- >>> import zarr >>> store = zarr.storage.MemoryStore() >>> arr = zarr.create_array( ... store=store, ... shape=(100,100), ... chunks=(10,10), ... dtype='i4', ... fill_value=0) """ return Array( sync( zarr.core.array.create_array( store, name=name, shape=shape, dtype=dtype, data=data, chunks=chunks, shards=shards, filters=filters, compressors=compressors, serializer=serializer, fill_value=fill_value, order=order, zarr_format=zarr_format, attributes=attributes, chunk_key_encoding=chunk_key_encoding, dimension_names=dimension_names, storage_options=storage_options, overwrite=overwrite, config=config, ) ) ) # TODO: add type annotations for kwargs def empty(shape: ChunkCoords, **kwargs: Any) -> Array: """Create an empty array with the specified shape.
The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return Array(sync(async_api.empty(shape, **kwargs))) # TODO: move ArrayLike to common module # TODO: add type annotations for kwargs def empty_like(a: ArrayLike, **kwargs: Any) -> Array: """Create an empty array like another array. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return Array(sync(async_api.empty_like(a, **kwargs))) # TODO: add type annotations for kwargs and fill_value def full(shape: ChunkCoords, fill_value: Any, **kwargs: Any) -> Array: """Create an array with a default fill value. Parameters ---------- shape : int or tuple of int Shape of the empty array. fill_value : scalar Fill value. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.full(shape=shape, fill_value=fill_value, **kwargs))) # TODO: move ArrayLike to common module # TODO: add type annotations for kwargs def full_like(a: ArrayLike, **kwargs: Any) -> Array: """Create a filled array like another array. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.full_like(a, **kwargs))) # TODO: add type annotations for kwargs def ones(shape: ChunkCoords, **kwargs: Any) -> Array: """Create an array with a fill value of one. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.ones(shape, **kwargs))) # TODO: add type annotations for kwargs def ones_like(a: ArrayLike, **kwargs: Any) -> Array: """Create an array of ones like another array. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.ones_like(a, **kwargs))) # TODO: update this once async_api.open_array is fully implemented def open_array( store: StoreLike | None = None, *, zarr_version: ZarrFormat | None = None, path: PathLike = "", storage_options: dict[str, Any] | None = None, **kwargs: Any, ) -> Array: """Open an array using file-mode-like semantics. Parameters ---------- store : Store or str Store or path to directory in file system or name of zip file. zarr_version : {2, 3, None}, optional The zarr format to use when saving. path : str, optional Path in store to array. 
storage_options : dict If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. **kwargs Any keyword arguments to pass to ``create``. Returns ------- Array The opened array. """ return Array( sync( async_api.open_array( store=store, zarr_version=zarr_version, path=path, storage_options=storage_options, **kwargs, ) ) ) # TODO: add type annotations for kwargs def open_like(a: ArrayLike, path: str, **kwargs: Any) -> Array: """Open a persistent array like another array. Parameters ---------- a : Array The shape and data-type of `a` define these same attributes of the returned array. path : str The path to the new array. **kwargs Any keyword arguments to pass to the array constructor. Returns ------- Array The opened array. """ return Array(sync(async_api.open_like(a, path=path, **kwargs))) # TODO: add type annotations for kwargs def zeros(shape: ChunkCoords, **kwargs: Any) -> Array: """Create an array with a fill value of zero. Parameters ---------- shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.zeros(shape=shape, **kwargs))) # TODO: add type annotations for kwargs def zeros_like(a: ArrayLike, **kwargs: Any) -> Array: """Create an array of zeros like another array. Parameters ---------- a : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(sync(async_api.zeros_like(a, **kwargs))) zarr-python-3.0.6/src/zarr/codecs/000077500000000000000000000000001476711733500170405ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/codecs/__init__.py000066400000000000000000000013421476711733500211510ustar00rootroot00000000000000from __future__ import annotations from zarr.codecs.blosc import BloscCname, BloscCodec, BloscShuffle from zarr.codecs.bytes import BytesCodec, Endian from zarr.codecs.crc32c_ import Crc32cCodec from zarr.codecs.gzip import GzipCodec from zarr.codecs.sharding import ShardingCodec, ShardingCodecIndexLocation from zarr.codecs.transpose import TransposeCodec from zarr.codecs.vlen_utf8 import VLenBytesCodec, VLenUTF8Codec from zarr.codecs.zstd import ZstdCodec __all__ = [ "BloscCname", "BloscCodec", "BloscShuffle", "BytesCodec", "Crc32cCodec", "Endian", "GzipCodec", "ShardingCodec", "ShardingCodecIndexLocation", "TransposeCodec", "VLenBytesCodec", "VLenUTF8Codec", "ZstdCodec", ] zarr-python-3.0.6/src/zarr/codecs/_v2.py000066400000000000000000000070061476711733500201030ustar00rootroot00000000000000from __future__ import annotations import asyncio from dataclasses import dataclass from typing import TYPE_CHECKING import numcodecs import numpy as np from numcodecs.compat import ensure_bytes, ensure_ndarray_like from zarr.abc.codec import ArrayBytesCodec from zarr.registry import get_ndbuffer_class if TYPE_CHECKING: import numcodecs.abc from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer, NDBuffer @dataclass(frozen=True) class V2Codec(ArrayBytesCodec): filters: tuple[numcodecs.abc.Codec, ...]
| None compressor: numcodecs.abc.Codec | None is_fixed_size = False async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> NDBuffer: cdata = chunk_bytes.as_array_like() # decompress if self.compressor: chunk = await asyncio.to_thread(self.compressor.decode, cdata) else: chunk = cdata # apply filters if self.filters: for f in reversed(self.filters): chunk = await asyncio.to_thread(f.decode, chunk) # view as numpy array with correct dtype chunk = ensure_ndarray_like(chunk) # special case object dtype, because incorrect handling can lead to # segfaults and other bad things happening if chunk_spec.dtype != object: try: chunk = chunk.view(chunk_spec.dtype) except TypeError: # this will happen if the dtype of the chunk # does not match the dtype of the array spec e.g. if # the dtype of the chunk_spec is a string dtype, but the chunk # is an object array. In this case, we need to convert the object # array to the correct dtype. chunk = np.array(chunk).astype(chunk_spec.dtype) elif chunk.dtype != object: # If we end up here, someone must have hacked around with the filters. # We cannot deal with object arrays unless there is an object # codec in the filter chain, i.e., a filter that converts from object # array to something else during encoding, and converts back to object # array during decoding. raise RuntimeError("cannot read object array without object codec") # ensure correct chunk shape chunk = chunk.reshape(-1, order="A") chunk = chunk.reshape(chunk_spec.shape, order=chunk_spec.order) return get_ndbuffer_class().from_ndarray_like(chunk) async def _encode_single( self, chunk_array: NDBuffer, chunk_spec: ArraySpec, ) -> Buffer | None: chunk = chunk_array.as_ndarray_like() # ensure contiguous and correct order chunk = chunk.astype(chunk_spec.dtype, order=chunk_spec.order, copy=False) # apply filters if self.filters: for f in self.filters: chunk = await asyncio.to_thread(f.encode, chunk) # check object encoding if ensure_ndarray_like(chunk).dtype == object: raise RuntimeError("cannot write object array without object codec") # compress if self.compressor: cdata = await asyncio.to_thread(self.compressor.encode, chunk) else: cdata = chunk cdata = ensure_bytes(cdata) return chunk_spec.prototype.buffer.from_bytes(cdata) def compute_encoded_size(self, _input_byte_length: int, _chunk_spec: ArraySpec) -> int: raise NotImplementedError zarr-python-3.0.6/src/zarr/codecs/blosc.py000066400000000000000000000137051476711733500205220ustar00rootroot00000000000000from __future__ import annotations import asyncio from dataclasses import dataclass, replace from enum import Enum from functools import cached_property from typing import TYPE_CHECKING import numcodecs from numcodecs.blosc import Blosc from zarr.abc.codec import BytesBytesCodec from zarr.core.buffer.cpu import as_numpy_array_wrapper from zarr.core.common import JSON, parse_enum, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer class BloscShuffle(Enum): """ Enum for shuffle filter used by blosc. """ noshuffle = "noshuffle" shuffle = "shuffle" bitshuffle = "bitshuffle" @classmethod def from_int(cls, num: int) -> BloscShuffle: blosc_shuffle_int_to_str = { 0: "noshuffle", 1: "shuffle", 2: "bitshuffle", } if num not in blosc_shuffle_int_to_str: raise ValueError(f"Value must be between 0 and 2.
Got {num}.") return BloscShuffle[blosc_shuffle_int_to_str[num]] class BloscCname(Enum): """ Enum for compression library used by blosc. """ lz4 = "lz4" lz4hc = "lz4hc" blosclz = "blosclz" zstd = "zstd" snappy = "snappy" zlib = "zlib" # See https://zarr.readthedocs.io/en/stable/user-guide/performance.html#configuring-blosc numcodecs.blosc.use_threads = False def parse_typesize(data: JSON) -> int: if isinstance(data, int): if data > 0: return data else: raise ValueError( f"Value must be greater than 0. Got {data}, which is less or equal to 0." ) raise TypeError(f"Value must be an int. Got {type(data)} instead.") # todo: real validation def parse_clevel(data: JSON) -> int: if isinstance(data, int): return data raise TypeError(f"Value should be an int. Got {type(data)} instead.") def parse_blocksize(data: JSON) -> int: if isinstance(data, int): return data raise TypeError(f"Value should be an int. Got {type(data)} instead.") @dataclass(frozen=True) class BloscCodec(BytesBytesCodec): is_fixed_size = False typesize: int | None cname: BloscCname = BloscCname.zstd clevel: int = 5 shuffle: BloscShuffle | None = BloscShuffle.noshuffle blocksize: int = 0 def __init__( self, *, typesize: int | None = None, cname: BloscCname | str = BloscCname.zstd, clevel: int = 5, shuffle: BloscShuffle | str | None = None, blocksize: int = 0, ) -> None: typesize_parsed = parse_typesize(typesize) if typesize is not None else None cname_parsed = parse_enum(cname, BloscCname) clevel_parsed = parse_clevel(clevel) shuffle_parsed = parse_enum(shuffle, BloscShuffle) if shuffle is not None else None blocksize_parsed = parse_blocksize(blocksize) object.__setattr__(self, "typesize", typesize_parsed) object.__setattr__(self, "cname", cname_parsed) object.__setattr__(self, "clevel", clevel_parsed) object.__setattr__(self, "shuffle", shuffle_parsed) object.__setattr__(self, "blocksize", blocksize_parsed) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "blosc") return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: if self.typesize is None: raise ValueError("`typesize` needs to be set for serialization.") if self.shuffle is None: raise ValueError("`shuffle` needs to be set for serialization.") return { "name": "blosc", "configuration": { "typesize": self.typesize, "cname": self.cname.value, "clevel": self.clevel, "shuffle": self.shuffle.value, "blocksize": self.blocksize, }, } def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: dtype = array_spec.dtype new_codec = self if new_codec.typesize is None: new_codec = replace(new_codec, typesize=dtype.itemsize) if new_codec.shuffle is None: new_codec = replace( new_codec, shuffle=(BloscShuffle.bitshuffle if dtype.itemsize == 1 else BloscShuffle.shuffle), ) return new_codec @cached_property def _blosc_codec(self) -> Blosc: if self.shuffle is None: raise ValueError("`shuffle` needs to be set for decoding and encoding.") map_shuffle_str_to_int = { BloscShuffle.noshuffle: 0, BloscShuffle.shuffle: 1, BloscShuffle.bitshuffle: 2, } config_dict = { "cname": self.cname.name, "clevel": self.clevel, "shuffle": map_shuffle_str_to_int[self.shuffle], "blocksize": self.blocksize, } return Blosc.from_config(config_dict) async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer: return await asyncio.to_thread( as_numpy_array_wrapper, self._blosc_codec.decode, chunk_bytes, chunk_spec.prototype ) async def _encode_single( self, chunk_bytes: Buffer, 
chunk_spec: ArraySpec, ) -> Buffer | None: # Since blosc only supports host memory, we convert the input and output of the encoding # between numpy array and buffer return await asyncio.to_thread( lambda chunk: chunk_spec.prototype.buffer.from_bytes( self._blosc_codec.encode(chunk.as_numpy_array()) ), chunk_bytes, ) def compute_encoded_size(self, _input_byte_length: int, _chunk_spec: ArraySpec) -> int: raise NotImplementedError register_codec("blosc", BloscCodec) zarr-python-3.0.6/src/zarr/codecs/bytes.py000066400000000000000000000077251476711733500205470ustar00rootroot00000000000000from __future__ import annotations import sys from dataclasses import dataclass, replace from enum import Enum from typing import TYPE_CHECKING import numpy as np from zarr.abc.codec import ArrayBytesCodec from zarr.core.buffer import Buffer, NDArrayLike, NDBuffer from zarr.core.common import JSON, parse_enum, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec class Endian(Enum): """ Enum for endian type used by bytes codec. """ big = "big" little = "little" default_system_endian = Endian(sys.byteorder) @dataclass(frozen=True) class BytesCodec(ArrayBytesCodec): is_fixed_size = True endian: Endian | None def __init__(self, *, endian: Endian | str | None = default_system_endian) -> None: endian_parsed = None if endian is None else parse_enum(endian, Endian) object.__setattr__(self, "endian", endian_parsed) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration( data, "bytes", require_configuration=False ) configuration_parsed = configuration_parsed or {} return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: if self.endian is None: return {"name": "bytes"} else: return {"name": "bytes", "configuration": {"endian": self.endian.value}} def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: if array_spec.dtype.itemsize == 0: if self.endian is not None: return replace(self, endian=None) elif self.endian is None: raise ValueError( "The `endian` configuration needs to be specified for multi-byte data types."
) return self async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> NDBuffer: assert isinstance(chunk_bytes, Buffer) if chunk_spec.dtype.itemsize > 0: if self.endian == Endian.little: prefix = "<" else: prefix = ">" dtype = np.dtype(f"{prefix}{chunk_spec.dtype.str[1:]}") else: dtype = np.dtype(f"|{chunk_spec.dtype.str[1:]}") as_array_like = chunk_bytes.as_array_like() if isinstance(as_array_like, NDArrayLike): as_nd_array_like = as_array_like else: as_nd_array_like = np.asanyarray(as_array_like) chunk_array = chunk_spec.prototype.nd_buffer.from_ndarray_like( as_nd_array_like.view(dtype=dtype) ) # ensure correct chunk shape if chunk_array.shape != chunk_spec.shape: chunk_array = chunk_array.reshape( chunk_spec.shape, ) return chunk_array async def _encode_single( self, chunk_array: NDBuffer, chunk_spec: ArraySpec, ) -> Buffer | None: assert isinstance(chunk_array, NDBuffer) if ( chunk_array.dtype.itemsize > 1 and self.endian is not None and self.endian != chunk_array.byteorder ): # type-ignore is a numpy bug # see https://github.com/numpy/numpy/issues/26473 new_dtype = chunk_array.dtype.newbyteorder(self.endian.name) # type: ignore[arg-type] chunk_array = chunk_array.astype(new_dtype) nd_array = chunk_array.as_ndarray_like() # Flatten the nd-array (only copy if needed) and reinterpret as bytes nd_array = nd_array.ravel().view(dtype="b") return chunk_spec.prototype.buffer.from_array_like(nd_array) def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int: return input_byte_length register_codec("bytes", BytesCodec) # compatibility with earlier versions of ZEP1 register_codec("endian", BytesCodec) zarr-python-3.0.6/src/zarr/codecs/crc32c_.py000066400000000000000000000042411476711733500206310ustar00rootroot00000000000000from __future__ import annotations from dataclasses import dataclass from typing import TYPE_CHECKING, cast import numpy as np import typing_extensions from crc32c import crc32c from zarr.abc.codec import BytesBytesCodec from zarr.core.common import JSON, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer @dataclass(frozen=True) class Crc32cCodec(BytesBytesCodec): is_fixed_size = True @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: parse_named_configuration(data, "crc32c", require_configuration=False) return cls() def to_dict(self) -> dict[str, JSON]: return {"name": "crc32c"} async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer: data = chunk_bytes.as_numpy_array() crc32_bytes = data[-4:] inner_bytes = data[:-4] # Need to do a manual cast until https://github.com/numpy/numpy/issues/26783 is resolved computed_checksum = np.uint32(crc32c(cast(typing_extensions.Buffer, inner_bytes))).tobytes() stored_checksum = bytes(crc32_bytes) if computed_checksum != stored_checksum: raise ValueError( f"Stored and computed checksum do not match. Stored: {stored_checksum!r}. Computed: {computed_checksum!r}." 
) return chunk_spec.prototype.buffer.from_array_like(inner_bytes) async def _encode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer | None: data = chunk_bytes.as_numpy_array() # Calculate the checksum and "cast" it to a numpy array checksum = np.array([crc32c(cast(typing_extensions.Buffer, data))], dtype=np.uint32) # Append the checksum (as bytes) to the data return chunk_spec.prototype.buffer.from_array_like(np.append(data, checksum.view("b"))) def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int: return input_byte_length + 4 register_codec("crc32c", Crc32cCodec) zarr-python-3.0.6/src/zarr/codecs/gzip.py000066400000000000000000000041141476711733500203630ustar00rootroot00000000000000from __future__ import annotations import asyncio from dataclasses import dataclass from typing import TYPE_CHECKING from numcodecs.gzip import GZip from zarr.abc.codec import BytesBytesCodec from zarr.core.buffer.cpu import as_numpy_array_wrapper from zarr.core.common import JSON, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer def parse_gzip_level(data: JSON) -> int: if not isinstance(data, int): raise TypeError(f"Expected int, got {type(data)}") if data not in range(10): raise ValueError( f"Expected an integer from the inclusive range [0, 9]. Got {data} instead." ) return data @dataclass(frozen=True) class GzipCodec(BytesBytesCodec): is_fixed_size = False level: int = 5 def __init__(self, *, level: int = 5) -> None: level_parsed = parse_gzip_level(level) object.__setattr__(self, "level", level_parsed) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "gzip") return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: return {"name": "gzip", "configuration": {"level": self.level}} async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer: return await asyncio.to_thread( as_numpy_array_wrapper, GZip(self.level).decode, chunk_bytes, chunk_spec.prototype ) async def _encode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer | None: return await asyncio.to_thread( as_numpy_array_wrapper, GZip(self.level).encode, chunk_bytes, chunk_spec.prototype ) def compute_encoded_size( self, _input_byte_length: int, _chunk_spec: ArraySpec, ) -> int: raise NotImplementedError register_codec("gzip", GzipCodec) zarr-python-3.0.6/src/zarr/codecs/sharding.py000066400000000000000000000647351476711733500212260ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Iterable, Mapping, MutableMapping from dataclasses import dataclass, field, replace from enum import Enum from functools import lru_cache from operator import itemgetter from typing import TYPE_CHECKING, Any, NamedTuple, cast import numpy as np import numpy.typing as npt from zarr.abc.codec import ( ArrayBytesCodec, ArrayBytesCodecPartialDecodeMixin, ArrayBytesCodecPartialEncodeMixin, Codec, CodecPipeline, ) from zarr.abc.store import ( ByteGetter, ByteRequest, ByteSetter, RangeByteRequest, SuffixByteRequest, ) from zarr.codecs.bytes import BytesCodec from zarr.codecs.crc32c_ import Crc32cCodec from zarr.core.array_spec import ArrayConfig, ArraySpec from zarr.core.buffer import ( Buffer, BufferPrototype, NDBuffer, default_buffer_prototype, numpy_buffer_prototype, ) from
zarr.core.chunk_grids import ChunkGrid, RegularChunkGrid from zarr.core.common import ( ChunkCoords, ChunkCoordsLike, parse_enum, parse_named_configuration, parse_shapelike, product, ) from zarr.core.indexing import ( BasicIndexer, SelectorTuple, c_order_iter, get_indexer, morton_order_iter, ) from zarr.core.metadata.v3 import parse_codecs from zarr.registry import get_ndbuffer_class, get_pipeline_class, register_codec if TYPE_CHECKING: from collections.abc import Awaitable, Callable, Iterator from typing import Self from zarr.core.common import JSON MAX_UINT_64 = 2**64 - 1 ShardMapping = Mapping[ChunkCoords, Buffer] ShardMutableMapping = MutableMapping[ChunkCoords, Buffer] class ShardingCodecIndexLocation(Enum): """ Enum for index location used by the sharding codec. """ start = "start" end = "end" def parse_index_location(data: object) -> ShardingCodecIndexLocation: return parse_enum(data, ShardingCodecIndexLocation) @dataclass(frozen=True) class _ShardingByteGetter(ByteGetter): shard_dict: ShardMapping chunk_coords: ChunkCoords async def get( self, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: assert byte_range is None, "byte_range is not supported within shards" assert prototype == default_buffer_prototype(), ( f"prototype is not supported within shards currently. diff: {prototype} != {default_buffer_prototype()}" ) return self.shard_dict.get(self.chunk_coords) @dataclass(frozen=True) class _ShardingByteSetter(_ShardingByteGetter, ByteSetter): shard_dict: ShardMutableMapping async def set(self, value: Buffer, byte_range: ByteRequest | None = None) -> None: assert byte_range is None, "byte_range is not supported within shards" self.shard_dict[self.chunk_coords] = value async def delete(self) -> None: del self.shard_dict[self.chunk_coords] async def set_if_not_exists(self, default: Buffer) -> None: self.shard_dict.setdefault(self.chunk_coords, default) class _ShardIndex(NamedTuple): # dtype uint64, shape (chunks_per_shard_0, chunks_per_shard_1, ..., 2) offsets_and_lengths: npt.NDArray[np.uint64] @property def chunks_per_shard(self) -> ChunkCoords: result = tuple(self.offsets_and_lengths.shape[0:-1]) # The cast is required until https://github.com/numpy/numpy/pull/27211 is merged return cast(ChunkCoords, result) def _localize_chunk(self, chunk_coords: ChunkCoords) -> ChunkCoords: return tuple( chunk_i % shard_i for chunk_i, shard_i in zip(chunk_coords, self.offsets_and_lengths.shape, strict=False) ) def is_all_empty(self) -> bool: return bool(np.array_equiv(self.offsets_and_lengths, MAX_UINT_64)) def get_full_chunk_map(self) -> npt.NDArray[np.bool_]: return np.not_equal(self.offsets_and_lengths[..., 0], MAX_UINT_64) def get_chunk_slice(self, chunk_coords: ChunkCoords) -> tuple[int, int] | None: localized_chunk = self._localize_chunk(chunk_coords) chunk_start, chunk_len = self.offsets_and_lengths[localized_chunk] if (chunk_start, chunk_len) == (MAX_UINT_64, MAX_UINT_64): return None else: return (int(chunk_start), int(chunk_start + chunk_len)) def set_chunk_slice(self, chunk_coords: ChunkCoords, chunk_slice: slice | None) -> None: localized_chunk = self._localize_chunk(chunk_coords) if chunk_slice is None: self.offsets_and_lengths[localized_chunk] = (MAX_UINT_64, MAX_UINT_64) else: self.offsets_and_lengths[localized_chunk] = ( chunk_slice.start, chunk_slice.stop - chunk_slice.start, ) def is_dense(self, chunk_byte_length: int) -> bool: sorted_offsets_and_lengths = sorted( [ (offset, length) for offset, length in self.offsets_and_lengths if offset != 
MAX_UINT_64 ], key=itemgetter(0), ) # Are all non-empty offsets unique? if len( {offset for offset, _ in sorted_offsets_and_lengths if offset != MAX_UINT_64} ) != len(sorted_offsets_and_lengths): return False return all( offset % chunk_byte_length == 0 and length == chunk_byte_length for offset, length in sorted_offsets_and_lengths ) @classmethod def create_empty(cls, chunks_per_shard: ChunkCoords) -> _ShardIndex: offsets_and_lengths = np.zeros(chunks_per_shard + (2,), dtype="<u8") offsets_and_lengths.fill(MAX_UINT_64) return cls(offsets_and_lengths) class _ShardReader(ShardMapping): buf: Buffer index: _ShardIndex @classmethod async def from_bytes( cls, buf: Buffer, codec: ShardingCodec, chunks_per_shard: ChunkCoords ) -> _ShardReader: shard_index_size = codec._shard_index_size(chunks_per_shard) obj = cls() obj.buf = buf if codec.index_location == ShardingCodecIndexLocation.start: shard_index_bytes = obj.buf[:shard_index_size] else: shard_index_bytes = obj.buf[-shard_index_size:] obj.index = await codec._decode_shard_index(shard_index_bytes, chunks_per_shard) return obj @classmethod def create_empty( cls, chunks_per_shard: ChunkCoords, buffer_prototype: BufferPrototype | None = None ) -> _ShardReader: if buffer_prototype is None: buffer_prototype = default_buffer_prototype() index = _ShardIndex.create_empty(chunks_per_shard) obj = cls() obj.buf = buffer_prototype.buffer.create_zero_length() obj.index = index return obj def __getitem__(self, chunk_coords: ChunkCoords) -> Buffer: chunk_byte_slice = self.index.get_chunk_slice(chunk_coords) if chunk_byte_slice: return self.buf[chunk_byte_slice[0] : chunk_byte_slice[1]] raise KeyError def __len__(self) -> int: return int(self.index.offsets_and_lengths.size / 2) def __iter__(self) -> Iterator[ChunkCoords]: return c_order_iter(self.index.offsets_and_lengths.shape[:-1]) def is_empty(self) -> bool: return self.index.is_all_empty() class _ShardBuilder(_ShardReader, ShardMutableMapping): buf: Buffer index: _ShardIndex @classmethod def merge_with_morton_order( cls, chunks_per_shard: ChunkCoords, tombstones: set[ChunkCoords], *shard_dicts: ShardMapping, ) -> _ShardBuilder: obj = cls.create_empty(chunks_per_shard) for chunk_coords in morton_order_iter(chunks_per_shard): if chunk_coords in tombstones: continue for shard_dict in shard_dicts: maybe_value = shard_dict.get(chunk_coords, None) if maybe_value is not None: obj[chunk_coords] = maybe_value break return obj @classmethod def create_empty( cls, chunks_per_shard: ChunkCoords, buffer_prototype: BufferPrototype | None = None ) -> _ShardBuilder: if buffer_prototype is None: buffer_prototype = default_buffer_prototype() obj = cls() obj.buf = buffer_prototype.buffer.create_zero_length() obj.index = _ShardIndex.create_empty(chunks_per_shard) return obj def __setitem__(self, chunk_coords: ChunkCoords, value: Buffer) -> None: chunk_start = len(self.buf) chunk_length = len(value) self.buf += value self.index.set_chunk_slice(chunk_coords, slice(chunk_start, chunk_start + chunk_length)) def __delitem__(self, chunk_coords: ChunkCoords) -> None: raise NotImplementedError async def finalize( self, index_location: ShardingCodecIndexLocation, index_encoder: Callable[[_ShardIndex], Awaitable[Buffer]], ) -> Buffer: index_bytes = await index_encoder(self.index) if index_location == ShardingCodecIndexLocation.start: empty_chunks_mask = self.index.offsets_and_lengths[..., 0] == MAX_UINT_64 self.index.offsets_and_lengths[~empty_chunks_mask, 0] += len(index_bytes) index_bytes = await index_encoder(self.index) # encode again with corrected offsets out_buf = index_bytes + self.buf else: out_buf = self.buf + index_bytes return out_buf @dataclass(frozen=True) class _MergingShardBuilder(ShardMutableMapping): old_dict: _ShardReader new_dict: _ShardBuilder tombstones:
set[ChunkCoords] = field(default_factory=set) def __getitem__(self, chunk_coords: ChunkCoords) -> Buffer: chunk_bytes_maybe = self.new_dict.get(chunk_coords) if chunk_bytes_maybe is not None: return chunk_bytes_maybe return self.old_dict[chunk_coords] def __setitem__(self, chunk_coords: ChunkCoords, value: Buffer) -> None: self.new_dict[chunk_coords] = value def __delitem__(self, chunk_coords: ChunkCoords) -> None: self.tombstones.add(chunk_coords) def __len__(self) -> int: return self.old_dict.__len__() def __iter__(self) -> Iterator[ChunkCoords]: return self.old_dict.__iter__() def is_empty(self) -> bool: full_chunk_coords_map = self.old_dict.index.get_full_chunk_map() full_chunk_coords_map = np.logical_or( full_chunk_coords_map, self.new_dict.index.get_full_chunk_map() ) for tombstone in self.tombstones: full_chunk_coords_map[tombstone] = False return bool(np.array_equiv(full_chunk_coords_map, False)) async def finalize( self, index_location: ShardingCodecIndexLocation, index_encoder: Callable[[_ShardIndex], Awaitable[Buffer]], ) -> Buffer: shard_builder = _ShardBuilder.merge_with_morton_order( self.new_dict.index.chunks_per_shard, self.tombstones, self.new_dict, self.old_dict, ) return await shard_builder.finalize(index_location, index_encoder) @dataclass(frozen=True) class ShardingCodec( ArrayBytesCodec, ArrayBytesCodecPartialDecodeMixin, ArrayBytesCodecPartialEncodeMixin ): chunk_shape: ChunkCoords codecs: tuple[Codec, ...] index_codecs: tuple[Codec, ...] index_location: ShardingCodecIndexLocation = ShardingCodecIndexLocation.end def __init__( self, *, chunk_shape: ChunkCoordsLike, codecs: Iterable[Codec | dict[str, JSON]] = (BytesCodec(),), index_codecs: Iterable[Codec | dict[str, JSON]] = (BytesCodec(), Crc32cCodec()), index_location: ShardingCodecIndexLocation | str = ShardingCodecIndexLocation.end, ) -> None: chunk_shape_parsed = parse_shapelike(chunk_shape) codecs_parsed = parse_codecs(codecs) index_codecs_parsed = parse_codecs(index_codecs) index_location_parsed = parse_index_location(index_location) object.__setattr__(self, "chunk_shape", chunk_shape_parsed) object.__setattr__(self, "codecs", codecs_parsed) object.__setattr__(self, "index_codecs", index_codecs_parsed) object.__setattr__(self, "index_location", index_location_parsed) # Use instance-local lru_cache to avoid memory leaks object.__setattr__(self, "_get_chunk_spec", lru_cache()(self._get_chunk_spec)) object.__setattr__(self, "_get_index_chunk_spec", lru_cache()(self._get_index_chunk_spec)) object.__setattr__(self, "_get_chunks_per_shard", lru_cache()(self._get_chunks_per_shard)) # todo: typedict return type def __getstate__(self) -> dict[str, Any]: return self.to_dict() def __setstate__(self, state: dict[str, Any]) -> None: config = state["configuration"] object.__setattr__(self, "chunk_shape", parse_shapelike(config["chunk_shape"])) object.__setattr__(self, "codecs", parse_codecs(config["codecs"])) object.__setattr__(self, "index_codecs", parse_codecs(config["index_codecs"])) object.__setattr__(self, "index_location", parse_index_location(config["index_location"])) # Use instance-local lru_cache to avoid memory leaks object.__setattr__(self, "_get_chunk_spec", lru_cache()(self._get_chunk_spec)) object.__setattr__(self, "_get_index_chunk_spec", lru_cache()(self._get_index_chunk_spec)) object.__setattr__(self, "_get_chunks_per_shard", lru_cache()(self._get_chunks_per_shard)) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "sharding_indexed") 
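# A hedged construction sketch (not from the original source): outside of
# from_dict, a sharding codec is typically built from keyword arguments, with
# chunk_shape giving the shape of the inner chunks *within* each shard, e.g.
#
#     codec = ShardingCodec(chunk_shape=(32, 32), codecs=(BytesCodec(),))
#
# which, with the defaults in __init__ above, encodes each 32x32 inner chunk
# through BytesCodec and appends a (BytesCodec, Crc32cCodec)-encoded
# offset/length index at the end of the shard (index_location="end").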
return cls(**configuration_parsed) # type: ignore[arg-type] @property def codec_pipeline(self) -> CodecPipeline: return get_pipeline_class().from_codecs(self.codecs) def to_dict(self) -> dict[str, JSON]: return { "name": "sharding_indexed", "configuration": { "chunk_shape": self.chunk_shape, "codecs": tuple(s.to_dict() for s in self.codecs), "index_codecs": tuple(s.to_dict() for s in self.index_codecs), "index_location": self.index_location.value, }, } def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: shard_spec = self._get_chunk_spec(array_spec) evolved_codecs = tuple(c.evolve_from_array_spec(array_spec=shard_spec) for c in self.codecs) if evolved_codecs != self.codecs: return replace(self, codecs=evolved_codecs) return self def validate(self, *, shape: ChunkCoords, dtype: np.dtype[Any], chunk_grid: ChunkGrid) -> None: if len(self.chunk_shape) != len(shape): raise ValueError( "The shard's `chunk_shape` and array's `shape` need to have the same number of dimensions." ) if not isinstance(chunk_grid, RegularChunkGrid): raise TypeError("Sharding is only compatible with regular chunk grids.") if not all( s % c == 0 for s, c in zip( chunk_grid.chunk_shape, self.chunk_shape, strict=False, ) ): raise ValueError( "The array's `chunk_shape` needs to be divisible by the shard's inner `chunk_shape`." ) async def _decode_single( self, shard_bytes: Buffer, shard_spec: ArraySpec, ) -> NDBuffer: shard_shape = shard_spec.shape chunk_shape = self.chunk_shape chunks_per_shard = self._get_chunks_per_shard(shard_spec) chunk_spec = self._get_chunk_spec(shard_spec) indexer = BasicIndexer( tuple(slice(0, s) for s in shard_shape), shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape), ) # setup output array out = chunk_spec.prototype.nd_buffer.create( shape=shard_shape, dtype=shard_spec.dtype, order=shard_spec.order, fill_value=0 ) shard_dict = await _ShardReader.from_bytes(shard_bytes, self, chunks_per_shard) if shard_dict.index.is_all_empty(): out.fill(shard_spec.fill_value) return out # decoding chunks and writing them into the output buffer await self.codec_pipeline.read( [ ( _ShardingByteGetter(shard_dict, chunk_coords), chunk_spec, chunk_selection, out_selection, is_complete_shard, ) for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer ], out, ) return out async def _decode_partial_single( self, byte_getter: ByteGetter, selection: SelectorTuple, shard_spec: ArraySpec, ) -> NDBuffer | None: shard_shape = shard_spec.shape chunk_shape = self.chunk_shape chunks_per_shard = self._get_chunks_per_shard(shard_spec) chunk_spec = self._get_chunk_spec(shard_spec) indexer = get_indexer( selection, shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape), ) # setup output array out = shard_spec.prototype.nd_buffer.create( shape=indexer.shape, dtype=shard_spec.dtype, order=shard_spec.order, fill_value=0 ) indexed_chunks = list(indexer) all_chunk_coords = {chunk_coords for chunk_coords, *_ in indexed_chunks} # reading bytes of all requested chunks shard_dict: ShardMapping = {} if self._is_total_shard(all_chunk_coords, chunks_per_shard): # read entire shard shard_dict_maybe = await self._load_full_shard_maybe( byte_getter=byte_getter, prototype=chunk_spec.prototype, chunks_per_shard=chunks_per_shard, ) if shard_dict_maybe is None: return None shard_dict = shard_dict_maybe else: # read some chunks within the shard shard_index = await self._load_shard_index_maybe(byte_getter, chunks_per_shard) if shard_index is None: return None shard_dict = {} for 
chunk_coords in all_chunk_coords: chunk_byte_slice = shard_index.get_chunk_slice(chunk_coords) if chunk_byte_slice: chunk_bytes = await byte_getter.get( prototype=chunk_spec.prototype, byte_range=RangeByteRequest(chunk_byte_slice[0], chunk_byte_slice[1]), ) if chunk_bytes: shard_dict[chunk_coords] = chunk_bytes # decoding chunks and writing them into the output buffer await self.codec_pipeline.read( [ ( _ShardingByteGetter(shard_dict, chunk_coords), chunk_spec, chunk_selection, out_selection, is_complete_shard, ) for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer ], out, ) if hasattr(indexer, "sel_shape"): return out.reshape(indexer.sel_shape) else: return out async def _encode_single( self, shard_array: NDBuffer, shard_spec: ArraySpec, ) -> Buffer | None: shard_shape = shard_spec.shape chunk_shape = self.chunk_shape chunks_per_shard = self._get_chunks_per_shard(shard_spec) chunk_spec = self._get_chunk_spec(shard_spec) indexer = list( BasicIndexer( tuple(slice(0, s) for s in shard_shape), shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape), ) ) shard_builder = _ShardBuilder.create_empty(chunks_per_shard) await self.codec_pipeline.write( [ ( _ShardingByteSetter(shard_builder, chunk_coords), chunk_spec, chunk_selection, out_selection, is_complete_shard, ) for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer ], shard_array, ) return await shard_builder.finalize(self.index_location, self._encode_shard_index) async def _encode_partial_single( self, byte_setter: ByteSetter, shard_array: NDBuffer, selection: SelectorTuple, shard_spec: ArraySpec, ) -> None: shard_shape = shard_spec.shape chunk_shape = self.chunk_shape chunks_per_shard = self._get_chunks_per_shard(shard_spec) chunk_spec = self._get_chunk_spec(shard_spec) shard_dict = _MergingShardBuilder( await self._load_full_shard_maybe( byte_getter=byte_setter, prototype=chunk_spec.prototype, chunks_per_shard=chunks_per_shard, ) or _ShardReader.create_empty(chunks_per_shard), _ShardBuilder.create_empty(chunks_per_shard), ) indexer = list( get_indexer( selection, shape=shard_shape, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape) ) ) await self.codec_pipeline.write( [ ( _ShardingByteSetter(shard_dict, chunk_coords), chunk_spec, chunk_selection, out_selection, is_complete_shard, ) for chunk_coords, chunk_selection, out_selection, is_complete_shard in indexer ], shard_array, ) if shard_dict.is_empty(): await byte_setter.delete() else: await byte_setter.set( await shard_dict.finalize( self.index_location, self._encode_shard_index, ) ) def _is_total_shard( self, all_chunk_coords: set[ChunkCoords], chunks_per_shard: ChunkCoords ) -> bool: return len(all_chunk_coords) == product(chunks_per_shard) and all( chunk_coords in all_chunk_coords for chunk_coords in c_order_iter(chunks_per_shard) ) async def _decode_shard_index( self, index_bytes: Buffer, chunks_per_shard: ChunkCoords ) -> _ShardIndex: index_array = next( iter( await get_pipeline_class() .from_codecs(self.index_codecs) .decode( [(index_bytes, self._get_index_chunk_spec(chunks_per_shard))], ) ) ) assert index_array is not None return _ShardIndex(index_array.as_numpy_array()) async def _encode_shard_index(self, index: _ShardIndex) -> Buffer: index_bytes = next( iter( await get_pipeline_class() .from_codecs(self.index_codecs) .encode( [ ( get_ndbuffer_class().from_numpy_array(index.offsets_and_lengths), self._get_index_chunk_spec(index.chunks_per_shard), ) ], ) ) ) assert index_bytes is not None assert 
isinstance(index_bytes, Buffer) return index_bytes def _shard_index_size(self, chunks_per_shard: ChunkCoords) -> int: return ( get_pipeline_class() .from_codecs(self.index_codecs) .compute_encoded_size( 16 * product(chunks_per_shard), self._get_index_chunk_spec(chunks_per_shard) ) ) def _get_index_chunk_spec(self, chunks_per_shard: ChunkCoords) -> ArraySpec: return ArraySpec( shape=chunks_per_shard + (2,), dtype=np.dtype("<u8"), fill_value=MAX_UINT_64, config=ArrayConfig.from_dict({}), prototype=numpy_buffer_prototype(), ) def _get_chunk_spec(self, shard_spec: ArraySpec) -> ArraySpec: return ArraySpec( shape=self.chunk_shape, dtype=shard_spec.dtype, fill_value=shard_spec.fill_value, config=shard_spec.config, prototype=shard_spec.prototype, ) def _get_chunks_per_shard(self, shard_spec: ArraySpec) -> ChunkCoords: return tuple( s // c for s, c in zip( shard_spec.shape, self.chunk_shape, strict=False, ) ) async def _load_shard_index_maybe( self, byte_getter: ByteGetter, chunks_per_shard: ChunkCoords ) -> _ShardIndex | None: shard_index_size = self._shard_index_size(chunks_per_shard) if self.index_location == ShardingCodecIndexLocation.start: index_bytes = await byte_getter.get( prototype=numpy_buffer_prototype(), byte_range=RangeByteRequest(0, shard_index_size), ) else: index_bytes = await byte_getter.get( prototype=numpy_buffer_prototype(), byte_range=SuffixByteRequest(shard_index_size) ) if index_bytes is not None: return await self._decode_shard_index(index_bytes, chunks_per_shard) return None async def _load_shard_index( self, byte_getter: ByteGetter, chunks_per_shard: ChunkCoords ) -> _ShardIndex: return ( await self._load_shard_index_maybe(byte_getter, chunks_per_shard) ) or _ShardIndex.create_empty(chunks_per_shard) async def _load_full_shard_maybe( self, byte_getter: ByteGetter, prototype: BufferPrototype, chunks_per_shard: ChunkCoords ) -> _ShardReader | None: shard_bytes = await byte_getter.get(prototype=prototype) return ( await _ShardReader.from_bytes(shard_bytes, self, chunks_per_shard) if shard_bytes else None ) def compute_encoded_size(self, input_byte_length: int, shard_spec: ArraySpec) -> int: chunks_per_shard = self._get_chunks_per_shard(shard_spec) return input_byte_length + self._shard_index_size(chunks_per_shard) register_codec("sharding_indexed", ShardingCodec) zarr-python-3.0.6/src/zarr/codecs/transpose.py000066400000000000000000000077021476711733500214360ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Iterable from dataclasses import dataclass, replace from typing import TYPE_CHECKING, cast import numpy as np from zarr.abc.codec import ArrayArrayCodec from zarr.core.array_spec import ArraySpec from zarr.core.common import JSON, ChunkCoordsLike, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Any, Self from zarr.core.buffer import NDBuffer from zarr.core.chunk_grids import ChunkGrid def parse_transpose_order(data: JSON | Iterable[int]) -> tuple[int, ...]: if not isinstance(data, Iterable): raise TypeError(f"Expected an iterable. Got {data} instead.") if not all(isinstance(a, int) for a in data): raise TypeError(f"Expected an iterable of integers. Got {data} instead.") return tuple(cast(Iterable[int], data)) @dataclass(frozen=True) class TransposeCodec(ArrayArrayCodec): is_fixed_size = True order: tuple[int, ...]
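# A minimal round-trip sketch for this codec's permutation logic (the order
# tuple here is a hypothetical example; np.argsort(order) is the inverse
# permutation used by _decode_single below):
#
#     order = (2, 0, 1)
#     arr = np.arange(24).reshape(2, 3, 4)
#     encoded = arr.transpose(order)                   # as in _encode_single
#     decoded = encoded.transpose(np.argsort(order))   # as in _decode_single
#     assert (decoded == arr).all()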
def __init__(self, *, order: ChunkCoordsLike) -> None: order_parsed = parse_transpose_order(order) object.__setattr__(self, "order", order_parsed) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "transpose") return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: return {"name": "transpose", "configuration": {"order": tuple(self.order)}} def validate(self, shape: tuple[int, ...], dtype: np.dtype[Any], chunk_grid: ChunkGrid) -> None: if len(self.order) != len(shape): raise ValueError( f"The `order` tuple needs to have as many entries as there are dimensions in the array. Got {self.order}." ) if len(self.order) != len(set(self.order)): raise ValueError( f"There must not be duplicates in the `order` tuple. Got {self.order}." ) if not all(0 <= x < len(shape) for x in self.order): raise ValueError( f"All entries in the `order` tuple must be between 0 and the number of dimensions in the array. Got {self.order}." ) def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: ndim = array_spec.ndim if len(self.order) != ndim: raise ValueError( f"The `order` tuple needs to have as many entries as there are dimensions in the array. Got {self.order}." ) if len(self.order) != len(set(self.order)): raise ValueError( f"There must not be duplicates in the `order` tuple. Got {self.order}." ) if not all(0 <= x < ndim for x in self.order): raise ValueError( f"All entries in the `order` tuple must be between 0 and the number of dimensions in the array. Got {self.order}." ) order = tuple(self.order) if order != self.order: return replace(self, order=order) return self def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec: return ArraySpec( shape=tuple(chunk_spec.shape[self.order[i]] for i in range(chunk_spec.ndim)), dtype=chunk_spec.dtype, fill_value=chunk_spec.fill_value, config=chunk_spec.config, prototype=chunk_spec.prototype, ) async def _decode_single( self, chunk_array: NDBuffer, chunk_spec: ArraySpec, ) -> NDBuffer: inverse_order = np.argsort(self.order) return chunk_array.transpose(inverse_order) async def _encode_single( self, chunk_array: NDBuffer, _chunk_spec: ArraySpec, ) -> NDBuffer | None: return chunk_array.transpose(self.order) def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int: return input_byte_length register_codec("transpose", TransposeCodec) zarr-python-3.0.6/src/zarr/codecs/vlen_utf8.py000066400000000000000000000110611476711733500213230ustar00rootroot00000000000000from __future__ import annotations from dataclasses import dataclass from typing import TYPE_CHECKING from warnings import warn import numpy as np from numcodecs.vlen import VLenBytes, VLenUTF8 from zarr.abc.codec import ArrayBytesCodec from zarr.core.buffer import Buffer, NDBuffer from zarr.core.common import JSON, parse_named_configuration from zarr.core.strings import cast_to_string_dtype from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec # can use a global because there are no parameters _vlen_utf8_codec = VLenUTF8() _vlen_bytes_codec = VLenBytes() @dataclass(frozen=True) class VLenUTF8Codec(ArrayBytesCodec): def __init__(self) -> None: warn( "The codec `vlen-utf8` is currently not part of the Zarr format 3 specification.
It " "may not be supported by other zarr implementations and may change in the future.", category=UserWarning, stacklevel=2, ) super().__init__() @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration( data, "vlen-utf8", require_configuration=False ) configuration_parsed = configuration_parsed or {} return cls(**configuration_parsed) def to_dict(self) -> dict[str, JSON]: return {"name": "vlen-utf8", "configuration": {}} def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: return self async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> NDBuffer: assert isinstance(chunk_bytes, Buffer) raw_bytes = chunk_bytes.as_array_like() decoded = _vlen_utf8_codec.decode(raw_bytes) assert decoded.dtype == np.object_ decoded.shape = chunk_spec.shape # coming out of the code, we know this is safe, so don't issue a warning as_string_dtype = cast_to_string_dtype(decoded, safe=True) return chunk_spec.prototype.nd_buffer.from_numpy_array(as_string_dtype) async def _encode_single( self, chunk_array: NDBuffer, chunk_spec: ArraySpec, ) -> Buffer | None: assert isinstance(chunk_array, NDBuffer) return chunk_spec.prototype.buffer.from_bytes( _vlen_utf8_codec.encode(chunk_array.as_numpy_array()) ) def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int: # what is input_byte_length for an object dtype? raise NotImplementedError("compute_encoded_size is not implemented for VLen codecs") @dataclass(frozen=True) class VLenBytesCodec(ArrayBytesCodec): def __init__(self) -> None: warn( "The codec `vlen-bytes` is currently not part in the Zarr format 3 specification. It " "may not be supported by other zarr implementations and may change in the future.", category=UserWarning, stacklevel=2, ) super().__init__() @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration( data, "vlen-bytes", require_configuration=False ) configuration_parsed = configuration_parsed or {} return cls(**configuration_parsed) def to_dict(self) -> dict[str, JSON]: return {"name": "vlen-bytes", "configuration": {}} def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: return self async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> NDBuffer: assert isinstance(chunk_bytes, Buffer) raw_bytes = chunk_bytes.as_array_like() decoded = _vlen_bytes_codec.decode(raw_bytes) assert decoded.dtype == np.object_ decoded.shape = chunk_spec.shape return chunk_spec.prototype.nd_buffer.from_numpy_array(decoded) async def _encode_single( self, chunk_array: NDBuffer, chunk_spec: ArraySpec, ) -> Buffer | None: assert isinstance(chunk_array, NDBuffer) return chunk_spec.prototype.buffer.from_bytes( _vlen_bytes_codec.encode(chunk_array.as_numpy_array()) ) def compute_encoded_size(self, input_byte_length: int, _chunk_spec: ArraySpec) -> int: # what is input_byte_length for an object dtype? 
raise NotImplementedError("compute_encoded_size is not implemented for VLen codecs") register_codec("vlen-utf8", VLenUTF8Codec) register_codec("vlen-bytes", VLenBytesCodec) zarr-python-3.0.6/src/zarr/codecs/zstd.py000066400000000000000000000057771476711733500204160ustar00rootroot00000000000000from __future__ import annotations import asyncio from dataclasses import dataclass from functools import cached_property from typing import TYPE_CHECKING import numcodecs from numcodecs.zstd import Zstd from packaging.version import Version from zarr.abc.codec import BytesBytesCodec from zarr.core.buffer.cpu import as_numpy_array_wrapper from zarr.core.common import JSON, parse_named_configuration from zarr.registry import register_codec if TYPE_CHECKING: from typing import Self from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer def parse_zstd_level(data: JSON) -> int: if isinstance(data, int): if data >= 23: raise ValueError(f"Value must be less than or equal to 22. Got {data} instead.") return data raise TypeError(f"Got value with type {type(data)}, but expected an int.") def parse_checksum(data: JSON) -> bool: if isinstance(data, bool): return data raise TypeError(f"Expected bool. Got {type(data)}.") @dataclass(frozen=True) class ZstdCodec(BytesBytesCodec): is_fixed_size = True level: int = 0 checksum: bool = False def __init__(self, *, level: int = 0, checksum: bool = False) -> None: # numcodecs 0.13.0 introduces the checksum attribute for the zstd codec _numcodecs_version = Version(numcodecs.__version__) if _numcodecs_version < Version("0.13.0"): raise RuntimeError( "numcodecs version >= 0.13.0 is required to use the zstd codec. " f"Version {_numcodecs_version} is currently installed." ) level_parsed = parse_zstd_level(level) checksum_parsed = parse_checksum(checksum) object.__setattr__(self, "level", level_parsed) object.__setattr__(self, "checksum", checksum_parsed) @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "zstd") return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: return {"name": "zstd", "configuration": {"level": self.level, "checksum": self.checksum}} @cached_property def _zstd_codec(self) -> Zstd: config_dict = {"level": self.level, "checksum": self.checksum} return Zstd.from_config(config_dict) async def _decode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer: return await asyncio.to_thread( as_numpy_array_wrapper, self._zstd_codec.decode, chunk_bytes, chunk_spec.prototype ) async def _encode_single( self, chunk_bytes: Buffer, chunk_spec: ArraySpec, ) -> Buffer | None: return await asyncio.to_thread( as_numpy_array_wrapper, self._zstd_codec.encode, chunk_bytes, chunk_spec.prototype ) def compute_encoded_size(self, _input_byte_length: int, _chunk_spec: ArraySpec) -> int: raise NotImplementedError register_codec("zstd", ZstdCodec) zarr-python-3.0.6/src/zarr/convenience.py000066400000000000000000000013461476711733500204520ustar00rootroot00000000000000""" Convenience helpers. .. warning:: This sub-module is deprecated. All functions here are defined in the top level zarr namespace instead. 
""" import warnings from zarr.api.synchronous import ( consolidate_metadata, copy, copy_all, copy_store, load, open, open_consolidated, save, save_array, save_group, tree, ) __all__ = [ "consolidate_metadata", "copy", "copy_all", "copy_store", "load", "open", "open_consolidated", "save", "save_array", "save_group", "tree", ] warnings.warn( "zarr.convenience is deprecated. " "Import these functions from the top level zarr. namespace instead.", DeprecationWarning, stacklevel=2, ) zarr-python-3.0.6/src/zarr/core/000077500000000000000000000000001476711733500165305ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/core/__init__.py000066400000000000000000000004331476711733500206410ustar00rootroot00000000000000""" The ``zarr.core`` module is considered private API and should not be imported directly by 3rd-party code. """ from __future__ import annotations from zarr.core.buffer import Buffer, NDBuffer # noqa: F401 from zarr.core.codec_pipeline import BatchedCodecPipeline # noqa: F401 zarr-python-3.0.6/src/zarr/core/_info.py000066400000000000000000000113351476711733500201770ustar00rootroot00000000000000import dataclasses import textwrap from typing import Any, Literal import numcodecs.abc import numpy as np from zarr.abc.codec import ArrayArrayCodec, ArrayBytesCodec, BytesBytesCodec from zarr.core.common import ZarrFormat from zarr.core.metadata.v3 import DataType @dataclasses.dataclass(kw_only=True) class GroupInfo: """ Visual summary for a Group. Note that this method and its properties is not part of Zarr's public API. """ _name: str _type: Literal["Group"] = "Group" _zarr_format: ZarrFormat _read_only: bool _store_type: str _count_members: int | None = None _count_arrays: int | None = None _count_groups: int | None = None def __repr__(self) -> str: template = textwrap.dedent("""\ Name : {_name} Type : {_type} Zarr format : {_zarr_format} Read-only : {_read_only} Store type : {_store_type}""") if self._count_members is not None: template += "\nNo. members : {_count_members}" if self._count_arrays is not None: template += "\nNo. arrays : {_count_arrays}" if self._count_groups is not None: template += "\nNo. groups : {_count_groups}" return template.format(**dataclasses.asdict(self)) def human_readable_size(size: int) -> str: if size < 2**10: return f"{size}" elif size < 2**20: return f"{size / float(2**10):.1f}K" elif size < 2**30: return f"{size / float(2**20):.1f}M" elif size < 2**40: return f"{size / float(2**30):.1f}G" elif size < 2**50: return f"{size / float(2**40):.1f}T" else: return f"{size / float(2**50):.1f}P" def byte_info(size: int) -> str: if size < 2**10: return str(size) else: return f"{size} ({human_readable_size(size)})" @dataclasses.dataclass(kw_only=True) class ArrayInfo: """ Visual summary for an Array. Note that this method and its properties is not part of Zarr's public API. """ _type: Literal["Array"] = "Array" _zarr_format: ZarrFormat _data_type: np.dtype[Any] | DataType _shape: tuple[int, ...] _shard_shape: tuple[int, ...] | None = None _chunk_shape: tuple[int, ...] | None = None _order: Literal["C", "F"] _read_only: bool _store_type: str _filters: tuple[numcodecs.abc.Codec, ...] | tuple[ArrayArrayCodec, ...] = () _serializer: ArrayBytesCodec | None = None _compressors: tuple[numcodecs.abc.Codec, ...] | tuple[BytesBytesCodec, ...] 
= () _count_bytes: int | None = None _count_bytes_stored: int | None = None _count_chunks_initialized: int | None = None def __repr__(self) -> str: template = textwrap.dedent("""\ Type : {_type} Zarr format : {_zarr_format} Data type : {_data_type} Shape : {_shape}""") if self._shard_shape is not None: template += textwrap.dedent(""" Shard shape : {_shard_shape}""") template += textwrap.dedent(""" Chunk shape : {_chunk_shape} Order : {_order} Read-only : {_read_only} Store type : {_store_type}""") # We can't use dataclasses.asdict, because we only want a shallow dict kwargs = {field.name: getattr(self, field.name) for field in dataclasses.fields(self)} if self._chunk_shape is None: # for non-regular chunk grids kwargs["_chunk_shape"] = "" template += "\nFilters : {_filters}" if self._serializer is not None: template += "\nSerializer : {_serializer}" template += "\nCompressors : {_compressors}" if self._count_bytes is not None: template += "\nNo. bytes : {_count_bytes}" kwargs["_count_bytes"] = byte_info(self._count_bytes) if self._count_bytes_stored is not None: template += "\nNo. bytes stored : {_count_bytes_stored}" kwargs["_count_bytes_stored"] = byte_info(self._count_bytes_stored) if ( self._count_bytes is not None and self._count_bytes_stored is not None and self._count_bytes_stored > 0 ): template += "\nStorage ratio : {_storage_ratio}" kwargs["_storage_ratio"] = f"{self._count_bytes / self._count_bytes_stored:.1f}" if self._count_chunks_initialized is not None: if self._shard_shape is not None: template += "\nShards Initialized : {_count_chunks_initialized}" else: template += "\nChunks Initialized : {_count_chunks_initialized}" return template.format(**kwargs) zarr-python-3.0.6/src/zarr/core/_tree.py000066400000000000000000000040521476711733500202010ustar00rootroot00000000000000import io import os from collections.abc import Sequence from typing import Any from zarr.core.group import AsyncGroup try: import rich import rich.console import rich.tree except ImportError as e: raise ImportError("'rich' is required for Group.tree") from e class TreeRepr: """ A simple object with a tree-like repr for the Zarr Group. Note that this object and its implementation aren't considered part of Zarr's public API. """ def __init__(self, tree: rich.tree.Tree) -> None: self._tree = tree def __repr__(self) -> str: color_system = os.environ.get("OVERRIDE_COLOR_SYSTEM", rich.get_console().color_system) console = rich.console.Console(file=io.StringIO(), color_system=color_system) console.print(self._tree) return str(console.file.getvalue()) def _repr_mimebundle_( self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any, ) -> dict[str, str]: # For jupyter support. # Unsure why mypy infers the return type to be Any return self._tree._repr_mimebundle_(include=include, exclude=exclude, **kwargs) # type: ignore[no-any-return] async def group_tree_async(group: AsyncGroup, max_depth: int | None = None) -> TreeRepr: tree = rich.tree.Tree(label=f"[bold]{group.name}[/bold]") nodes = {"": tree} members = sorted([x async for x in group.members(max_depth=max_depth)]) for key, node in members: if key.count("/") == 0: parent_key = "" else: parent_key = key.rsplit("/", 1)[0] parent = nodes[parent_key] # We want what the spec calls the node "name", the part excluding all leading # /'s and path segments. But node.name includes all that, so we build it here.
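# e.g. for key "a/b/c": parent_key is "a/b" and the display name is "c"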
name = key.rsplit("/")[-1] if isinstance(node, AsyncGroup): label = f"[bold]{name}[/bold]" else: label = f"[bold]{name}[/bold] {node.shape} {node.dtype}" nodes[key] = parent.add(label) return TreeRepr(tree) zarr-python-3.0.6/src/zarr/core/array.py000066400000000000000000004662041476711733500202340ustar00rootroot00000000000000from __future__ import annotations import json import warnings from asyncio import gather from collections.abc import Iterable from dataclasses import dataclass, field, replace from itertools import starmap from logging import getLogger from typing import ( TYPE_CHECKING, Any, Generic, Literal, TypeAlias, TypedDict, cast, overload, ) from warnings import warn import numcodecs import numcodecs.abc import numpy as np import numpy.typing as npt from typing_extensions import deprecated from zarr._compat import _deprecate_positional_args from zarr.abc.codec import ArrayArrayCodec, ArrayBytesCodec, BytesBytesCodec, Codec from zarr.abc.store import Store, set_or_delete from zarr.codecs._v2 import V2Codec from zarr.core._info import ArrayInfo from zarr.core.array_spec import ArrayConfig, ArrayConfigLike, parse_array_config from zarr.core.attributes import Attributes from zarr.core.buffer import ( BufferPrototype, NDArrayLike, NDBuffer, default_buffer_prototype, ) from zarr.core.buffer.cpu import buffer_prototype as cpu_buffer_prototype from zarr.core.chunk_grids import RegularChunkGrid, _auto_partition, normalize_chunks from zarr.core.chunk_key_encodings import ( ChunkKeyEncoding, ChunkKeyEncodingLike, DefaultChunkKeyEncoding, V2ChunkKeyEncoding, ) from zarr.core.common import ( JSON, ZARR_JSON, ZARRAY_JSON, ZATTRS_JSON, ChunkCoords, MemoryOrder, ShapeLike, ZarrFormat, _default_zarr_format, _warn_order_kwarg, concurrent_map, parse_dtype, parse_order, parse_shapelike, product, ) from zarr.core.config import config as zarr_config from zarr.core.indexing import ( BasicIndexer, BasicSelection, BlockIndex, BlockIndexer, CoordinateIndexer, CoordinateSelection, Fields, Indexer, MaskIndexer, MaskSelection, OIndex, OrthogonalIndexer, OrthogonalSelection, Selection, VIndex, _iter_grid, ceildiv, check_fields, check_no_multi_fields, is_pure_fancy_indexing, is_pure_orthogonal_indexing, is_scalar, pop_fields, ) from zarr.core.metadata import ( ArrayMetadata, ArrayMetadataDict, ArrayV2Metadata, ArrayV2MetadataDict, ArrayV3Metadata, ArrayV3MetadataDict, T_ArrayMetadata, ) from zarr.core.metadata.v2 import ( _default_compressor, _default_filters, parse_compressor, parse_filters, ) from zarr.core.metadata.v3 import DataType, parse_node_type_array from zarr.core.sync import sync from zarr.errors import MetadataValidationError from zarr.registry import ( _parse_array_array_codec, _parse_array_bytes_codec, _parse_bytes_bytes_codec, get_pipeline_class, ) from zarr.storage._common import StorePath, ensure_no_existing_node, make_store_path if TYPE_CHECKING: from collections.abc import Iterator, Sequence from typing import Self from zarr.abc.codec import CodecPipeline from zarr.codecs.sharding import ShardingCodecIndexLocation from zarr.core.group import AsyncGroup from zarr.storage import StoreLike # Array and AsyncArray are defined in the base ``zarr`` namespace __all__ = ["create_codec_pipeline", "parse_array_metadata"] logger = getLogger(__name__) def parse_array_metadata(data: Any) -> ArrayMetadata: if isinstance(data, ArrayMetadata): return data elif isinstance(data, dict): if data["zarr_format"] == 3: meta_out = ArrayV3Metadata.from_dict(data) if len(meta_out.storage_transformers) > 0: msg = ( 
f"Array metadata contains storage transformers: {meta_out.storage_transformers}." "Arrays with storage transformers are not supported in zarr-python at this time." ) raise ValueError(msg) return meta_out elif data["zarr_format"] == 2: return ArrayV2Metadata.from_dict(data) raise TypeError def create_codec_pipeline(metadata: ArrayMetadata) -> CodecPipeline: if isinstance(metadata, ArrayV3Metadata): return get_pipeline_class().from_codecs(metadata.codecs) elif isinstance(metadata, ArrayV2Metadata): v2_codec = V2Codec(filters=metadata.filters, compressor=metadata.compressor) return get_pipeline_class().from_codecs([v2_codec]) else: raise TypeError async def get_array_metadata( store_path: StorePath, zarr_format: ZarrFormat | None = 3 ) -> dict[str, JSON]: if zarr_format == 2: zarray_bytes, zattrs_bytes = await gather( (store_path / ZARRAY_JSON).get(prototype=cpu_buffer_prototype), (store_path / ZATTRS_JSON).get(prototype=cpu_buffer_prototype), ) if zarray_bytes is None: raise FileNotFoundError(store_path) elif zarr_format == 3: zarr_json_bytes = await (store_path / ZARR_JSON).get(prototype=cpu_buffer_prototype) if zarr_json_bytes is None: raise FileNotFoundError(store_path) elif zarr_format is None: zarr_json_bytes, zarray_bytes, zattrs_bytes = await gather( (store_path / ZARR_JSON).get(prototype=cpu_buffer_prototype), (store_path / ZARRAY_JSON).get(prototype=cpu_buffer_prototype), (store_path / ZATTRS_JSON).get(prototype=cpu_buffer_prototype), ) if zarr_json_bytes is not None and zarray_bytes is not None: # warn and favor v3 msg = f"Both zarr.json (Zarr format 3) and .zarray (Zarr format 2) metadata objects exist at {store_path}. Zarr v3 will be used." warnings.warn(msg, stacklevel=1) if zarr_json_bytes is None and zarray_bytes is None: raise FileNotFoundError(store_path) # set zarr_format based on which keys were found if zarr_json_bytes is not None: zarr_format = 3 else: zarr_format = 2 else: raise MetadataValidationError("zarr_format", "2, 3, or None", zarr_format) metadata_dict: dict[str, JSON] if zarr_format == 2: # V2 arrays are comprised of a .zarray and .zattrs objects assert zarray_bytes is not None metadata_dict = json.loads(zarray_bytes.to_bytes()) zattrs_dict = json.loads(zattrs_bytes.to_bytes()) if zattrs_bytes is not None else {} metadata_dict["attributes"] = zattrs_dict else: # V3 arrays are comprised of a zarr.json object assert zarr_json_bytes is not None metadata_dict = json.loads(zarr_json_bytes.to_bytes()) parse_node_type_array(metadata_dict.get("node_type")) return metadata_dict @dataclass(frozen=True) class AsyncArray(Generic[T_ArrayMetadata]): """ An asynchronous array class representing a chunked array stored in a Zarr store. Parameters ---------- metadata : ArrayMetadata The metadata of the array. store_path : StorePath The path to the Zarr store. config : ArrayConfigLike, optional The runtime configuration of the array, by default None. Attributes ---------- metadata : ArrayMetadata The metadata of the array. store_path : StorePath The path to the Zarr store. codec_pipeline : CodecPipeline The codec pipeline used for encoding and decoding chunks. _config : ArrayConfig The runtime configuration of the array. """ metadata: T_ArrayMetadata store_path: StorePath codec_pipeline: CodecPipeline = field(init=False) _config: ArrayConfig @overload def __init__( self: AsyncArray[ArrayV2Metadata], metadata: ArrayV2Metadata | ArrayV2MetadataDict, store_path: StorePath, config: ArrayConfigLike | None = None, ) -> None: ... 
@overload def __init__( self: AsyncArray[ArrayV3Metadata], metadata: ArrayV3Metadata | ArrayV3MetadataDict, store_path: StorePath, config: ArrayConfigLike | None = None, ) -> None: ... def __init__( self, metadata: ArrayMetadata | ArrayMetadataDict, store_path: StorePath, config: ArrayConfigLike | None = None, ) -> None: if isinstance(metadata, dict): zarr_format = metadata["zarr_format"] # TODO: remove this when we extensively type the dict representation of metadata _metadata = cast(dict[str, JSON], metadata) if zarr_format == 2: metadata = ArrayV2Metadata.from_dict(_metadata) elif zarr_format == 3: metadata = ArrayV3Metadata.from_dict(_metadata) else: raise ValueError(f"Invalid zarr_format: {zarr_format}. Expected 2 or 3") metadata_parsed = parse_array_metadata(metadata) config_parsed = parse_array_config(config) object.__setattr__(self, "metadata", metadata_parsed) object.__setattr__(self, "store_path", store_path) object.__setattr__(self, "_config", config_parsed) object.__setattr__(self, "codec_pipeline", create_codec_pipeline(metadata=metadata_parsed)) # this overload defines the function signature when zarr_format is 2 @overload @classmethod async def create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: Literal[2], fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, chunks: ShapeLike | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV2Metadata]: ... # this overload defines the function signature when zarr_format is 3 @overload @classmethod async def create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: Literal[3], fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ShapeLike | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV3Metadata]: ... @overload @classmethod async def create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: Literal[3] = 3, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ShapeLike | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV3Metadata]: ... 
@overload @classmethod async def create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: ZarrFormat, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ShapeLike | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # v2 only chunks: ShapeLike | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata]: ... @classmethod @deprecated("Use zarr.api.asynchronous.create_array instead.") @_deprecate_positional_args async def create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: ZarrFormat = 3, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ShapeLike | None = None, chunk_key_encoding: ( ChunkKeyEncodingLike | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # v2 only chunks: ShapeLike | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Method to create a new asynchronous array instance. .. deprecated:: 3.0.0 Deprecated in favor of :func:`zarr.api.asynchronous.create_array`. Parameters ---------- store : StoreLike The store where the array will be created. shape : ShapeLike The shape of the array. dtype : npt.DTypeLike The data type of the array. zarr_format : ZarrFormat, optional The Zarr format version (default is 3). fill_value : Any, optional The fill value of the array (default is None). attributes : dict[str, JSON], optional The attributes of the array (default is None). chunk_shape : ChunkCoords, optional The shape of the array's chunks. Zarr format 3 only. Zarr format 2 arrays should use `chunks` instead. If not specified, defaults are guessed based on the shape and dtype. chunk_key_encoding : ChunkKeyEncodingLike, optional A specification of how the chunk keys are represented in storage. Zarr format 3 only. Zarr format 2 arrays should use `dimension_separator` instead. Default is ``("default", "/")``. codecs : Sequence of Codecs or dicts, optional An iterable of Codec or dict serializations of Codecs. The elements of this collection specify the transformation from array values to stored bytes. Zarr format 3 only. Zarr format 2 arrays should use ``filters`` and ``compressor`` instead. If no codecs are provided, default codecs will be used: - For numeric arrays, the default is ``BytesCodec`` and ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec`` and ``ZstdCodec``. - For bytes or objects, the default is ``VLenBytesCodec`` and ``ZstdCodec``.
These defaults can be changed by modifying the value of ``array.v3_default_filters``, ``array.v3_default_serializer`` and ``array.v3_default_compressors`` in :mod:`zarr.core.config`. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. chunks : ShapeLike, optional The shape of the array's chunks. Zarr format 2 only. Zarr format 3 arrays should use ``chunk_shape`` instead. If not specified, defaults are guessed based on the shape and dtype. dimension_separator : Literal[".", "/"], optional The dimension separator (default is "."). Zarr format 2 only. Zarr format 3 arrays should use ``chunk_key_encoding`` instead. order : Literal["C", "F"], optional The memory order of the array (default is "C"). If ``zarr_format`` is 2, this parameter sets the memory order of the array. If ``zarr_format`` is 3, then this parameter is deprecated, because memory order is a runtime parameter for Zarr 3 arrays. The recommended way to specify the memory order for Zarr 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. filters : list[dict[str, JSON]], optional Sequence of filters to use to encode chunk data prior to compression. Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. compressor : dict[str, JSON], optional The compressor used to compress the data (default is None). Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead. If no ``compressor`` is provided, a default compressor will be used: - For numeric arrays, the default is ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec``. - For bytes or objects, the default is ``VLenBytesCodec``. These defaults can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. overwrite : bool, optional If True, overwrite any existing data at this location; if False (default), an error is raised if a node already exists. data : npt.ArrayLike, optional The data to be inserted into the array (default is None). config : ArrayConfigLike, optional Runtime configuration for the array. Returns ------- AsyncArray The created asynchronous array instance.
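Examples
--------
A minimal sketch, assuming an in-memory store and execution inside a
coroutine (new code should prefer ``zarr.api.asynchronous.create_array``)::

    from zarr.storage import MemoryStore

    arr = await AsyncArray.create(
        MemoryStore(),
        shape=(100, 100),
        chunk_shape=(10, 10),
        dtype="float32",
    )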
""" return await cls._create( store, # v2 and v3 shape=shape, dtype=dtype, zarr_format=zarr_format, fill_value=fill_value, attributes=attributes, # v3 only chunk_shape=chunk_shape, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, # v2 only chunks=chunks, dimension_separator=dimension_separator, order=order, filters=filters, compressor=compressor, # runtime overwrite=overwrite, data=data, config=config, ) @classmethod async def _create( cls, store: StoreLike, *, # v2 and v3 shape: ShapeLike, dtype: npt.DTypeLike, zarr_format: ZarrFormat = 3, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ShapeLike | None = None, chunk_key_encoding: ( ChunkKeyEncodingLike | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # v2 only chunks: ShapeLike | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, data: npt.ArrayLike | None = None, config: ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Method to create a new asynchronous array instance. See :func:`AsyncArray.create` for more details. Deprecated in favor of :func:`zarr.api.asynchronous.create_array`. """ store_path = await make_store_path(store) dtype_parsed = parse_dtype(dtype, zarr_format) shape = parse_shapelike(shape) if chunks is not None and chunk_shape is not None: raise ValueError("Only one of chunk_shape or chunks can be provided.") if chunks: _chunks = normalize_chunks(chunks, shape, dtype_parsed.itemsize) else: _chunks = normalize_chunks(chunk_shape, shape, dtype_parsed.itemsize) config_parsed = parse_array_config(config) result: AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] if zarr_format == 3: if dimension_separator is not None: raise ValueError( "dimension_separator cannot be used for arrays with zarr_format 3. Use chunk_key_encoding instead." ) if filters is not None: raise ValueError( "filters cannot be used for arrays with zarr_format 3. Use array-to-array codecs instead." ) if compressor is not None: raise ValueError( "compressor cannot be used for arrays with zarr_format 3. Use bytes-to-bytes codecs instead." ) if order is not None: _warn_order_kwarg() result = await cls._create_v3( store_path, shape=shape, dtype=dtype_parsed, chunk_shape=_chunks, fill_value=fill_value, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, attributes=attributes, overwrite=overwrite, config=config_parsed, ) elif zarr_format == 2: if codecs is not None: raise ValueError( "codecs cannot be used for arrays with zarr_format 2. Use filters and compressor instead." ) if chunk_key_encoding is not None: raise ValueError( "chunk_key_encoding cannot be used for arrays with zarr_format 2. Use dimension_separator instead." 
) if dimension_names is not None: raise ValueError("dimension_names cannot be used for arrays with zarr_format 2.") if order is None: order_parsed = parse_order(zarr_config.get("array.order")) else: order_parsed = order result = await cls._create_v2( store_path, shape=shape, dtype=dtype_parsed, chunks=_chunks, dimension_separator=dimension_separator, fill_value=fill_value, order=order_parsed, config=config_parsed, filters=filters, compressor=compressor, attributes=attributes, overwrite=overwrite, ) else: raise ValueError(f"Unsupported zarr_format. Got: {zarr_format}") if data is not None: # insert user-provided data await result.setitem(..., data) return result @staticmethod def _create_metadata_v3( shape: ShapeLike, dtype: np.dtype[Any], chunk_shape: ChunkCoords, fill_value: Any | None = None, chunk_key_encoding: ChunkKeyEncodingLike | None = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, attributes: dict[str, JSON] | None = None, ) -> ArrayV3Metadata: """ Create an instance of ArrayV3Metadata. """ shape = parse_shapelike(shape) codecs = list(codecs) if codecs is not None else _get_default_codecs(np.dtype(dtype)) chunk_key_encoding_parsed: ChunkKeyEncodingLike if chunk_key_encoding is None: chunk_key_encoding_parsed = {"name": "default", "separator": "/"} else: chunk_key_encoding_parsed = chunk_key_encoding if dtype.kind in "UTS": warn( f"The dtype `{dtype}` is currently not part of the Zarr format 3 specification. It " "may not be supported by other zarr implementations and may change in the future.", category=UserWarning, stacklevel=2, ) chunk_grid_parsed = RegularChunkGrid(chunk_shape=chunk_shape) return ArrayV3Metadata( shape=shape, data_type=dtype, chunk_grid=chunk_grid_parsed, chunk_key_encoding=chunk_key_encoding_parsed, fill_value=fill_value, codecs=codecs, dimension_names=tuple(dimension_names) if dimension_names else None, attributes=attributes or {}, ) @classmethod async def _create_v3( cls, store_path: StorePath, *, shape: ShapeLike, dtype: np.dtype[Any], chunk_shape: ChunkCoords, config: ArrayConfig, fill_value: Any | None = None, chunk_key_encoding: ( ChunkKeyEncodingLike | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, attributes: dict[str, JSON] | None = None, overwrite: bool = False, ) -> AsyncArray[ArrayV3Metadata]: if overwrite: if store_path.store.supports_deletes: await store_path.delete_dir() else: await ensure_no_existing_node(store_path, zarr_format=3) else: await ensure_no_existing_node(store_path, zarr_format=3) if isinstance(chunk_key_encoding, tuple): chunk_key_encoding = ( V2ChunkKeyEncoding(separator=chunk_key_encoding[1]) if chunk_key_encoding[0] == "v2" else DefaultChunkKeyEncoding(separator=chunk_key_encoding[1]) ) metadata = cls._create_metadata_v3( shape=shape, dtype=dtype, chunk_shape=chunk_shape, fill_value=fill_value, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, attributes=attributes, ) array = cls(metadata=metadata, store_path=store_path, config=config) await array._save_metadata(metadata, ensure_parents=True) return array @staticmethod def _create_metadata_v2( shape: ChunkCoords, dtype: np.dtype[Any], chunks: ChunkCoords, order: MemoryOrder, dimension_separator: Literal[".", "/"] | None = None, fill_value: float | None = None, filters: Iterable[dict[str, JSON] | numcodecs.abc.Codec] | None
= None, compressor: dict[str, JSON] | numcodecs.abc.Codec | None = None, attributes: dict[str, JSON] | None = None, ) -> ArrayV2Metadata: if dimension_separator is None: dimension_separator = "." dtype = parse_dtype(dtype, zarr_format=2) # inject VLenUTF8 for str dtype if not already present if np.issubdtype(dtype, np.str_): filters = filters or [] from numcodecs.vlen import VLenUTF8 if not any(isinstance(x, VLenUTF8) or x["id"] == "vlen-utf8" for x in filters): filters = list(filters) + [VLenUTF8()] return ArrayV2Metadata( shape=shape, dtype=np.dtype(dtype), chunks=chunks, order=order, dimension_separator=dimension_separator, fill_value=fill_value, compressor=compressor, filters=filters, attributes=attributes, ) @classmethod async def _create_v2( cls, store_path: StorePath, *, shape: ChunkCoords, dtype: np.dtype[Any], chunks: ChunkCoords, order: MemoryOrder, config: ArrayConfig, dimension_separator: Literal[".", "/"] | None = None, fill_value: float | None = None, filters: Iterable[dict[str, JSON] | numcodecs.abc.Codec] | None = None, compressor: dict[str, JSON] | numcodecs.abc.Codec | None = None, attributes: dict[str, JSON] | None = None, overwrite: bool = False, ) -> AsyncArray[ArrayV2Metadata]: if overwrite: if store_path.store.supports_deletes: await store_path.delete_dir() else: await ensure_no_existing_node(store_path, zarr_format=2) else: await ensure_no_existing_node(store_path, zarr_format=2) metadata = cls._create_metadata_v2( shape=shape, dtype=dtype, chunks=chunks, order=order, dimension_separator=dimension_separator, fill_value=fill_value, filters=filters, compressor=compressor, attributes=attributes, ) array = cls(metadata=metadata, store_path=store_path, config=config) await array._save_metadata(metadata, ensure_parents=True) return array @classmethod def from_dict( cls, store_path: StorePath, data: dict[str, JSON], ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata]: """ Create a Zarr array from a dictionary, with support for both Zarr format 2 and 3 metadata. Parameters ---------- store_path : StorePath The path within the store where the array should be created. data : dict A dictionary representing the array data. This dictionary should include necessary metadata for the array, such as shape, dtype, and other attributes. The format of the metadata will determine whether a Zarr format 2 or 3 array is created. Returns ------- AsyncArray[ArrayV3Metadata] or AsyncArray[ArrayV2Metadata] The created Zarr array, either using Zarr format 2 or 3 metadata based on the provided data. Raises ------ ValueError If the dictionary data is invalid or incompatible with either Zarr format 2 or 3 array creation. """ metadata = parse_array_metadata(data) return cls(metadata=metadata, store_path=store_path) @classmethod async def open( cls, store: StoreLike, zarr_format: ZarrFormat | None = 3, ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata]: """ Async method to open an existing Zarr array from a given store. Parameters ---------- store : StoreLike The store containing the Zarr array. zarr_format : ZarrFormat | None, optional The Zarr format version (default is 3). Returns ------- AsyncArray The opened Zarr array. 
Examples -------- >>> import zarr >>> store = zarr.storage.MemoryStore(mode='w') >>> async_arr = await AsyncArray.open(store) # doctest: +ELLIPSIS """ store_path = await make_store_path(store) metadata_dict = await get_array_metadata(store_path, zarr_format=zarr_format) # TODO: remove this cast when we have better type hints _metadata_dict = cast(ArrayV3MetadataDict, metadata_dict) return cls(store_path=store_path, metadata=_metadata_dict) @property def store(self) -> Store: return self.store_path.store @property def ndim(self) -> int: """Returns the number of dimensions in the Array. Returns ------- int The number of dimensions in the Array. """ return len(self.metadata.shape) @property def shape(self) -> ChunkCoords: """Returns the shape of the Array. Returns ------- tuple The shape of the Array. """ return self.metadata.shape @property def chunks(self) -> ChunkCoords: """Returns the chunk shape of the Array. If sharding is used the inner chunk shape is returned. Only defined for arrays using `RegularChunkGrid`. If the array doesn't use `RegularChunkGrid`, `NotImplementedError` is raised. Returns ------- ChunkCoords The chunk shape of the Array. """ return self.metadata.chunks @property def shards(self) -> ChunkCoords | None: """Returns the shard shape of the Array. Returns None if sharding is not used. Only defined for arrays using `RegularChunkGrid`. If the array doesn't use `RegularChunkGrid`, `NotImplementedError` is raised. Returns ------- ChunkCoords The shard shape of the Array. """ return self.metadata.shards @property def size(self) -> int: """Returns the total number of elements in the array. Returns ------- int Total number of elements in the array. """ return np.prod(self.metadata.shape).item() @property def filters(self) -> tuple[numcodecs.abc.Codec, ...] | tuple[ArrayArrayCodec, ...]: """ Filters that are applied to each chunk of the array, in order, before serializing that chunk to bytes. """ if self.metadata.zarr_format == 2: filters = self.metadata.filters if filters is None: return () return filters return tuple( codec for codec in self.metadata.inner_codecs if isinstance(codec, ArrayArrayCodec) ) @property def serializer(self) -> ArrayBytesCodec | None: """ Array-to-bytes codec to use for serializing the chunks into bytes. """ if self.metadata.zarr_format == 2: return None return next( codec for codec in self.metadata.inner_codecs if isinstance(codec, ArrayBytesCodec) ) @property @deprecated("Use AsyncArray.compressors instead.") def compressor(self) -> numcodecs.abc.Codec | None: """ Compressor that is applied to each chunk of the array. .. deprecated:: 3.0.0 `array.compressor` is deprecated and will be removed in a future release. Use `array.compressors` instead. """ if self.metadata.zarr_format == 2: return self.metadata.compressor raise TypeError("`compressor` is not available for Zarr format 3 arrays.") @property def compressors(self) -> tuple[numcodecs.abc.Codec, ...] | tuple[BytesBytesCodec, ...]: """ Compressors that are applied to each chunk of the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. """ if self.metadata.zarr_format == 2: if self.metadata.compressor is not None: return (self.metadata.compressor,) return () return tuple( codec for codec in self.metadata.inner_codecs if isinstance(codec, BytesBytesCodec) ) @property def dtype(self) -> np.dtype[Any]: """Returns the data type of the array.
Returns ------- np.dtype Data type of the array """ return self.metadata.dtype @property def order(self) -> MemoryOrder: """Returns the memory order of the array. Returns ------- MemoryOrder Memory order of the array """ return self._config.order @property def attrs(self) -> dict[str, JSON]: """Returns the attributes of the array. Returns ------- dict Attributes of the array """ return self.metadata.attributes @property def read_only(self) -> bool: """Returns True if the array is read-only. Returns ------- bool True if the array is read-only """ # Backwards compatibility for 2.x return self.store_path.read_only @property def path(self) -> str: """Storage path. Returns ------- str The path to the array in the Zarr store. """ return self.store_path.path @property def name(self) -> str: """Array name following h5py convention. Returns ------- str The name of the array. """ # follow h5py convention: add leading slash name = self.path if not name.startswith("/"): name = "/" + name return name @property def basename(self) -> str: """Final component of name. Returns ------- str The basename or final component of the array name. """ return self.name.split("/")[-1] @property def cdata_shape(self) -> ChunkCoords: """ The shape of the chunk grid for this array. Returns ------- Tuple[int] The shape of the chunk grid for this array. """ return tuple(starmap(ceildiv, zip(self.shape, self.chunks, strict=False))) @property def nchunks(self) -> int: """ The number of chunks in the stored representation of this array. Returns ------- int The total number of chunks in the array. """ return product(self.cdata_shape) async def nchunks_initialized(self) -> int: """ Calculate the number of chunks that have been initialized, i.e. the number of chunks that have been persisted to the storage backend. Returns ------- nchunks_initialized : int The number of chunks that have been initialized. Notes ----- On :class:`AsyncArray` this is an asynchronous method, unlike the (synchronous) property :attr:`Array.nchunks_initialized`. Examples -------- >>> arr = await zarr.api.asynchronous.create(shape=(10,), chunks=(2,)) >>> await arr.nchunks_initialized() 0 >>> await arr.setitem(slice(5), 1) >>> await arr.nchunks_initialized() 3 """ return len(await chunks_initialized(self)) async def nbytes_stored(self) -> int: return await self.store_path.store.getsize_prefix(self.store_path.path) def _iter_chunk_coords( self, *, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[ChunkCoords]: """ Create an iterator over the coordinates of chunks in chunk grid space. If the `origin` keyword is used, iteration will start at the chunk index specified by `origin`. The default behavior is to start at the origin of the grid coordinate space. If the `selection_shape` keyword is used, iteration will be bounded over a contiguous region ranging from `[origin, origin + selection_shape]`, where the upper bound is exclusive as per python indexing conventions. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. Yields ------ chunk_coords: ChunkCoords The coordinates of each chunk in the selection.
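Examples -------- A small sketch (the array shape and chunking are illustrative; row-major iteration order over the chunk grid is assumed):: >>> import zarr >>> arr = await zarr.api.asynchronous.create(shape=(4, 4), chunks=(2, 2)) >>> list(arr._iter_chunk_coords()) [(0, 0), (0, 1), (1, 0), (1, 1)]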
""" return _iter_grid(self.cdata_shape, origin=origin, selection_shape=selection_shape) def _iter_chunk_keys( self, *, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[str]: """ Iterate over the storage keys of each chunk, relative to an optional origin, and optionally limited to a contiguous region in chunk grid coordinates. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. Yields ------ key: str The storage key of each chunk in the selection. """ # Iterate over the coordinates of chunks in chunk grid space. for k in self._iter_chunk_coords(origin=origin, selection_shape=selection_shape): # Encode the chunk key from the chunk coordinates. yield self.metadata.encode_chunk_key(k) def _iter_chunk_regions( self, *, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[tuple[slice, ...]]: """ Iterate over the regions spanned by each chunk. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. Yields ------ region: tuple[slice, ...] A tuple of slice objects representing the region spanned by each chunk in the selection. """ for cgrid_position in self._iter_chunk_coords( origin=origin, selection_shape=selection_shape ): out: tuple[slice, ...] = () for c_pos, c_shape in zip(cgrid_position, self.chunks, strict=False): start = c_pos * c_shape stop = start + c_shape out += (slice(start, stop, 1),) yield out @property def nbytes(self) -> int: """ The total number of bytes that can be stored in the chunks of this array. Notes ----- This value is calculated by multiplying the number of elements in the array and the size of each element, the latter of which is determined by the dtype of the array. For this reason, ``nbytes`` will likely be inaccurate for arrays with variable-length dtypes. It is not possible to determine the size of an array with variable-length elements from the shape and dtype alone. """ return self.size * self.dtype.itemsize async def _get_selection( self, indexer: Indexer, *, prototype: BufferPrototype, out: NDBuffer | None = None, fields: Fields | None = None, ) -> NDArrayLike: # check fields are sensible out_dtype = check_fields(fields, self.dtype) # setup output buffer if out is not None: if isinstance(out, NDBuffer): out_buffer = out else: raise TypeError(f"out argument needs to be an NDBuffer. Got {type(out)!r}") if out_buffer.shape != indexer.shape: raise ValueError( f"shape of out argument doesn't match. 
Expected {indexer.shape}, got {out.shape}" ) else: out_buffer = prototype.nd_buffer.create( shape=indexer.shape, dtype=out_dtype, order=self._config.order, fill_value=self.metadata.fill_value, ) if product(indexer.shape) > 0: # need to use the order from the metadata for v2 _config = self._config if self.metadata.zarr_format == 2: _config = replace(_config, order=self.metadata.order) # reading chunks and decoding them await self.codec_pipeline.read( [ ( self.store_path / self.metadata.encode_chunk_key(chunk_coords), self.metadata.get_chunk_spec(chunk_coords, _config, prototype=prototype), chunk_selection, out_selection, is_complete_chunk, ) for chunk_coords, chunk_selection, out_selection, is_complete_chunk in indexer ], out_buffer, drop_axes=indexer.drop_axes, ) return out_buffer.as_ndarray_like() async def getitem( self, selection: BasicSelection, *, prototype: BufferPrototype | None = None, ) -> NDArrayLike: """ Asynchronous function that retrieves a subset of the array's data based on the provided selection. Parameters ---------- selection : BasicSelection A selection object specifying the subset of data to retrieve. prototype : BufferPrototype, optional A buffer prototype to use for the retrieved data (default is None). Returns ------- NDArrayLike The retrieved subset of the array's data. Examples -------- >>> import zarr >>> store = zarr.storage.MemoryStore(mode='w') >>> async_arr = await zarr.api.asynchronous.create_array( ... store=store, ... shape=(100,100), ... chunks=(10,10), ... dtype='i4', ... fill_value=0) >>> await async_arr.getitem((0,1)) # doctest: +ELLIPSIS array(0, dtype=int32) """ if prototype is None: prototype = default_buffer_prototype() indexer = BasicIndexer( selection, shape=self.metadata.shape, chunk_grid=self.metadata.chunk_grid, ) return await self._get_selection(indexer, prototype=prototype) async def _save_metadata(self, metadata: ArrayMetadata, ensure_parents: bool = False) -> None: """ Asynchronously save the array metadata. """ to_save = metadata.to_buffer_dict(cpu_buffer_prototype) awaitables = [set_or_delete(self.store_path / key, value) for key, value in to_save.items()] if ensure_parents: # To enable zarr.create(store, path="a/b/c"), we need to create all the intermediate groups. parents = _build_parents(self) for parent in parents: awaitables.extend( [ (parent.store_path / key).set_if_not_exists(value) for key, value in parent.metadata.to_buffer_dict( cpu_buffer_prototype ).items() ] ) await gather(*awaitables) async def _set_selection( self, indexer: Indexer, value: npt.ArrayLike, *, prototype: BufferPrototype, fields: Fields | None = None, ) -> None: # check fields are sensible check_fields(fields, self.dtype) fields = check_no_multi_fields(fields) # check value shape if np.isscalar(value): array_like = prototype.buffer.create_zero_length().as_array_like() if isinstance(array_like, np._typing._SupportsArrayFunc): # TODO: need to handle array types that don't support __array_function__ # like PyTorch and JAX array_like_ = cast(np._typing._SupportsArrayFunc, array_like) value = np.asanyarray(value, dtype=self.metadata.dtype, like=array_like_) else: if not hasattr(value, "shape"): value = np.asarray(value, self.metadata.dtype) # assert ( # value.shape == indexer.shape # ), f"shape of value doesn't match indexer shape. 
Expected {indexer.shape}, got {value.shape}" if not hasattr(value, "dtype") or value.dtype.name != self.metadata.dtype.name: if hasattr(value, "astype"): # Handle things that are already NDArrayLike more efficiently value = value.astype(dtype=self.metadata.dtype, order="A") else: value = np.array(value, dtype=self.metadata.dtype, order="A") value = cast(NDArrayLike, value) # We accept any ndarray like object from the user and convert it # to a NDBuffer (or subclass). From this point onwards, we only pass # Buffer and NDBuffer between components. value_buffer = prototype.nd_buffer.from_ndarray_like(value) # need to use the order from the metadata for v2 _config = self._config if self.metadata.zarr_format == 2: _config = replace(_config, order=self.metadata.order) # merging with existing data and encoding chunks await self.codec_pipeline.write( [ ( self.store_path / self.metadata.encode_chunk_key(chunk_coords), self.metadata.get_chunk_spec(chunk_coords, _config, prototype), chunk_selection, out_selection, is_complete_chunk, ) for chunk_coords, chunk_selection, out_selection, is_complete_chunk in indexer ], value_buffer, drop_axes=indexer.drop_axes, ) async def setitem( self, selection: BasicSelection, value: npt.ArrayLike, prototype: BufferPrototype | None = None, ) -> None: """ Asynchronously set values in the array using basic indexing. Parameters ---------- selection : BasicSelection The selection defining the region of the array to set. value : numpy.typing.ArrayLike The values to be written into the selected region of the array. prototype : BufferPrototype or None, optional A prototype buffer that defines the structure and properties of the array chunks being modified. If None, the default buffer prototype is used. Default is None. Returns ------- None This method does not return any value. Raises ------ IndexError If the selection is out of bounds for the array. ValueError If the values are not compatible with the array's dtype or shape. Notes ----- - This method is asynchronous and should be awaited. - Supports basic indexing, where the selection is contiguous and does not involve advanced indexing. """ if prototype is None: prototype = default_buffer_prototype() indexer = BasicIndexer( selection, shape=self.metadata.shape, chunk_grid=self.metadata.chunk_grid, ) return await self._set_selection(indexer, value, prototype=prototype) async def resize(self, new_shape: ShapeLike, delete_outside_chunks: bool = True) -> None: """ Asynchronously resize the array to a new shape. Parameters ---------- new_shape : ShapeLike The desired new shape of the array. delete_outside_chunks : bool, optional If True (default), chunks that fall outside the new shape will be deleted. If False, the data in those chunks will be preserved. Returns ------- None This method does not return any value. Raises ------ ValueError If the new shape is incompatible with the current array's chunking configuration. Notes ----- - This method is asynchronous and should be awaited.
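Examples -------- A minimal sketch of shrinking an array (parameters illustrative; chunks falling outside the new shape are deleted by default):: >>> arr = await zarr.api.asynchronous.create(shape=(10,), chunks=(2,)) >>> await arr.resize((5,)) >>> arr.shape (5,)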
""" new_shape = parse_shapelike(new_shape) assert len(new_shape) == len(self.metadata.shape) new_metadata = self.metadata.update_shape(new_shape) if delete_outside_chunks: # Remove all chunks outside of the new shape old_chunk_coords = set(self.metadata.chunk_grid.all_chunk_coords(self.metadata.shape)) new_chunk_coords = set(self.metadata.chunk_grid.all_chunk_coords(new_shape)) async def _delete_key(key: str) -> None: await (self.store_path / key).delete() await concurrent_map( [ (self.metadata.encode_chunk_key(chunk_coords),) for chunk_coords in old_chunk_coords.difference(new_chunk_coords) ], _delete_key, zarr_config.get("async.concurrency"), ) # Write new metadata await self._save_metadata(new_metadata) # Update metadata (in place) object.__setattr__(self, "metadata", new_metadata) async def append(self, data: npt.ArrayLike, axis: int = 0) -> ChunkCoords: """Append `data` to `axis`. Parameters ---------- data : array-like Data to be appended. axis : int Axis along which to append. Returns ------- new_shape : tuple Notes ----- The size of all dimensions other than `axis` must match between this array and `data`. """ # ensure data is array-like if not hasattr(data, "shape"): data = np.asanyarray(data) self_shape_preserved = tuple(s for i, s in enumerate(self.shape) if i != axis) data_shape_preserved = tuple(s for i, s in enumerate(data.shape) if i != axis) if self_shape_preserved != data_shape_preserved: raise ValueError( f"shape of data to append is not compatible with the array. " f"The shape of the data is ({data_shape_preserved})" f"and the shape of the array is ({self_shape_preserved})." "All dimensions must match except for the dimension being " "appended." ) # remember old shape old_shape = self.shape # determine new shape new_shape = tuple( self.shape[i] if i != axis else self.shape[i] + data.shape[i] for i in range(len(self.shape)) ) # resize await self.resize(new_shape) # store data append_selection = tuple( slice(None) if i != axis else slice(old_shape[i], new_shape[i]) for i in range(len(self.shape)) ) await self.setitem(append_selection, data) return new_shape async def update_attributes(self, new_attributes: dict[str, JSON]) -> Self: """ Asynchronously update the array's attributes. Parameters ---------- new_attributes : dict of str to JSON A dictionary of new attributes to update or add to the array. The keys represent attribute names, and the values must be JSON-compatible. Returns ------- AsyncArray The array with the updated attributes. Raises ------ ValueError If the attributes are invalid or incompatible with the array's metadata. Notes ----- - This method is asynchronous and should be awaited. - The updated attributes will be merged with existing attributes, and any conflicts will be overwritten by the new values. """ self.metadata.attributes.update(new_attributes) # Write new metadata await self._save_metadata(self.metadata) return self def __repr__(self) -> str: return f"" @property def info(self) -> Any: """ Return the statically known information for an array. Returns ------- ArrayInfo See Also -------- AsyncArray.info_complete All information about a group, including dynamic information like the number of bytes and chunks written. Examples -------- >>> arr = await zarr.api.asynchronous.create( ... path="array", shape=(3, 4, 5), chunks=(2, 2, 2)) ... ) >>> arr.info Type : Array Zarr format : 3 Data type : DataType.float64 Shape : (3, 4, 5) Chunk shape : (2, 2, 2) Order : C Read-only : False Store type : MemoryStore Codecs : [{'endian': }] No. 
bytes : 480 """ return self._info() async def info_complete(self) -> Any: """ Return all the information for an array, including dynamic information like the storage size. In addition to the static information, this provides - The count of chunks initialized - The sum of the bytes written Returns ------- ArrayInfo See Also -------- AsyncArray.info A property giving just the statically known information about an array. """ return self._info( await self.nchunks_initialized(), await self.store_path.store.getsize_prefix(self.store_path.path), ) def _info( self, count_chunks_initialized: int | None = None, count_bytes_stored: int | None = None ) -> Any: _data_type: np.dtype[Any] | DataType if isinstance(self.metadata, ArrayV2Metadata): _data_type = self.metadata.dtype else: _data_type = self.metadata.data_type return ArrayInfo( _zarr_format=self.metadata.zarr_format, _data_type=_data_type, _shape=self.shape, _order=self.order, _shard_shape=self.shards, _chunk_shape=self.chunks, _read_only=self.read_only, _compressors=self.compressors, _filters=self.filters, _serializer=self.serializer, _store_type=type(self.store_path.store).__name__, _count_bytes=self.nbytes, _count_bytes_stored=count_bytes_stored, _count_chunks_initialized=count_chunks_initialized, ) # TODO: Array can be a frozen data class again once property setters (e.g. shape) are removed @dataclass(frozen=False) class Array: """Instantiate an array from an initialized store.""" _async_array: AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] @classmethod @deprecated("Use zarr.create_array instead.") @_deprecate_positional_args def create( cls, store: StoreLike, *, # v2 and v3 shape: ChunkCoords, dtype: npt.DTypeLike, zarr_format: ZarrFormat = 3, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ChunkCoords | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # v2 only chunks: ChunkCoords | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, config: ArrayConfigLike | None = None, ) -> Array: """Creates a new Array instance from an initialized store. .. deprecated:: 3.0.0 Deprecated in favor of :func:`zarr.create_array`. Parameters ---------- store : StoreLike The array store that has already been initialized. shape : ChunkCoords The shape of the array. dtype : npt.DTypeLike The data type of the array. chunk_shape : ChunkCoords, optional The shape of the Array's chunks. Zarr format 3 only. Zarr format 2 arrays should use `chunks` instead. If not specified, defaults are guessed based on the shape and dtype. chunk_key_encoding : ChunkKeyEncodingLike, optional A specification of how the chunk keys are represented in storage. Zarr format 3 only. Zarr format 2 arrays should use `dimension_separator` instead. Default is ``("default", "/")``. codecs : Sequence of Codecs or dicts, optional An iterable of Codec or dict serializations of Codecs. The elements of this collection specify the transformation from array values to stored bytes. Zarr format 3 only. Zarr format 2 arrays should use ``filters`` and ``compressor`` instead. 
If no codecs are provided, default codecs will be used: - For numeric arrays, the default is ``BytesCodec`` and ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec`` and ``ZstdCodec``. - For bytes or objects, the default is ``VLenBytesCodec`` and ``ZstdCodec``. These defaults can be changed by modifying the value of ``array.v3_default_filters``, ``array.v3_default_serializer`` and ``array.v3_default_compressors`` in :mod:`zarr.core.config`. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. chunks : ChunkCoords, optional The shape of the array's chunks. Zarr format 2 only. Zarr format 3 arrays should use ``chunk_shape`` instead. If not specified, defaults are guessed based on the shape and dtype. dimension_separator : Literal[".", "/"], optional The dimension separator (default is "."). Zarr format 2 only. Zarr format 3 arrays should use ``chunk_key_encoding`` instead. order : Literal["C", "F"], optional The memory order of the array (default is "C"). If ``zarr_format`` is 2, this parameter sets the memory order of the array. If ``zarr_format`` is 3, then this parameter is deprecated, because memory order is a runtime parameter for Zarr 3 arrays. The recommended way to specify the memory order for Zarr 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. filters : list[dict[str, JSON]], optional Sequence of filters to use to encode chunk data prior to compression. Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. compressor : dict[str, JSON], optional Primary compressor to compress chunk data. Zarr format 2 only. Zarr format 3 arrays should use ``codecs`` instead. If no ``compressor`` is provided, a default compressor will be used: - For numeric arrays, the default is ``ZstdCodec``. - For Unicode strings, the default is ``VLenUTF8Codec``. - For bytes or objects, the default is ``VLenBytesCodec``. These defaults can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. overwrite : bool, optional Whether to overwrite an array with the same name in the store, if one exists (default is False). Returns ------- Array Array created from the store. 
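Examples -------- A minimal sketch using this deprecated constructor (the store and parameters are illustrative; prefer :func:`zarr.create_array` in new code):: >>> import zarr >>> arr = zarr.Array.create( ... zarr.storage.MemoryStore(), shape=(4, 4), chunk_shape=(2, 2), dtype="i4" ... ) >>> arr.shape (4, 4)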
""" return cls._create( store, # v2 and v3 shape=shape, dtype=dtype, zarr_format=zarr_format, attributes=attributes, fill_value=fill_value, # v3 only chunk_shape=chunk_shape, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, # v2 only chunks=chunks, dimension_separator=dimension_separator, order=order, filters=filters, compressor=compressor, # runtime overwrite=overwrite, config=config, ) @classmethod def _create( cls, store: StoreLike, *, # v2 and v3 shape: ChunkCoords, dtype: npt.DTypeLike, zarr_format: ZarrFormat = 3, fill_value: Any | None = None, attributes: dict[str, JSON] | None = None, # v3 only chunk_shape: ChunkCoords | None = None, chunk_key_encoding: ( ChunkKeyEncoding | tuple[Literal["default"], Literal[".", "/"]] | tuple[Literal["v2"], Literal[".", "/"]] | None ) = None, codecs: Iterable[Codec | dict[str, JSON]] | None = None, dimension_names: Iterable[str] | None = None, # v2 only chunks: ChunkCoords | None = None, dimension_separator: Literal[".", "/"] | None = None, order: MemoryOrder | None = None, filters: list[dict[str, JSON]] | None = None, compressor: dict[str, JSON] | None = None, # runtime overwrite: bool = False, config: ArrayConfigLike | None = None, ) -> Array: """Creates a new Array instance from an initialized store. See :func:`Array.create` for more details. Deprecated in favor of :func:`zarr.create_array`. """ async_array = sync( AsyncArray._create( store=store, shape=shape, dtype=dtype, zarr_format=zarr_format, attributes=attributes, fill_value=fill_value, chunk_shape=chunk_shape, chunk_key_encoding=chunk_key_encoding, codecs=codecs, dimension_names=dimension_names, chunks=chunks, dimension_separator=dimension_separator, order=order, filters=filters, compressor=compressor, overwrite=overwrite, config=config, ), ) return cls(async_array) @classmethod def from_dict( cls, store_path: StorePath, data: dict[str, JSON], ) -> Array: """ Create a Zarr array from a dictionary. Parameters ---------- store_path : StorePath The path within the store where the array should be created. data : dict A dictionary representing the array data. This dictionary should include necessary metadata for the array, such as shape, dtype, fill value, and attributes. Returns ------- Array The created Zarr array. Raises ------ ValueError If the dictionary data is invalid or missing required fields for array creation. """ async_array = AsyncArray.from_dict(store_path=store_path, data=data) return cls(async_array) @classmethod def open( cls, store: StoreLike, ) -> Array: """Opens an existing Array from a store. Parameters ---------- store : Store Store containing the Array. Returns ------- Array Array opened from the store. """ async_array = sync(AsyncArray.open(store)) return cls(async_array) @property def store(self) -> Store: return self._async_array.store @property def ndim(self) -> int: """Returns the number of dimensions in the array. Returns ------- int The number of dimensions in the array. """ return self._async_array.ndim @property def shape(self) -> ChunkCoords: """Returns the shape of the array. Returns ------- ChunkCoords The shape of the array. """ return self._async_array.shape @shape.setter def shape(self, value: ChunkCoords) -> None: """Sets the shape of the array by calling resize.""" self.resize(value) @property def chunks(self) -> ChunkCoords: """Returns a tuple of integers describing the length of each dimension of a chunk of the array. If sharding is used the inner chunk shape is returned. 
Only defined for arrays using `RegularChunkGrid`. If the array doesn't use `RegularChunkGrid`, `NotImplementedError` is raised. Returns ------- tuple A tuple of integers representing the length of each dimension of a chunk. """ return self._async_array.chunks @property def shards(self) -> ChunkCoords | None: """Returns a tuple of integers describing the length of each dimension of a shard of the array. Returns None if sharding is not used. Only defined for arrays using `RegularChunkGrid`. If the array doesn't use `RegularChunkGrid`, `NotImplementedError` is raised. Returns ------- tuple | None A tuple of integers representing the length of each dimension of a shard or None if sharding is not used. """ return self._async_array.shards @property def size(self) -> int: """Returns the total number of elements in the array. Returns ------- int Total number of elements in the array. """ return self._async_array.size @property def dtype(self) -> np.dtype[Any]: """Returns the NumPy data type. Returns ------- np.dtype The NumPy data type. """ return self._async_array.dtype @property def attrs(self) -> Attributes: """Returns a MutableMapping containing user-defined attributes. Returns ------- attrs : MutableMapping A MutableMapping object containing user-defined attributes. Notes ----- Note that attribute values must be JSON serializable. """ return Attributes(self) @property def path(self) -> str: """Storage path.""" return self._async_array.path @property def name(self) -> str: """Array name following h5py convention.""" return self._async_array.name @property def basename(self) -> str: """Final component of name.""" return self._async_array.basename @property def metadata(self) -> ArrayMetadata: return self._async_array.metadata @property def store_path(self) -> StorePath: return self._async_array.store_path @property def order(self) -> MemoryOrder: return self._async_array.order @property def read_only(self) -> bool: return self._async_array.read_only @property def fill_value(self) -> Any: return self.metadata.fill_value @property def filters(self) -> tuple[numcodecs.abc.Codec, ...] | tuple[ArrayArrayCodec, ...]: """ Filters that are applied to each chunk of the array, in order, before serializing that chunk to bytes. """ return self._async_array.filters @property def serializer(self) -> None | ArrayBytesCodec: """ Array-to-bytes codec to use for serializing the chunks into bytes. """ return self._async_array.serializer @property @deprecated("Use Array.compressors instead.") def compressor(self) -> numcodecs.abc.Codec | None: """ Compressor that is applied to each chunk of the array. .. deprecated:: 3.0.0 `array.compressor` is deprecated and will be removed in a future release. Use `array.compressors` instead. """ return self._async_array.compressor @property def compressors(self) -> tuple[numcodecs.abc.Codec, ...] | tuple[BytesBytesCodec, ...]: """ Compressors that are applied to each chunk of the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. """ return self._async_array.compressors @property def cdata_shape(self) -> ChunkCoords: """ The shape of the chunk grid for this array. """ return tuple(starmap(ceildiv, zip(self.shape, self.chunks, strict=False))) @property def nchunks(self) -> int: """ The number of chunks in the stored representation of this array. 
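For example (shapes illustrative), a ``(10,)`` array stored with ``(3,)`` chunks spans ``ceil(10 / 3) == 4`` chunks:: >>> import zarr >>> arr = zarr.create(shape=(10,), chunks=(3,)) >>> arr.nchunks 4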
""" return self._async_array.nchunks def _iter_chunk_coords( self, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[ChunkCoords]: """ Create an iterator over the coordinates of chunks in chunk grid space. If the `origin` keyword is used, iteration will start at the chunk index specified by `origin`. The default behavior is to start at the origin of the grid coordinate space. If the `selection_shape` keyword is used, iteration will be bounded over a contiguous region ranging from `[origin, origin + selection_shape]`, where the upper bound is exclusive as per python indexing conventions. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. Yields ------ chunk_coords: ChunkCoords The coordinates of each chunk in the selection. """ yield from self._async_array._iter_chunk_coords( origin=origin, selection_shape=selection_shape ) @property def nbytes(self) -> int: """ The total number of bytes that can be stored in the chunks of this array. Notes ----- This value is calculated by multiplying the number of elements in the array and the size of each element, the latter of which is determined by the dtype of the array. For this reason, ``nbytes`` will likely be inaccurate for arrays with variable-length dtypes. It is not possible to determine the size of an array with variable-length elements from the shape and dtype alone. """ return self._async_array.nbytes @property def nchunks_initialized(self) -> int: """ Calculate the number of chunks that have been initialized, i.e. the number of chunks that have been persisted to the storage backend. Returns ------- nchunks_initialized : int The number of chunks that have been initialized. Notes ----- On :class:`Array` this is a (synchronous) property, unlike asynchronous function :meth:`AsyncArray.nchunks_initialized`. Examples -------- >>> arr = await zarr.create(shape=(10,), chunks=(2,)) >>> arr.nchunks_initialized 0 >>> arr[:5] = 1 >>> arr.nchunks_initialized 3 """ return sync(self._async_array.nchunks_initialized()) def nbytes_stored(self) -> int: """ Determine the size, in bytes, of the array actually written to the store. Returns ------- size : int """ return sync(self._async_array.nbytes_stored()) def _iter_chunk_keys( self, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[str]: """ Iterate over the storage keys of each chunk, relative to an optional origin, and optionally limited to a contiguous region in chunk grid coordinates. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. Yields ------ key: str The storage key of each chunk in the selection. """ yield from self._async_array._iter_chunk_keys( origin=origin, selection_shape=selection_shape ) def _iter_chunk_regions( self, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None ) -> Iterator[tuple[slice, ...]]: """ Iterate over the regions spanned by each chunk. Parameters ---------- origin : Sequence[int] | None, default=None The origin of the selection relative to the array's chunk grid. selection_shape : Sequence[int] | None, default=None The shape of the selection in chunk grid coordinates. 
Yields ------ region: tuple[slice, ...] A tuple of slice objects representing the region spanned by each chunk in the selection. """ yield from self._async_array._iter_chunk_regions( origin=origin, selection_shape=selection_shape ) def __array__( self, dtype: npt.DTypeLike | None = None, copy: bool | None = None ) -> NDArrayLike: """ This method is used by numpy when converting zarr.Array into a numpy array. For more information, see https://numpy.org/devdocs/user/basics.interoperability.html#the-array-method """ if copy is False: msg = "`copy=False` is not supported. This method always creates a copy." raise ValueError(msg) arr_np = self[...] if dtype is not None: arr_np = arr_np.astype(dtype) return arr_np def __getitem__(self, selection: Selection) -> NDArrayLike: """Retrieve data for an item or region of the array. Parameters ---------- selection : tuple An integer index or slice or tuple of int/slice objects specifying the requested item or region for each dimension of the array. Returns ------- NDArrayLike An array-like containing the data for the requested region. Examples -------- Setup a 1-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(100, dtype="uint16") >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(10,), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve a single item:: >>> z[5] 5 Retrieve a region via slicing:: >>> z[:5] array([0, 1, 2, 3, 4]) >>> z[-5:] array([95, 96, 97, 98, 99]) >>> z[5:10] array([5, 6, 7, 8, 9]) >>> z[5:10:2] array([5, 7, 9]) >>> z[::2] array([ 0, 2, 4, ..., 94, 96, 98]) Load the entire array into memory:: >>> z[...] array([ 0, 1, 2, ..., 97, 98, 99]) Setup a 2-dimensional array:: >>> data = np.arange(100, dtype="uint16").reshape(10, 10) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(10, 10), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve an item:: >>> z[2, 2] 22 Retrieve a region via slicing:: >>> z[1:3, 1:3] array([[11, 12], [21, 22]]) >>> z[1:3, :] array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) >>> z[:, 1:3] array([[ 1, 2], [11, 12], [21, 22], [31, 32], [41, 42], [51, 52], [61, 62], [71, 72], [81, 82], [91, 92]]) >>> z[0:5:2, 0:5:2] array([[ 0, 2, 4], [20, 22, 24], [40, 42, 44]]) >>> z[::2, ::2] array([[ 0, 2, 4, 6, 8], [20, 22, 24, 26, 28], [40, 42, 44, 46, 48], [60, 62, 64, 66, 68], [80, 82, 84, 86, 88]]) Load the entire array into memory:: >>> z[...] array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [50, 51, 52, 53, 54, 55, 56, 57, 58, 59], [60, 61, 62, 63, 64, 65, 66, 67, 68, 69], [70, 71, 72, 73, 74, 75, 76, 77, 78, 79], [80, 81, 82, 83, 84, 85, 86, 87, 88, 89], [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]]) Notes ----- Slices with step > 1 are supported, but slices with negative step are not. For arrays with a structured dtype, see Zarr format 2 for examples of how to use fields Currently the implementation for __getitem__ is provided by :func:`vindex` if the indexing is pure fancy indexing (ie a broadcast-compatible tuple of integer array indices), or by :func:`set_basic_selection` otherwise. 
Effectively, this means that the following indexing modes are supported: - integer indexing - slice indexing - mixed slice and integer indexing - boolean indexing - fancy indexing (vectorized list of integers) For specific indexing options including outer indexing, see the methods listed under See Also. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, set_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __setitem__ """ fields, pure_selection = pop_fields(selection) if is_pure_fancy_indexing(pure_selection, self.ndim): return self.vindex[cast(CoordinateSelection | MaskSelection, selection)] elif is_pure_orthogonal_indexing(pure_selection, self.ndim): return self.get_orthogonal_selection(pure_selection, fields=fields) else: return self.get_basic_selection(cast(BasicSelection, pure_selection), fields=fields) def __setitem__(self, selection: Selection, value: npt.ArrayLike) -> None: """Modify data for an item or region of the array. Parameters ---------- selection : tuple An integer index or slice or tuple of int/slice specifying the requested region for each dimension of the array. value : npt.ArrayLike An array-like containing the data to be stored in the selection. Examples -------- Setup a 1-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(100,), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5,), >>> dtype="i4", >>> ) Set all array elements to the same scalar value:: >>> z[...] = 42 >>> z[...] array([42, 42, 42, ..., 42, 42, 42]) Set a portion of the array:: >>> z[:10] = np.arange(10) >>> z[-10:] = np.arange(10)[::-1] >>> z[...] array([ 0, 1, 2, ..., 2, 1, 0]) Setup a 2-dimensional array:: >>> z = zarr.zeros( >>> shape=(5, 5), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5, 5), >>> dtype="i4", >>> ) Set all array elements to the same scalar value:: >>> z[...] = 42 Set a portion of the array:: >>> z[0, :] = np.arange(z.shape[1]) >>> z[:, 0] = np.arange(z.shape[0]) >>> z[...] array([[ 0, 1, 2, 3, 4], [ 1, 42, 42, 42, 42], [ 2, 42, 42, 42, 42], [ 3, 42, 42, 42, 42], [ 4, 42, 42, 42, 42]]) Notes ----- Slices with step > 1 are supported, but slices with negative step are not. For arrays with a structured dtype, see Zarr format 2 for examples of how to use fields Currently the implementation for __setitem__ is provided by :func:`vindex` if the indexing is pure fancy indexing (ie a broadcast-compatible tuple of integer array indices), or by :func:`set_basic_selection` otherwise. Effectively, this means that the following indexing modes are supported: - integer indexing - slice indexing - mixed slice and integer indexing - boolean indexing - fancy indexing (vectorized list of integers) For specific indexing options including outer indexing, see the methods listed under See Also. 
See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, set_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__ """ fields, pure_selection = pop_fields(selection) if is_pure_fancy_indexing(pure_selection, self.ndim): self.vindex[cast(CoordinateSelection | MaskSelection, selection)] = value elif is_pure_orthogonal_indexing(pure_selection, self.ndim): self.set_orthogonal_selection(pure_selection, value, fields=fields) else: self.set_basic_selection(cast(BasicSelection, pure_selection), value, fields=fields) @_deprecate_positional_args def get_basic_selection( self, selection: BasicSelection = Ellipsis, *, out: NDBuffer | None = None, prototype: BufferPrototype | None = None, fields: Fields | None = None, ) -> NDArrayLike: """Retrieve data for an item or region of the array. Parameters ---------- selection : tuple A tuple specifying the requested item or region for each dimension of the array. May be any combination of int and/or slice or ellipsis for multidimensional arrays. out : NDBuffer, optional If given, load the selected data directly into this buffer. prototype : BufferPrototype, optional The prototype of the buffer to use for the output data. If not provided, the default buffer prototype is used. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. Returns ------- NDArrayLike An array-like containing the data for the requested region. Examples -------- Setup a 1-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(100, dtype="uint16") >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(3,), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve a single item:: >>> z.get_basic_selection(5) 5 Retrieve a region via slicing:: >>> z.get_basic_selection(slice(5)) array([0, 1, 2, 3, 4]) >>> z.get_basic_selection(slice(-5, None)) array([95, 96, 97, 98, 99]) >>> z.get_basic_selection(slice(5, 10)) array([5, 6, 7, 8, 9]) >>> z.get_basic_selection(slice(5, 10, 2)) array([5, 7, 9]) >>> z.get_basic_selection(slice(None, None, 2)) array([ 0, 2, 4, ..., 94, 96, 98]) Setup a 3-dimensional array:: >>> data = np.arange(1000).reshape(10, 10, 10) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(5, 5, 5), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve an item:: >>> z.get_basic_selection((1, 2, 3)) 123 Retrieve a region via slicing and Ellipsis:: >>> z.get_basic_selection((slice(1, 3), slice(1, 3), 0)) array([[110, 120], [210, 220]]) >>> z.get_basic_selection((0, slice(1, 3), slice(None))) array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]]) >>> z.get_basic_selection((..., 2)) array([[  2,  12,  22,  32,  42,  52,  62,  72,  82,  92], [102, 112, 122, 132, 142, 152, 162, 172, 182, 192], ..., [802, 812, 822, 832, 842, 852, 862, 872, 882, 892], [902, 912, 922, 932, 942, 952, 962, 972, 982, 992]]) Notes ----- Slices with step > 1 are supported, but slices with negative step are not. For arrays with a structured dtype, see Zarr format 2 for examples of how to use the `fields` parameter. This method provides the implementation for accessing data via the square bracket notation (__getitem__). See :func:`__getitem__` for examples using the alternative notation. 
See Also -------- set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, set_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() return sync( self._async_array._get_selection( BasicIndexer(selection, self.shape, self.metadata.chunk_grid), out=out, fields=fields, prototype=prototype, ) ) @_deprecate_positional_args def set_basic_selection( self, selection: BasicSelection, value: npt.ArrayLike, *, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> None: """Modify data for an item or region of the array. Parameters ---------- selection : tuple A tuple specifying the requested item or region for each dimension of the array. May be any combination of int and/or slice or ellipsis for multidimensional arrays. value : npt.ArrayLike An array-like containing values to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. prototype : BufferPrototype, optional The prototype of the buffer used for setting the data. If not provided, the default buffer prototype is used. Examples -------- Setup a 1-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(100,), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(100,), >>> dtype="i4", >>> ) Set all array elements to the same scalar value:: >>> z.set_basic_selection(..., 42) >>> z[...] array([42, 42, 42, ..., 42, 42, 42]) Set a portion of the array:: >>> z.set_basic_selection(slice(10), np.arange(10)) >>> z.set_basic_selection(slice(-10, None), np.arange(10)[::-1]) >>> z[...] array([ 0, 1, 2, ..., 2, 1, 0]) Setup a 2-dimensional array:: >>> z = zarr.zeros( >>> shape=(5, 5), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5, 5), >>> dtype="i4", >>> ) Set all array elements to the same scalar value:: >>> z.set_basic_selection(..., 42) Set a portion of the array:: >>> z.set_basic_selection((0, slice(None)), np.arange(z.shape[1])) >>> z.set_basic_selection((slice(None), 0), np.arange(z.shape[0])) >>> z[...] array([[ 0, 1, 2, 3, 4], [ 1, 42, 42, 42, 42], [ 2, 42, 42, 42, 42], [ 3, 42, 42, 42, 42], [ 4, 42, 42, 42, 42]]) Notes ----- For arrays with a structured dtype, see Zarr format 2 for examples of how to use the `fields` parameter. This method provides the underlying implementation for modifying data via square bracket notation, see :func:`__setitem__` for equivalent examples using the alternative notation. See Also -------- get_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, set_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = BasicIndexer(selection, self.shape, self.metadata.chunk_grid) sync(self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype)) @_deprecate_positional_args def get_orthogonal_selection( self, selection: OrthogonalSelection, *, out: NDBuffer | None = None, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> NDArrayLike: """Retrieve data by making a selection for each dimension of the array. For example, if an array has 2 dimensions, allows selecting specific rows and/or columns. 
The selection for each dimension can be either an integer (indexing a single item), a slice, an array of integers, or a Boolean array where True values indicate a selection. Parameters ---------- selection : tuple A selection for each dimension of the array. May be any combination of int, slice, integer array or Boolean array. out : NDBuffer, optional If given, load the selected data directly into this buffer. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. prototype : BufferPrototype, optional The prototype of the buffer to use for the output data. If not provided, the default buffer prototype is used. Returns ------- NDArrayLike An array-like containing the data for the requested selection. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(100).reshape(10, 10) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=data.shape, >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve rows and columns via any combination of int, slice, integer array and/or Boolean array:: >>> z.get_orthogonal_selection(([1, 4], slice(None))) array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]]) >>> z.get_orthogonal_selection((slice(None), [1, 4])) array([[ 1, 4], [11, 14], [21, 24], [31, 34], [41, 44], [51, 54], [61, 64], [71, 74], [81, 84], [91, 94]]) >>> z.get_orthogonal_selection(([1, 4], [1, 4])) array([[11, 14], [41, 44]]) >>> sel = np.zeros(z.shape[0], dtype=bool) >>> sel[1] = True >>> sel[4] = True >>> z.get_orthogonal_selection((sel, sel)) array([[11, 14], [41, 44]]) For convenience, the orthogonal selection functionality is also available via the `oindex` property, e.g.:: >>> z.oindex[[1, 4], :] array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]]) >>> z.oindex[:, [1, 4]] array([[ 1, 4], [11, 14], [21, 24], [31, 34], [41, 44], [51, 54], [61, 64], [71, 74], [81, 84], [91, 94]]) >>> z.oindex[[1, 4], [1, 4]] array([[11, 14], [41, 44]]) >>> sel = np.zeros(z.shape[0], dtype=bool) >>> sel[1] = True >>> sel[4] = True >>> z.oindex[sel, sel] array([[11, 14], [41, 44]]) Notes ----- Orthogonal indexing is also known as outer indexing. Slices with step > 1 are supported, but slices with negative step are not. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = OrthogonalIndexer(selection, self.shape, self.metadata.chunk_grid) return sync( self._async_array._get_selection( indexer=indexer, out=out, fields=fields, prototype=prototype ) ) @_deprecate_positional_args def set_orthogonal_selection( self, selection: OrthogonalSelection, value: npt.ArrayLike, *, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> None: """Modify data via a selection for each dimension of the array. Parameters ---------- selection : tuple A selection for each dimension of the array. May be any combination of int, slice, integer array or Boolean array. value : npt.ArrayLike An array-like array containing the data to be stored in the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. 
prototype : BufferPrototype, optional The prototype of the buffer used for setting the data. If not provided, the default buffer prototype is used. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(5, 5), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5, 5), >>> dtype="i4", >>> ) Set data for a selection of rows:: >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1) >>> z[...] array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]) Set data for a selection of columns:: >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2) >>> z[...] array([[0, 2, 0, 0, 2], [1, 2, 1, 1, 2], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 2, 1, 1, 2]]) Set data for a selection of rows and columns:: >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3) >>> z[...] array([[0, 2, 0, 0, 2], [1, 3, 1, 1, 3], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 3, 1, 1, 3]]) Set data from a 2D array:: >>> values = np.arange(10).reshape(2, 5) >>> z.set_orthogonal_selection(([0, 3], ...), values) >>> z[...] array([[0, 1, 2, 3, 4], [1, 3, 1, 1, 3], [0, 2, 0, 0, 2], [5, 6, 7, 8, 9], [1, 3, 1, 1, 3]]) For convenience, this functionality is also available via the `oindex` property. E.g.:: >>> z.oindex[[1, 4], [1, 4]] = 4 >>> z[...] array([[0, 1, 2, 3, 4], [1, 4, 1, 1, 4], [0, 2, 0, 0, 2], [5, 6, 7, 8, 9], [1, 4, 1, 1, 4]]) Notes ----- Orthogonal indexing is also known as outer indexing. Slices with step > 1 are supported, but slices with negative step are not. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = OrthogonalIndexer(selection, self.shape, self.metadata.chunk_grid) return sync( self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype) ) @_deprecate_positional_args def get_mask_selection( self, mask: MaskSelection, *, out: NDBuffer | None = None, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> NDArrayLike: """Retrieve a selection of individual items, by providing a Boolean array of the same shape as the array against which the selection is being made, where True values indicate a selected item. Parameters ---------- mask : ndarray, bool A Boolean array of the same shape as the array against which the selection is being made. out : NDBuffer, optional If given, load the selected data directly into this buffer. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. prototype : BufferPrototype, optional The prototype of the buffer to use for the output data. If not provided, the default buffer prototype is used. Returns ------- NDArrayLike An array-like containing the data for the requested selection. 
Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(100).reshape(10, 10) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=data.shape, >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve items by specifying a mask:: >>> sel = np.zeros_like(z, dtype=bool) >>> sel[1, 1] = True >>> sel[4, 4] = True >>> z.get_mask_selection(sel) array([11, 44]) For convenience, the mask selection functionality is also available via the `vindex` property, e.g.:: >>> z.vindex[sel] array([11, 44]) Notes ----- Mask indexing is a form of vectorized or inner indexing, and is equivalent to coordinate indexing. Internally the mask array is converted to coordinate arrays by calling `np.nonzero`. See Also -------- get_basic_selection, set_basic_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = MaskIndexer(mask, self.shape, self.metadata.chunk_grid) return sync( self._async_array._get_selection( indexer=indexer, out=out, fields=fields, prototype=prototype ) ) @_deprecate_positional_args def set_mask_selection( self, mask: MaskSelection, value: npt.ArrayLike, *, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> None: """Modify a selection of individual items, by providing a Boolean array of the same shape as the array against which the selection is being made, where True values indicate a selected item. Parameters ---------- mask : ndarray, bool A Boolean array of the same shape as the array against which the selection is being made. value : npt.ArrayLike An array-like containing values to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(5, 5), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5, 5), >>> dtype="i4", >>> ) Set data for a selection of items:: >>> sel = np.zeros_like(z, dtype=bool) >>> sel[1, 1] = True >>> sel[4, 4] = True >>> z.set_mask_selection(sel, 1) >>> z[...] array([[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) For convenience, this functionality is also available via the `vindex` property. E.g.:: >>> z.vindex[sel] = 2 >>> z[...] array([[0, 0, 0, 0, 0], [0, 2, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2]]) Notes ----- Mask indexing is a form of vectorized or inner indexing, and is equivalent to coordinate indexing. Internally the mask array is converted to coordinate arrays by calling `np.nonzero`. 
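Because of this equivalence, the following two statements produce the same result (a sketch reusing the array ``z`` and mask ``sel`` from the examples above; ``np.nonzero`` is the conversion the implementation performs internally)::

    >>> z.set_mask_selection(sel, 3)
    >>> z.set_coordinate_selection(np.nonzero(sel), 3)  # equivalent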
See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = MaskIndexer(mask, self.shape, self.metadata.chunk_grid) sync(self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype)) @_deprecate_positional_args def get_coordinate_selection( self, selection: CoordinateSelection, *, out: NDBuffer | None = None, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> NDArrayLike: """Retrieve a selection of individual items, by providing the indices (coordinates) for each selected item. Parameters ---------- selection : tuple An integer (coordinate) array for each dimension of the array. out : NDBuffer, optional If given, load the selected data directly into this buffer. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. prototype : BufferPrototype, optional The prototype of the buffer to use for the output data. If not provided, the default buffer prototype is used. Returns ------- NDArrayLike An array-like containing the data for the requested coordinate selection. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(0, 100, dtype="uint16").reshape((10, 10)) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(3, 3), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve items by specifying their coordinates:: >>> z.get_coordinate_selection(([1, 4], [1, 4])) array([11, 44]) For convenience, the coordinate selection functionality is also available via the `vindex` property, e.g.:: >>> z.vindex[[1, 4], [1, 4]] array([11, 44]) Notes ----- Coordinate indexing is also known as point selection, and is a form of vectorized or inner indexing. Slices are not supported. Coordinate arrays must be provided for all dimensions of the array. Coordinate arrays may be multidimensional, in which case the output array will also be multidimensional. Coordinate arrays are broadcast against each other before being applied. The shape of the output will be the same as the shape of each coordinate array after broadcasting. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, set_coordinate_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = CoordinateIndexer(selection, self.shape, self.metadata.chunk_grid) out_array = sync( self._async_array._get_selection( indexer=indexer, out=out, fields=fields, prototype=prototype ) ) if hasattr(out_array, "shape"): # restore shape out_array = np.array(out_array).reshape(indexer.sel_shape) return out_array @_deprecate_positional_args def set_coordinate_selection( self, selection: CoordinateSelection, value: npt.ArrayLike, *, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> None: """Modify a selection of individual items, by providing the indices (coordinates) for each item to be modified. Parameters ---------- selection : tuple An integer (coordinate) array for each dimension of the array. 
value : npt.ArrayLike An array-like containing values to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(5, 5), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(5, 5), >>> dtype="i4", >>> ) Set data for a selection of items:: >>> z.set_coordinate_selection(([1, 4], [1, 4]), 1) >>> z[...] array([[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]) For convenience, this functionality is also available via the `vindex` property. E.g.:: >>> z.vindex[[1, 4], [1, 4]] = 2 >>> z[...] array([[0, 0, 0, 0, 0], [0, 2, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2]]) Notes ----- Coordinate indexing is also known as point selection, and is a form of vectorized or inner indexing. Slices are not supported. Coordinate arrays must be provided for all dimensions of the array. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() # setup indexer indexer = CoordinateIndexer(selection, self.shape, self.metadata.chunk_grid) # handle value: coerce it to an ndarray-like and flatten it to one dimension if not is_scalar(value, self.dtype): try: from numcodecs.compat import ensure_ndarray_like value = ensure_ndarray_like(value) # TODO replace with agnostic except TypeError: # Handle types like `list` or `tuple` value = np.array(value) # TODO replace with agnostic if hasattr(value, "shape") and len(value.shape) > 1: value = np.array(value).reshape(-1) if not is_scalar(value, self.dtype) and ( isinstance(value, NDArrayLike) and indexer.shape != value.shape ): raise ValueError( f"Attempting to set a selection of {indexer.sel_shape[0]} " f"elements with an array of {value.shape[0]} elements." ) sync(self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype)) @_deprecate_positional_args def get_block_selection( self, selection: BasicSelection, *, out: NDBuffer | None = None, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> NDArrayLike: """Retrieve a selection of individual blocks, by providing the chunk indices (coordinates) for each selected block. Parameters ---------- selection : int or slice or tuple of int or slice An integer (coordinate) or slice for each dimension of the array. out : NDBuffer, optional If given, load the selected data directly into this buffer. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to extract data for. prototype : BufferPrototype, optional The prototype of the buffer to use for the output data. If not provided, the default buffer prototype is used. Returns ------- NDArrayLike An array-like containing the data for the requested block selection.
Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> data = np.arange(0, 100, dtype="uint16").reshape((10, 10)) >>> z = zarr.create_array( >>> StorePath(MemoryStore(mode="w")), >>> shape=data.shape, >>> chunks=(3, 3), >>> dtype=data.dtype, >>> ) >>> z[:] = data Retrieve items by specifying their block coordinates:: >>> z.get_block_selection((1, slice(None))) array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]]) Which is equivalent to:: >>> z[3:6, :] array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]]) For convenience, the block selection functionality is also available via the `blocks` property, e.g.:: >>> z.blocks[1] array([[30, 31, 32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [50, 51, 52, 53, 54, 55, 56, 57, 58, 59]]) Notes ----- Block indexing is a convenience indexing method to work on individual chunks with chunk index slicing. It has the same concept as Dask's `Array.blocks` indexing. Slices are supported. However, only with a step size of one. Block index arrays may be multidimensional to index multidimensional arrays. For example:: >>> z.blocks[0, 1:3] array([[ 3, 4, 5, 6, 7, 8], [13, 14, 15, 16, 17, 18], [23, 24, 25, 26, 27, 28]]) See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, set_coordinate_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = BlockIndexer(selection, self.shape, self.metadata.chunk_grid) return sync( self._async_array._get_selection( indexer=indexer, out=out, fields=fields, prototype=prototype ) ) @_deprecate_positional_args def set_block_selection( self, selection: BasicSelection, value: npt.ArrayLike, *, fields: Fields | None = None, prototype: BufferPrototype | None = None, ) -> None: """Modify a selection of individual blocks, by providing the chunk indices (coordinates) for each block to be modified. Parameters ---------- selection : tuple An integer (coordinate) or slice for each dimension of the array. value : npt.ArrayLike An array-like containing the data to be stored in the block selection. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. prototype : BufferPrototype, optional The prototype of the buffer used for setting the data. If not provided, the default buffer prototype is used. Examples -------- Set up a 2-dimensional array:: >>> import zarr >>> z = zarr.zeros( >>> shape=(6, 6), >>> store=StorePath(MemoryStore(mode="w")), >>> chunk_shape=(2, 2), >>> dtype="i4", >>> ) Set data for a selection of items:: >>> z.set_block_selection((1, 0), 1) >>> z[...] array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) For convenience, this functionality is also available via the `blocks` property. E.g.:: >>> z.blocks[2, 1] = 4 >>> z[...] array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 4, 4, 0, 0], [0, 0, 4, 4, 0, 0]]) >>> z.blocks[:, 2] = 7 >>> z[...] 
array([[0, 0, 0, 0, 7, 7], [0, 0, 0, 0, 7, 7], [1, 1, 0, 0, 7, 7], [1, 1, 0, 0, 7, 7], [0, 0, 4, 4, 7, 7], [0, 0, 4, 4, 7, 7]]) Notes ----- Block indexing is a convenience indexing method to work on individual chunks with chunk index slicing. It has the same concept as Dask's `Array.blocks` indexing. Slices are supported. However, only with a step size of one. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection, get_block_selection, set_block_selection, vindex, oindex, blocks, __getitem__, __setitem__ """ if prototype is None: prototype = default_buffer_prototype() indexer = BlockIndexer(selection, self.shape, self.metadata.chunk_grid) sync(self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype)) @property def vindex(self) -> VIndex: """Shortcut for vectorized (inner) indexing, see :func:`get_coordinate_selection`, :func:`set_coordinate_selection`, :func:`get_mask_selection` and :func:`set_mask_selection` for documentation and examples.""" return VIndex(self) @property def oindex(self) -> OIndex: """Shortcut for orthogonal (outer) indexing, see :func:`get_orthogonal_selection` and :func:`set_orthogonal_selection` for documentation and examples.""" return OIndex(self) @property def blocks(self) -> BlockIndex: """Shortcut for blocked chunked indexing, see :func:`get_block_selection` and :func:`set_block_selection` for documentation and examples.""" return BlockIndex(self) def resize(self, new_shape: ShapeLike) -> None: """ Change the shape of the array by growing or shrinking one or more dimensions. Parameters ---------- new_shape : tuple New shape of the array. Notes ----- If one or more dimensions are shrunk, any chunks falling outside the new array shape will be deleted from the underlying store. Note, however, that chunks partially falling inside the new array (i.e. boundary chunks) will remain intact, so data falling outside the new array but inside a boundary chunk will be restored by a subsequent resize operation that grows the array again. Examples -------- >>> import zarr >>> z = zarr.zeros(shape=(10000, 10000), >>> chunk_shape=(1000, 1000), >>> dtype="i4",) >>> z.shape (10000, 10000) >>> z.resize((20000, 1000)) >>> z.shape (20000, 1000) >>> z.resize((50, 50)) >>> z.shape (50, 50) """ sync(self._async_array.resize(new_shape)) def append(self, data: npt.ArrayLike, axis: int = 0) -> ChunkCoords: """Append `data` to `axis`. Parameters ---------- data : array-like Data to be appended. axis : int Axis along which to append. Returns ------- new_shape : tuple Notes ----- The size of all dimensions other than `axis` must match between this array and `data`. Examples -------- >>> import numpy as np >>> import zarr >>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000) >>> z = zarr.array(a, chunks=(1000, 100)) >>> z.shape (10000, 1000) >>> z.append(a) (20000, 1000) >>> z.append(np.vstack([a, a]), axis=1) (20000, 2000) >>> z.shape (20000, 2000) """ return sync(self._async_array.append(data, axis=axis)) def update_attributes(self, new_attributes: dict[str, JSON]) -> Array: """ Update the array's attributes. Parameters ---------- new_attributes : dict A dictionary of new attributes to update or add to the array. The keys represent attribute names, and the values must be JSON-compatible. Returns ------- Array The array with the updated attributes.
Raises ------ ValueError If the attributes are invalid or incompatible with the array's metadata. Notes ----- - The updated attributes will be merged with existing attributes, and any conflicts will be overwritten by the new values. """ new_array = sync(self._async_array.update_attributes(new_attributes)) # TODO: remove this cast when type inference improves _new_array = cast(AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata], new_array) return type(self)(_new_array) def __repr__(self) -> str: return f"<Array {self.store_path} shape={self.shape} dtype={self.dtype}>" @property def info(self) -> Any: """ Return the statically known information for an array. Returns ------- ArrayInfo See Also -------- Array.info_complete All information about an array, including dynamic information like the number of bytes and chunks written. Examples -------- >>> arr = zarr.create(shape=(10,), chunks=(2,), dtype="float32") >>> arr.info Type : Array Zarr format : 3 Data type : DataType.float32 Shape : (10,) Chunk shape : (2,) Order : C Read-only : False Store type : MemoryStore Codecs : [BytesCodec(endian=<Endian.little: 'little'>)] No. bytes : 40 """ return self._async_array.info def info_complete(self) -> Any: """ Returns all the information about an array, including information from the Store. In addition to the statically known information like ``name`` and ``zarr_format``, this includes additional information like the size of the array in bytes and the number of chunks written. Note that this method will need to read metadata from the store. Returns ------- ArrayInfo See Also -------- Array.info The statically known subset of metadata about an array. """ return sync(self._async_array.info_complete()) async def chunks_initialized( array: AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata], ) -> tuple[str, ...]: """ Return the keys of the chunks that have been persisted to the storage backend. Parameters ---------- array : AsyncArray The array to inspect. Returns ------- chunks_initialized : tuple[str, ...] The keys of the chunks that have been initialized.
See Also -------- nchunks_initialized """ store_contents = [ x async for x in array.store_path.store.list_prefix(prefix=array.store_path.path) ] return tuple(chunk_key for chunk_key in array._iter_chunk_keys() if chunk_key in store_contents) def _build_parents( node: AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup, ) -> list[AsyncGroup]: from zarr.core.group import AsyncGroup, GroupMetadata store = node.store_path.store path = node.store_path.path if not path: return [] required_parts = path.split("/")[:-1] parents = [ # the root group AsyncGroup( metadata=GroupMetadata(zarr_format=node.metadata.zarr_format), store_path=StorePath(store=store, path=""), ) ] for i, part in enumerate(required_parts): p = "/".join(required_parts[:i] + [part]) parents.append( AsyncGroup( metadata=GroupMetadata(zarr_format=node.metadata.zarr_format), store_path=StorePath(store=store, path=p), ) ) return parents def _get_default_codecs( np_dtype: np.dtype[Any], ) -> tuple[Codec, ...]: filters, serializer, compressors = _get_default_chunk_encoding_v3(np_dtype) return filters + (serializer,) + compressors FiltersLike: TypeAlias = ( Iterable[dict[str, JSON] | ArrayArrayCodec | numcodecs.abc.Codec] | ArrayArrayCodec | Iterable[numcodecs.abc.Codec] | numcodecs.abc.Codec | Literal["auto"] | None ) CompressorLike: TypeAlias = dict[str, JSON] | BytesBytesCodec | numcodecs.abc.Codec | None CompressorsLike: TypeAlias = ( Iterable[dict[str, JSON] | BytesBytesCodec | numcodecs.abc.Codec] | dict[str, JSON] | BytesBytesCodec | numcodecs.abc.Codec | Literal["auto"] | None ) SerializerLike: TypeAlias = dict[str, JSON] | ArrayBytesCodec | Literal["auto"] class ShardsConfigParam(TypedDict): shape: ChunkCoords index_location: ShardingCodecIndexLocation | None ShardsLike: TypeAlias = ChunkCoords | ShardsConfigParam | Literal["auto"] async def init_array( *, store_path: StorePath, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = None, order: MemoryOrder | None = None, zarr_format: ZarrFormat | None = 3, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, overwrite: bool = False, config: ArrayConfigLike | None, ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata]: """Create and persist an array metadata document. Parameters ---------- store_path : StorePath StorePath instance. The path attribute is the name of the array to initialize. shape : ChunkCoords Shape of the array. dtype : npt.DTypeLike Data type of the array. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, default are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. 
For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the order of your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. compressors : Iterable[Codec], optional List of compressors to apply to the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory layout of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. zarr_format : {2, 3}, optional The zarr format to use when saving. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncodingLike, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfigLike or None, optional Configuration for this array. Returns ------- AsyncArray The AsyncArray.
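Examples
--------
A minimal sketch (not executed as a doctest; assumes ``StorePath`` and ``MemoryStore`` can be imported from :mod:`zarr.storage` and that the call runs inside an async context)::

    >>> from zarr.storage import MemoryStore, StorePath
    >>> arr = await init_array(
    >>>     store_path=StorePath(MemoryStore()),
    >>>     shape=(100, 100),
    >>>     dtype="i4",
    >>>     config=None,
    >>> )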
""" if zarr_format is None: zarr_format = _default_zarr_format() from zarr.codecs.sharding import ShardingCodec, ShardingCodecIndexLocation dtype_parsed = parse_dtype(dtype, zarr_format=zarr_format) shape_parsed = parse_shapelike(shape) chunk_key_encoding_parsed = _parse_chunk_key_encoding( chunk_key_encoding, zarr_format=zarr_format ) if overwrite: if store_path.store.supports_deletes: await store_path.delete_dir() else: await ensure_no_existing_node(store_path, zarr_format=zarr_format) else: await ensure_no_existing_node(store_path, zarr_format=zarr_format) shard_shape_parsed, chunk_shape_parsed = _auto_partition( array_shape=shape_parsed, shard_shape=shards, chunk_shape=chunks, dtype=dtype_parsed ) chunks_out: tuple[int, ...] meta: ArrayV2Metadata | ArrayV3Metadata if zarr_format == 2: if shard_shape_parsed is not None: msg = ( "Zarr format 2 arrays can only be created with `shard_shape` set to `None`. " f"Got `shard_shape={shards}` instead." ) raise ValueError(msg) if serializer != "auto": raise ValueError("Zarr format 2 arrays do not support `serializer`.") filters_parsed, compressor_parsed = _parse_chunk_encoding_v2( compressor=compressors, filters=filters, dtype=np.dtype(dtype) ) if dimension_names is not None: raise ValueError("Zarr format 2 arrays do not support dimension names.") if order is None: order_parsed = zarr_config.get("array.order") else: order_parsed = order meta = AsyncArray._create_metadata_v2( shape=shape_parsed, dtype=dtype_parsed, chunks=chunk_shape_parsed, dimension_separator=chunk_key_encoding_parsed.separator, fill_value=fill_value, order=order_parsed, filters=filters_parsed, compressor=compressor_parsed, attributes=attributes, ) else: array_array, array_bytes, bytes_bytes = _parse_chunk_encoding_v3( compressors=compressors, filters=filters, serializer=serializer, dtype=dtype_parsed, ) sub_codecs = cast(tuple[Codec, ...], (*array_array, array_bytes, *bytes_bytes)) codecs_out: tuple[Codec, ...] 
if shard_shape_parsed is not None: index_location = None if isinstance(shards, dict): index_location = ShardingCodecIndexLocation(shards.get("index_location", None)) if index_location is None: index_location = ShardingCodecIndexLocation.end sharding_codec = ShardingCodec( chunk_shape=chunk_shape_parsed, codecs=sub_codecs, index_location=index_location ) sharding_codec.validate( shape=chunk_shape_parsed, dtype=dtype_parsed, chunk_grid=RegularChunkGrid(chunk_shape=shard_shape_parsed), ) codecs_out = (sharding_codec,) chunks_out = shard_shape_parsed else: chunks_out = chunk_shape_parsed codecs_out = sub_codecs meta = AsyncArray._create_metadata_v3( shape=shape_parsed, dtype=dtype_parsed, fill_value=fill_value, chunk_shape=chunks_out, chunk_key_encoding=chunk_key_encoding_parsed, codecs=codecs_out, dimension_names=dimension_names, attributes=attributes, ) arr = AsyncArray(metadata=meta, store_path=store_path, config=config) await arr._save_metadata(meta, ensure_parents=True) return arr async def create_array( store: str | StoreLike, *, name: str | None = None, shape: ShapeLike | None = None, dtype: npt.DTypeLike | None = None, data: np.ndarray[Any, np.dtype[Any]] | None = None, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = None, order: MemoryOrder | None = None, zarr_format: ZarrFormat | None = 3, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, overwrite: bool = False, config: ArrayConfigLike | None = None, write_data: bool = True, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array. Parameters ---------- store : str or Store Store or path to directory in file system or name of zip file. name : str or None, optional The name of the array within the store. If ``name`` is ``None``, the array will be located at the root of the store. shape : ChunkCoords, optional Shape of the array. Can be ``None`` if ``data`` is provided. dtype : npt.DTypeLike | None Data type of the array. Can be ``None`` if ``data`` is provided. data : np.ndarray, optional Array-like data to use for initializing the array. If this parameter is provided, the ``shape`` and ``dtype`` parameters must be identical to ``data.shape`` and ``data.dtype``, or ``None``. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, defaults are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the order of your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used.
These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. compressors : Iterable[Codec], optional List of compressors to apply to the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory layout of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. zarr_format : {2, 3}, optional The zarr format to use when saving. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncodingLike, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. storage_options : dict, optional If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfigLike, optional Runtime configuration for the array. write_data : bool If a pre-existing array-like object was provided to this function via the ``data`` parameter then ``write_data`` determines whether the values in that array-like object should be written to the Zarr array created by this function. If ``write_data`` is ``False``, then the array will be left empty. Returns ------- AsyncArray The array.
Examples -------- >>> import zarr >>> store = zarr.storage.MemoryStore(mode='w') >>> async_arr = await zarr.api.asynchronous.create_array( >>> store=store, >>> shape=(100,100), >>> chunks=(10,10), >>> dtype='i4', >>> fill_value=0) """ mode: Literal["a"] = "a" store_path = await make_store_path(store, path=name, mode=mode, storage_options=storage_options) data_parsed, shape_parsed, dtype_parsed = _parse_data_params( data=data, shape=shape, dtype=dtype ) result = await init_array( store_path=store_path, shape=shape_parsed, dtype=dtype_parsed, chunks=chunks, shards=shards, filters=filters, compressors=compressors, serializer=serializer, fill_value=fill_value, order=order, zarr_format=zarr_format, attributes=attributes, chunk_key_encoding=chunk_key_encoding, dimension_names=dimension_names, overwrite=overwrite, config=config, ) if write_data is True and data_parsed is not None: await result._set_selection( BasicIndexer(..., shape=result.shape, chunk_grid=result.metadata.chunk_grid), data_parsed, prototype=default_buffer_prototype(), ) return result def _parse_chunk_key_encoding( data: ChunkKeyEncodingLike | None, zarr_format: ZarrFormat ) -> ChunkKeyEncoding: """ Take an implicit specification of a chunk key encoding and parse it into a ChunkKeyEncoding object. """ if data is None: if zarr_format == 2: result = ChunkKeyEncoding.from_dict({"name": "v2", "separator": "."}) else: result = ChunkKeyEncoding.from_dict({"name": "default", "separator": "/"}) elif isinstance(data, ChunkKeyEncoding): result = data else: result = ChunkKeyEncoding.from_dict(data) if zarr_format == 2 and result.name != "v2": msg = ( "Invalid chunk key encoding. For Zarr format 2 arrays, the `name` field of the " f"chunk key encoding must be 'v2'. Got `name` = {result.name} instead." ) raise ValueError(msg) return result def _get_default_chunk_encoding_v3( np_dtype: np.dtype[Any], ) -> tuple[tuple[ArrayArrayCodec, ...], ArrayBytesCodec, tuple[BytesBytesCodec, ...]]: """ Get the default ArrayArrayCodecs, ArrayBytesCodec, and BytesBytesCodec for a given dtype. """ dtype = DataType.from_numpy(np_dtype) if dtype == DataType.string: dtype_key = "string" elif dtype == DataType.bytes: dtype_key = "bytes" else: dtype_key = "numeric" default_filters = zarr_config.get("array.v3_default_filters").get(dtype_key) default_serializer = zarr_config.get("array.v3_default_serializer").get(dtype_key) default_compressors = zarr_config.get("array.v3_default_compressors").get(dtype_key) filters = tuple(_parse_array_array_codec(codec_dict) for codec_dict in default_filters) serializer = _parse_array_bytes_codec(default_serializer) compressors = tuple(_parse_bytes_bytes_codec(codec_dict) for codec_dict in default_compressors) return filters, serializer, compressors def _get_default_chunk_encoding_v2( np_dtype: np.dtype[Any], ) -> tuple[tuple[numcodecs.abc.Codec, ...] | None, numcodecs.abc.Codec | None]: """ Get the default chunk encoding for Zarr format 2 arrays, given a dtype """ compressor_dict = _default_compressor(np_dtype) filter_dicts = _default_filters(np_dtype) compressor = None if compressor_dict is not None: compressor = numcodecs.get_codec(compressor_dict) filters = None if filter_dicts is not None: filters = tuple(numcodecs.get_codec(f) for f in filter_dicts) return filters, compressor def _parse_chunk_encoding_v2( *, compressor: CompressorsLike, filters: FiltersLike, dtype: np.dtype[Any], ) -> tuple[tuple[numcodecs.abc.Codec, ...] 
| None, numcodecs.abc.Codec | None]: """ Generate chunk encoding classes for Zarr format 2 arrays with optional defaults. """ default_filters, default_compressor = _get_default_chunk_encoding_v2(dtype) _filters: tuple[numcodecs.abc.Codec, ...] | None _compressor: numcodecs.abc.Codec | None if compressor is None or compressor == (): _compressor = None elif compressor == "auto": _compressor = default_compressor elif isinstance(compressor, tuple | list) and len(compressor) == 1: _compressor = parse_compressor(compressor[0]) else: if isinstance(compressor, Iterable) and not isinstance(compressor, dict): msg = f"For Zarr format 2 arrays, the `compressor` must be a single codec. Got an iterable with type {type(compressor)} instead." raise TypeError(msg) _compressor = parse_compressor(compressor) if filters is None: _filters = None elif filters == "auto": _filters = default_filters else: if isinstance(filters, Iterable): for idx, f in enumerate(filters): if not isinstance(f, numcodecs.abc.Codec): msg = ( "For Zarr format 2 arrays, all elements of `filters` must be numcodecs codecs. " f"Element at index {idx} has type {type(f)}, which is not a numcodecs codec." ) raise TypeError(msg) _filters = parse_filters(filters) return _filters, _compressor def _parse_chunk_encoding_v3( *, compressors: CompressorsLike, filters: FiltersLike, serializer: SerializerLike, dtype: np.dtype[Any], ) -> tuple[tuple[ArrayArrayCodec, ...], ArrayBytesCodec, tuple[BytesBytesCodec, ...]]: """ Generate chunk encoding classes for v3 arrays with optional defaults. """ default_array_array, default_array_bytes, default_bytes_bytes = _get_default_chunk_encoding_v3( dtype ) if filters is None: out_array_array: tuple[ArrayArrayCodec, ...] = () elif filters == "auto": out_array_array = default_array_array else: maybe_array_array: Iterable[Codec | dict[str, JSON]] if isinstance(filters, dict | Codec): maybe_array_array = (filters,) else: maybe_array_array = cast(Iterable[Codec | dict[str, JSON]], filters) out_array_array = tuple(_parse_array_array_codec(c) for c in maybe_array_array) if serializer == "auto": out_array_bytes = default_array_bytes else: out_array_bytes = _parse_array_bytes_codec(serializer) if compressors is None: out_bytes_bytes: tuple[BytesBytesCodec, ...] = () elif compressors == "auto": out_bytes_bytes = default_bytes_bytes else: maybe_bytes_bytes: Iterable[Codec | dict[str, JSON]] if isinstance(compressors, dict | Codec): maybe_bytes_bytes = (compressors,) else: maybe_bytes_bytes = cast(Iterable[Codec | dict[str, JSON]], compressors) out_bytes_bytes = tuple(_parse_bytes_bytes_codec(c) for c in maybe_bytes_bytes) return out_array_array, out_array_bytes, out_bytes_bytes def _parse_deprecated_compressor( compressor: CompressorLike | None, compressors: CompressorsLike, zarr_format: int = 3 ) -> CompressorsLike | None: if compressor != "auto": if compressors != "auto": raise ValueError("Cannot specify both `compressor` and `compressors`.") if zarr_format == 3: warn( "The `compressor` argument is deprecated. 
Use `compressors` instead.", category=UserWarning, stacklevel=2, ) if compressor is None: # "no compression" compressors = () else: compressors = (compressor,) elif zarr_format == 2 and compressor == compressors == "auto": compressors = ({"id": "blosc"},) return compressors def _parse_data_params( *, data: np.ndarray[Any, np.dtype[Any]] | None, shape: ShapeLike | None, dtype: npt.DTypeLike | None, ) -> tuple[np.ndarray[Any, np.dtype[Any]] | None, ShapeLike, npt.DTypeLike]: """ Ensure an array-like ``data`` parameter is consistent with the ``dtype`` and ``shape`` parameters. """ if data is None: if shape is None: msg = ( "The data parameter was set to None, but shape was not specified. " "Either provide a value for data, or specify shape." ) raise ValueError(msg) shape_out = shape if dtype is None: msg = ( "The data parameter was set to None, but dtype was not specified." "Either provide an array-like value for data, or specify dtype." ) raise ValueError(msg) dtype_out = dtype else: if shape is not None: msg = ( "The data parameter was used, but the shape parameter was also " "used. This is an error. Either use the data parameter, or the shape parameter, " "but not both." ) raise ValueError(msg) shape_out = data.shape if dtype is not None: msg = ( "The data parameter was used, but the dtype parameter was also " "used. This is an error. Either use the data parameter, or the dtype parameter, " "but not both." ) raise ValueError(msg) dtype_out = data.dtype return data, shape_out, dtype_out zarr-python-3.0.6/src/zarr/core/array_spec.py000066400000000000000000000070701476711733500212360ustar00rootroot00000000000000from __future__ import annotations from dataclasses import dataclass, fields from typing import TYPE_CHECKING, Any, Literal, Self, TypedDict, cast import numpy as np from zarr.core.common import ( MemoryOrder, parse_bool, parse_fill_value, parse_order, parse_shapelike, ) from zarr.core.config import config as zarr_config if TYPE_CHECKING: from typing import NotRequired from zarr.core.buffer import BufferPrototype from zarr.core.common import ChunkCoords class ArrayConfigParams(TypedDict): """ A TypedDict model of the attributes of an ArrayConfig class, but with no required fields. This allows for partial construction of an ArrayConfig, with the assumption that the unset keys will be taken from a global configuration. """ order: NotRequired[MemoryOrder] write_empty_chunks: NotRequired[bool] @dataclass(frozen=True) class ArrayConfig: """ A model of the runtime configuration of an array. Parameters ---------- order : MemoryOrder The memory layout of the arrays returned when reading data from the store. write_empty_chunks : bool If True, empty chunks will be written to the store. """ order: MemoryOrder write_empty_chunks: bool def __init__(self, order: MemoryOrder, write_empty_chunks: bool) -> None: order_parsed = parse_order(order) write_empty_chunks_parsed = parse_bool(write_empty_chunks) object.__setattr__(self, "order", order_parsed) object.__setattr__(self, "write_empty_chunks", write_empty_chunks_parsed) @classmethod def from_dict(cls, data: ArrayConfigParams) -> Self: """ Create an ArrayConfig from a dict. The keys of that dict are a subset of the attributes of the ArrayConfig class. Any keys missing from that dict will be set to the the values in the ``array`` namespace of ``zarr.config``. 
""" kwargs_out: ArrayConfigParams = {} for f in fields(ArrayConfig): field_name = cast(Literal["order", "write_empty_chunks"], f.name) if field_name not in data: kwargs_out[field_name] = zarr_config.get(f"array.{field_name}") else: kwargs_out[field_name] = data[field_name] return cls(**kwargs_out) ArrayConfigLike = ArrayConfig | ArrayConfigParams def parse_array_config(data: ArrayConfigLike | None) -> ArrayConfig: """ Convert various types of data to an ArrayConfig. """ if data is None: return ArrayConfig.from_dict({}) elif isinstance(data, ArrayConfig): return data else: return ArrayConfig.from_dict(data) @dataclass(frozen=True) class ArraySpec: shape: ChunkCoords dtype: np.dtype[Any] fill_value: Any config: ArrayConfig prototype: BufferPrototype def __init__( self, shape: ChunkCoords, dtype: np.dtype[Any], fill_value: Any, config: ArrayConfig, prototype: BufferPrototype, ) -> None: shape_parsed = parse_shapelike(shape) dtype_parsed = np.dtype(dtype) fill_value_parsed = parse_fill_value(fill_value) object.__setattr__(self, "shape", shape_parsed) object.__setattr__(self, "dtype", dtype_parsed) object.__setattr__(self, "fill_value", fill_value_parsed) object.__setattr__(self, "config", config) object.__setattr__(self, "prototype", prototype) @property def ndim(self) -> int: return len(self.shape) @property def order(self) -> MemoryOrder: return self.config.order zarr-python-3.0.6/src/zarr/core/attributes.py000066400000000000000000000032521476711733500212720ustar00rootroot00000000000000from __future__ import annotations from collections.abc import MutableMapping from typing import TYPE_CHECKING from zarr.core.common import JSON if TYPE_CHECKING: from collections.abc import Iterator from zarr.core.array import Array from zarr.core.group import Group class Attributes(MutableMapping[str, JSON]): def __init__(self, obj: Array | Group) -> None: # key=".zattrs", read_only=False, cache=True, synchronizer=None self._obj = obj def __getitem__(self, key: str) -> JSON: return self._obj.metadata.attributes[key] def __setitem__(self, key: str, value: JSON) -> None: new_attrs = dict(self._obj.metadata.attributes) new_attrs[key] = value self._obj = self._obj.update_attributes(new_attrs) def __delitem__(self, key: str) -> None: new_attrs = dict(self._obj.metadata.attributes) del new_attrs[key] self.put(new_attrs) def __iter__(self) -> Iterator[str]: return iter(self._obj.metadata.attributes) def __len__(self) -> int: return len(self._obj.metadata.attributes) def put(self, d: dict[str, JSON]) -> None: """ Overwrite all attributes with the values from `d`. Equivalent to the following pseudo-code, but performed atomically. .. 
code-block:: python >>> attrs = {"a": 1, "b": 2} >>> attrs.clear() >>> attrs.update({"a": 3, "c": 4}) >>> attrs {'a': 3, 'c': 4} """ self._obj.metadata.attributes.clear() self._obj = self._obj.update_attributes(d) def asdict(self) -> dict[str, JSON]: return dict(self._obj.metadata.attributes) zarr-python-3.0.6/src/zarr/core/buffer/000077500000000000000000000000001476711733500200015ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/core/buffer/__init__.py000066400000000000000000000005611476711733500221140ustar00rootroot00000000000000from zarr.core.buffer.core import ( ArrayLike, Buffer, BufferPrototype, NDArrayLike, NDBuffer, default_buffer_prototype, ) from zarr.core.buffer.cpu import numpy_buffer_prototype __all__ = [ "ArrayLike", "Buffer", "BufferPrototype", "NDArrayLike", "NDBuffer", "default_buffer_prototype", "numpy_buffer_prototype", ] zarr-python-3.0.6/src/zarr/core/buffer/core.py000066400000000000000000000357401476711733500213130ustar00rootroot00000000000000from __future__ import annotations import sys from abc import ABC, abstractmethod from typing import ( TYPE_CHECKING, Any, Literal, NamedTuple, Protocol, SupportsIndex, cast, runtime_checkable, ) import numpy as np import numpy.typing as npt if TYPE_CHECKING: from collections.abc import Iterable, Sequence from typing import Self from zarr.codecs.bytes import Endian from zarr.core.common import BytesLike, ChunkCoords # Everything here is imported into ``zarr.core.buffer`` namespace. __all__: list[str] = [] @runtime_checkable class ArrayLike(Protocol): """Protocol for the array-like type that underlies Buffer""" @property def dtype(self) -> np.dtype[Any]: ... @property def ndim(self) -> int: ... @property def size(self) -> int: ... def __getitem__(self, key: slice) -> Self: ... def __setitem__(self, key: slice, value: Any) -> None: ... @runtime_checkable class NDArrayLike(Protocol): """Protocol for the nd-array-like type that underlies NDBuffer""" @property def dtype(self) -> np.dtype[Any]: ... @property def ndim(self) -> int: ... @property def size(self) -> int: ... @property def shape(self) -> ChunkCoords: ... def __len__(self) -> int: ... def __getitem__(self, key: slice) -> Self: ... def __setitem__(self, key: slice, value: Any) -> None: ... def __array__(self) -> npt.NDArray[Any]: ... def reshape( self, shape: ChunkCoords | Literal[-1], *, order: Literal["A", "C", "F"] = ... ) -> Self: ... def view(self, dtype: npt.DTypeLike) -> Self: ... def astype( self, dtype: npt.DTypeLike, order: Literal["K", "A", "C", "F"] = ..., *, copy: bool = ..., ) -> Self: ... def fill(self, value: Any) -> None: ... def copy(self) -> Self: ... def transpose(self, axes: SupportsIndex | Sequence[SupportsIndex] | None) -> Self: ... def ravel(self, order: Literal["K", "A", "C", "F"] = ...) -> Self: ... def all(self) -> bool: ... def __eq__(self, other: object) -> Self: # type: ignore[explicit-override, override] """Element-wise equal Notes ----- Type checkers such as mypy complain because the return type isn't a bool like its supertype "object", which violates the Liskov substitution principle. This is true, but since NumPy's ndarray defines equality element-wise, our hands are tied.
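For example, with a NumPy array (a minimal illustration of the element-wise behaviour)::

    >>> import numpy as np
    >>> np.arange(3) == np.arange(3)
    array([ True,  True,  True])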
""" def check_item_key_is_1d_contiguous(key: Any) -> None: """Raises error if `key` isn't a 1d contiguous slice""" if not isinstance(key, slice): raise TypeError( f"Item key has incorrect type (expected slice, got {key.__class__.__name__})" ) if not (key.step is None or key.step == 1): raise ValueError("slice must be contiguous") class Buffer(ABC): """A flat contiguous memory block We use Buffer throughout Zarr to represent a contiguous block of memory. A Buffer is backed by a underlying array-like instance that represents the memory. The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the array-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- This buffer is untyped, so all indexing and sizes are in bytes. Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. """ def __init__(self, array_like: ArrayLike) -> None: if array_like.ndim != 1: raise ValueError("array_like: only 1-dim allowed") if array_like.dtype != np.dtype("b"): raise ValueError("array_like: only byte dtype allowed") self._data = array_like @classmethod @abstractmethod def create_zero_length(cls) -> Self: """Create an empty buffer with length zero Returns ------- New empty 0-length buffer """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast(ArrayLike, None) ) # This line will never be reached, but it satisfies the type checker @classmethod def from_array_like(cls, array_like: ArrayLike) -> Self: """Create a new buffer of an array-like object Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. Returns ------- New buffer representing `array_like` """ return cls(array_like) @classmethod @abstractmethod def from_buffer(cls, buffer: Buffer) -> Self: """Create a new buffer of an existing Buffer This is useful if you want to ensure that an existing buffer is of the correct subclass of Buffer. E.g., MemoryStore uses this to return a buffer instance of the subclass specified by its BufferPrototype argument. Typically, this only copies data if the data has to be moved between memory types, such as from host to device memory. Parameters ---------- buffer buffer object. Returns ------- A new buffer representing the content of the input buffer Notes ----- Subclasses of `Buffer` must override this method to implement more optimal conversions that avoid copies where possible """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast(ArrayLike, None) ) # This line will never be reached, but it satisfies the type checker @classmethod @abstractmethod def from_bytes(cls, bytes_like: BytesLike) -> Self: """Create a new buffer of a bytes-like object (host memory) Parameters ---------- bytes_like bytes-like object Returns ------- New buffer representing `bytes_like` """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast(ArrayLike, None) ) # This line will never be reached, but it satisfies the type checker def as_array_like(self) -> ArrayLike: """Returns the underlying array (host or device memory) of this buffer This will never copy data. Returns ------- The underlying 1d array such as a NumPy or CuPy array. 
""" return self._data @abstractmethod def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Notes ----- Might have to copy data, consider using `.as_array_like()` instead. Returns ------- NumPy array of this buffer (might be a data copy) """ ... def to_bytes(self) -> bytes: """Returns the buffer as `bytes` (host memory). Warnings -------- Will always copy data, only use this method for small buffers such as metadata buffers. If possible, use `.as_numpy_array()` or `.as_array_like()` instead. Returns ------- `bytes` of this buffer (data copy) """ return bytes(self.as_numpy_array()) def __getitem__(self, key: slice) -> Self: check_item_key_is_1d_contiguous(key) return self.__class__(self._data.__getitem__(key)) def __setitem__(self, key: slice, value: Any) -> None: check_item_key_is_1d_contiguous(key) self._data.__setitem__(key, value) def __len__(self) -> int: return self._data.size @abstractmethod def __add__(self, other: Buffer) -> Self: """Concatenate two buffers""" ... def __eq__(self, other: object) -> bool: # Another Buffer class can override this to choose a more efficient path return isinstance(other, Buffer) and np.array_equal( self.as_numpy_array(), other.as_numpy_array() ) class NDBuffer: """An n-dimensional memory block We use NDBuffer throughout Zarr to represent a n-dimensional memory block. A NDBuffer is backed by a underlying ndarray-like instance that represents the memory. The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the ndarray-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- The two buffer classes Buffer and NDBuffer are very similar. In fact, Buffer is a special case of NDBuffer where dim=1, stride=1, and dtype="b". However, in order to use Python's type system to differentiate between the contiguous Buffer and the n-dim (non-contiguous) NDBuffer, we keep the definition of the two classes separate. Parameters ---------- array : ndarray_like ndarray-like object that is convertible to a regular Numpy array. """ def __init__(self, array: NDArrayLike) -> None: self._data = array @classmethod @abstractmethod def create( cls, *, shape: Iterable[int], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C", fill_value: Any | None = None, ) -> Self: """Create a new buffer and its underlying ndarray-like object Parameters ---------- shape The shape of the buffer and its underlying ndarray-like object dtype The datatype of the buffer and its underlying ndarray-like object order Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. fill_value If not None, fill the new buffer with a scalar value. Returns ------- New buffer representing a new ndarray_like object Notes ----- A subclass can overwrite this method to create a ndarray-like object other then the default Numpy array. 
""" if cls is NDBuffer: raise NotImplementedError( "Cannot call abstract method on the abstract class 'NDBuffer'" ) return cls( cast(NDArrayLike, None) ) # This line will never be reached, but it satisfies the type checker @classmethod def from_ndarray_like(cls, ndarray_like: NDArrayLike) -> Self: """Create a new buffer of a ndarray-like object Parameters ---------- ndarray_like ndarray-like object Returns ------- New buffer representing `ndarray_like` """ return cls(ndarray_like) @classmethod @abstractmethod def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self: """Create a new buffer of Numpy array-like object Parameters ---------- array_like Object that can be coerced into a Numpy array Returns ------- New buffer representing `array_like` """ if cls is NDBuffer: raise NotImplementedError( "Cannot call abstract method on the abstract class 'NDBuffer'" ) return cls( cast(NDArrayLike, None) ) # This line will never be reached, but it satisfies the type checker def as_ndarray_like(self) -> NDArrayLike: """Returns the underlying array (host or device memory) of this buffer This will never copy data. Returns ------- The underlying array such as a NumPy or CuPy array. """ return self._data @abstractmethod def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Warnings -------- Might have to copy data, consider using `.as_ndarray_like()` instead. Returns ------- NumPy array of this buffer (might be a data copy) """ ... @property def dtype(self) -> np.dtype[Any]: return self._data.dtype @property def shape(self) -> tuple[int, ...]: return self._data.shape @property def byteorder(self) -> Endian: from zarr.codecs.bytes import Endian if self.dtype.byteorder == "<": return Endian.little elif self.dtype.byteorder == ">": return Endian.big else: return Endian(sys.byteorder) def reshape(self, newshape: ChunkCoords | Literal[-1]) -> Self: return self.__class__(self._data.reshape(newshape)) def squeeze(self, axis: tuple[int, ...]) -> Self: newshape = tuple(a for i, a in enumerate(self.shape) if i not in axis) return self.__class__(self._data.reshape(newshape)) def astype(self, dtype: npt.DTypeLike, order: Literal["K", "A", "C", "F"] = "K") -> Self: return self.__class__(self._data.astype(dtype=dtype, order=order)) @abstractmethod def __getitem__(self, key: Any) -> Self: ... @abstractmethod def __setitem__(self, key: Any, value: Any) -> None: ... def __len__(self) -> int: return self._data.__len__() def __repr__(self) -> str: return f"" def all_equal(self, other: Any, equal_nan: bool = True) -> bool: """Compare to `other` using np.array_equal.""" if other is None: # Handle None fill_value for Zarr V2 return False # use array_equal to obtain equal_nan=True functionality # Since fill-value is a scalar, isn't there a faster path than allocating a new array for fill value # every single time we have to write data? _data, other = np.broadcast_arrays(self._data, other) return np.array_equal( self._data, other, equal_nan=equal_nan if self._data.dtype.kind not in "USTOV" else False, ) def fill(self, value: Any) -> None: self._data.fill(value) def copy(self) -> Self: return self.__class__(self._data.copy()) def transpose(self, axes: SupportsIndex | Sequence[SupportsIndex] | None) -> Self: return self.__class__(self._data.transpose(axes)) class BufferPrototype(NamedTuple): """Prototype of the Buffer and NDBuffer class The protocol must be pickable. Attributes ---------- buffer The Buffer class to use when Zarr needs to create new Buffer. 
nd_buffer The NDBuffer class to use when Zarr needs to create new NDBuffer. """ buffer: type[Buffer] nd_buffer: type[NDBuffer] # The default buffer prototype used throughout the Zarr codebase. def default_buffer_prototype() -> BufferPrototype: from zarr.registry import ( get_buffer_class, get_ndbuffer_class, ) return BufferPrototype(buffer=get_buffer_class(), nd_buffer=get_ndbuffer_class()) zarr-python-3.0.6/src/zarr/core/buffer/cpu.py000066400000000000000000000156361476711733500211550ustar00rootroot00000000000000from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Literal, ) import numpy as np import numpy.typing as npt from zarr.core.buffer import core from zarr.registry import ( register_buffer, register_ndbuffer, ) if TYPE_CHECKING: from collections.abc import Callable, Iterable from typing import Self from zarr.core.buffer.core import ArrayLike, NDArrayLike from zarr.core.common import BytesLike class Buffer(core.Buffer): """A flat contiguous memory block We use Buffer throughout Zarr to represent a contiguous block of memory. A Buffer is backed by a underlying array-like instance that represents the memory. The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the array-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- This buffer is untyped, so all indexing and sizes are in bytes. Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. """ def __init__(self, array_like: ArrayLike) -> None: super().__init__(array_like) @classmethod def create_zero_length(cls) -> Self: return cls(np.array([], dtype="b")) @classmethod def from_buffer(cls, buffer: core.Buffer) -> Self: """Create a new buffer of an existing Buffer This is useful if you want to ensure that an existing buffer is of the correct subclass of Buffer. E.g., MemoryStore uses this to return a buffer instance of the subclass specified by its BufferPrototype argument. Typically, this only copies data if the data has to be moved between memory types, such as from host to device memory. Parameters ---------- buffer buffer object. Returns ------- A new buffer representing the content of the input buffer Notes ----- Subclasses of `Buffer` must override this method to implement more optimal conversions that avoid copies where possible """ return cls.from_array_like(buffer.as_numpy_array()) @classmethod def from_bytes(cls, bytes_like: BytesLike) -> Self: """Create a new buffer of a bytes-like object (host memory) Parameters ---------- bytes_like bytes-like object Returns ------- New buffer representing `bytes_like` """ return cls.from_array_like(np.frombuffer(bytes_like, dtype="b")) def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Notes ----- Might have to copy data, consider using `.as_array_like()` instead. Returns ------- NumPy array of this buffer (might be a data copy) """ return np.asanyarray(self._data) def __add__(self, other: core.Buffer) -> Self: """Concatenate two buffers""" other_array = other.as_array_like() assert other_array.dtype == np.dtype("b") return self.__class__( np.concatenate((np.asanyarray(self._data), np.asanyarray(other_array))) ) class NDBuffer(core.NDBuffer): """An n-dimensional memory block We use NDBuffer throughout Zarr to represent a n-dimensional memory block. A NDBuffer is backed by a underlying ndarray-like instance that represents the memory. 
The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the ndarray-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- The two buffer classes Buffer and NDBuffer are very similar. In fact, Buffer is a special case of NDBuffer where dim=1, stride=1, and dtype="b". However, in order to use Python's type system to differentiate between the contiguous Buffer and the n-dim (non-contiguous) NDBuffer, we keep the definition of the two classes separate. Parameters ---------- array ndarray-like object that is convertible to a regular Numpy array. """ def __init__(self, array: NDArrayLike) -> None: super().__init__(array) @classmethod def create( cls, *, shape: Iterable[int], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C", fill_value: Any | None = None, ) -> Self: if fill_value is None: return cls(np.zeros(shape=tuple(shape), dtype=dtype, order=order)) else: return cls(np.full(shape=tuple(shape), fill_value=fill_value, dtype=dtype, order=order)) @classmethod def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self: return cls.from_ndarray_like(np.asanyarray(array_like)) def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Warnings -------- Might have to copy data, consider using `.as_ndarray_like()` instead. Returns ------- NumPy array of this buffer (might be a data copy) """ return np.asanyarray(self._data) def __getitem__(self, key: Any) -> Self: return self.__class__(np.asanyarray(self._data.__getitem__(key))) def __setitem__(self, key: Any, value: Any) -> None: if isinstance(value, NDBuffer): value = value._data self._data.__setitem__(key, value) def as_numpy_array_wrapper( func: Callable[[npt.NDArray[Any]], bytes], buf: core.Buffer, prototype: core.BufferPrototype ) -> core.Buffer: """Converts the input of `func` to a numpy array and the output back to `Buffer`. This function is useful when calling a `func` that only support host memory such as `GZip.decode` and `Blosc.decode`. In this case, use this wrapper to convert the input `buf` to a Numpy array and convert the result back into a `Buffer`. Parameters ---------- func The callable that will be called with the converted `buf` as input. `func` must return bytes, which will be converted into a `Buffer` before returned. buf The buffer that will be converted to a Numpy array before given as input to `func`. prototype The prototype of the output buffer. Returns ------- The result of `func` converted to a `Buffer` """ return prototype.buffer.from_bytes(func(buf.as_numpy_array())) # CPU buffer prototype using numpy arrays buffer_prototype = core.BufferPrototype(buffer=Buffer, nd_buffer=NDBuffer) # default_buffer_prototype = buffer_prototype # The numpy prototype used for E.g. 
when reading the shard index def numpy_buffer_prototype() -> core.BufferPrototype: return core.BufferPrototype(buffer=Buffer, nd_buffer=NDBuffer) register_buffer(Buffer) register_ndbuffer(NDBuffer) zarr-python-3.0.6/src/zarr/core/buffer/gpu.py000066400000000000000000000160041476711733500211470ustar00rootroot00000000000000from __future__ import annotations import warnings from typing import ( TYPE_CHECKING, Any, Literal, cast, ) import numpy as np import numpy.typing as npt from zarr.core.buffer import core from zarr.core.buffer.core import ArrayLike, BufferPrototype, NDArrayLike from zarr.registry import ( register_buffer, register_ndbuffer, ) if TYPE_CHECKING: from collections.abc import Iterable from typing import Self from zarr.core.common import BytesLike try: import cupy as cp except ImportError: cp = None class Buffer(core.Buffer): """A flat contiguous memory block on the GPU We use Buffer throughout Zarr to represent a contiguous block of memory. A Buffer is backed by a underlying array-like instance that represents the memory. The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the array-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- This buffer is untyped, so all indexing and sizes are in bytes. Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. """ def __init__(self, array_like: ArrayLike) -> None: if cp is None: raise ImportError( "Cannot use zarr.buffer.gpu.Buffer without cupy. Please install cupy." ) if array_like.ndim != 1: raise ValueError("array_like: only 1-dim allowed") if array_like.dtype != np.dtype("b"): raise ValueError("array_like: only byte dtype allowed") if not hasattr(array_like, "__cuda_array_interface__"): # Slow copy based path for arrays that don't support the __cuda_array_interface__ # TODO: Add a fast zero-copy path for arrays that support the dlpack protocol msg = ( "Creating a zarr.buffer.gpu.Buffer with an array that does not support the " "__cuda_array_interface__ for zero-copy transfers, " "falling back to slow copy based path" ) warnings.warn( msg, stacklevel=2, ) self._data = cp.asarray(array_like) @classmethod def create_zero_length(cls) -> Self: """Create an empty buffer with length zero Returns ------- New empty 0-length buffer """ return cls(cp.array([], dtype="b")) @classmethod def from_buffer(cls, buffer: core.Buffer) -> Self: """Create an GPU Buffer given an arbitrary Buffer This will try to be zero-copy if `buffer` is already on the GPU and will trigger a copy if not. Returns ------- New GPU Buffer constructed from `buffer` """ return cls(buffer.as_array_like()) @classmethod def from_bytes(cls, bytes_like: BytesLike) -> Self: return cls.from_array_like(cp.frombuffer(bytes_like, dtype="b")) def as_numpy_array(self) -> npt.NDArray[Any]: return cast(npt.NDArray[Any], cp.asnumpy(self._data)) def __add__(self, other: core.Buffer) -> Self: other_array = other.as_array_like() assert other_array.dtype == np.dtype("b") gpu_other = Buffer(other_array) gpu_other_array = gpu_other.as_array_like() return self.__class__( cp.concatenate((cp.asanyarray(self._data), cp.asanyarray(gpu_other_array))) ) class NDBuffer(core.NDBuffer): """A n-dimensional memory block on the GPU We use NDBuffer throughout Zarr to represent a n-dimensional memory block. A NDBuffer is backed by a underlying ndarray-like instance that represents the memory. 
The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the ndarray-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- The two buffer classes Buffer and NDBuffer are very similar. In fact, Buffer is a special case of NDBuffer where dim=1, stride=1, and dtype="b". However, in order to use Python's type system to differentiate between the contiguous Buffer and the n-dim (non-contiguous) NDBuffer, we keep the definition of the two classes separate. Parameters ---------- array ndarray-like object that is convertible to a regular Numpy array. """ def __init__(self, array: NDArrayLike) -> None: if cp is None: raise ImportError( "Cannot use zarr.buffer.gpu.NDBuffer without cupy. Please install cupy." ) # assert array.ndim > 0 assert array.dtype != object self._data = array if not hasattr(array, "__cuda_array_interface__"): # Slow copy based path for arrays that don't support the __cuda_array_interface__ # TODO: Add a fast zero-copy path for arrays that support the dlpack protocol msg = ( "Creating a zarr.buffer.gpu.NDBuffer with an array that does not support the " "__cuda_array_interface__ for zero-copy transfers, " "falling back to slow copy based path" ) warnings.warn( msg, stacklevel=2, ) self._data = cp.asarray(array) @classmethod def create( cls, *, shape: Iterable[int], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C", fill_value: Any | None = None, ) -> Self: ret = cls(cp.empty(shape=tuple(shape), dtype=dtype, order=order)) if fill_value is not None: ret.fill(fill_value) return ret @classmethod def from_numpy_array(cls, array_like: npt.ArrayLike) -> Self: """Create a new buffer of Numpy array-like object Parameters ---------- array_like Object that can be coerced into a Numpy array Returns ------- New buffer representing `array_like` """ return cls(cp.asarray(array_like)) def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Warnings -------- Might have to copy data, consider using `.as_ndarray_like()` instead. 
Returns ------- NumPy array of this buffer (might be a data copy) """ return cast(npt.NDArray[Any], cp.asnumpy(self._data)) def __getitem__(self, key: Any) -> Self: return self.__class__(self._data.__getitem__(key)) def __setitem__(self, key: Any, value: Any) -> None: if isinstance(value, NDBuffer): value = value._data elif isinstance(value, core.NDBuffer): gpu_value = NDBuffer(value.as_ndarray_like()) value = gpu_value._data self._data.__setitem__(key, value) buffer_prototype = BufferPrototype(buffer=Buffer, nd_buffer=NDBuffer) register_buffer(Buffer) register_ndbuffer(NDBuffer) zarr-python-3.0.6/src/zarr/core/chunk_grids.py000066400000000000000000000201061476711733500214010ustar00rootroot00000000000000from __future__ import annotations import itertools import math import numbers import operator import warnings from abc import abstractmethod from dataclasses import dataclass from functools import reduce from typing import TYPE_CHECKING, Any, Literal import numpy as np from zarr.abc.metadata import Metadata from zarr.core.common import ( JSON, ChunkCoords, ChunkCoordsLike, ShapeLike, parse_named_configuration, parse_shapelike, ) from zarr.core.indexing import ceildiv if TYPE_CHECKING: from collections.abc import Iterator from typing import Self from zarr.core.array import ShardsLike def _guess_chunks( shape: ShapeLike, typesize: int, *, increment_bytes: int = 256 * 1024, min_bytes: int = 128 * 1024, max_bytes: int = 64 * 1024 * 1024, ) -> ChunkCoords: """ Iteratively guess an appropriate chunk layout for an array, given its shape and the size of each element in bytes, and size constraints expressed in bytes. This logic is adapted from h5py. Parameters ---------- shape : ChunkCoords The chunk shape. typesize : int The size, in bytes, of each element of the chunk. increment_bytes : int = 256 * 1024 The number of bytes used to increment or decrement the target chunk size in bytes. min_bytes : int = 128 * 1024 The soft lower bound on the final chunk size in bytes. max_bytes : int = 64 * 1024 * 1024 The hard upper bound on the final chunk size in bytes. Returns ------- ChunkCoords """ if isinstance(shape, int): shape = (shape,) ndims = len(shape) # require chunks to have non-zero length for all dimensions chunks = np.maximum(np.array(shape, dtype="=f8"), 1) # Determine the optimal chunk size in bytes using a PyTables expression. # This is kept as a float. dset_size = np.prod(chunks) * typesize target_size = increment_bytes * (2 ** np.log10(dset_size / (1024.0 * 1024))) if target_size > max_bytes: target_size = max_bytes elif target_size < min_bytes: target_size = min_bytes idx = 0 while True: # Repeatedly loop over the axes, dividing them by 2. Stop when: # 1a. We're smaller than the target chunk size, OR # 1b. We're within 50% of the target chunk size, AND # 2. 
The chunk is smaller than the maximum chunk size chunk_bytes = np.prod(chunks) * typesize if ( chunk_bytes < target_size or abs(chunk_bytes - target_size) / target_size < 0.5 ) and chunk_bytes < max_bytes: break if np.prod(chunks) == 1: break # Element size larger than max_bytes chunks[idx % ndims] = math.ceil(chunks[idx % ndims] / 2.0) idx += 1 return tuple(int(x) for x in chunks) def normalize_chunks(chunks: Any, shape: tuple[int, ...], typesize: int) -> tuple[int, ...]: """Convenience function to normalize the `chunks` argument for an array with the given `shape`.""" # N.B., expect shape already normalized # handle auto-chunking if chunks is None or chunks is True: return _guess_chunks(shape, typesize) # handle no chunking if chunks is False: return shape # handle 1D convenience form if isinstance(chunks, numbers.Integral): chunks = tuple(int(chunks) for _ in shape) # handle dask-style chunks (iterable of iterables) if all(isinstance(c, (tuple | list)) for c in chunks): # take first chunk size for each dimension chunks = tuple( c[0] for c in chunks ) # TODO: check/error/warn for irregular chunks (e.g. if c[0] != c[1:-1]) # handle bad dimensionality if len(chunks) > len(shape): raise ValueError("too many dimensions in chunks") # handle underspecified chunks if len(chunks) < len(shape): # assume chunks across remaining dimensions chunks += shape[len(chunks) :] # handle None or -1 in chunks if -1 in chunks or None in chunks: chunks = tuple( s if c == -1 or c is None else int(c) for s, c in zip(shape, chunks, strict=False) ) if not all(isinstance(c, numbers.Integral) for c in chunks): raise TypeError("non integer value in chunks") return tuple(int(c) for c in chunks) @dataclass(frozen=True) class ChunkGrid(Metadata): @classmethod def from_dict(cls, data: dict[str, JSON] | ChunkGrid) -> ChunkGrid: if isinstance(data, ChunkGrid): return data name_parsed, _ = parse_named_configuration(data) if name_parsed == "regular": return RegularChunkGrid._from_dict(data) raise ValueError(f"Unknown chunk grid. Got {name_parsed}.") @abstractmethod def all_chunk_coords(self, array_shape: ChunkCoords) -> Iterator[ChunkCoords]: pass @abstractmethod def get_nchunks(self, array_shape: ChunkCoords) -> int: pass @dataclass(frozen=True) class RegularChunkGrid(ChunkGrid): chunk_shape: ChunkCoords def __init__(self, *, chunk_shape: ChunkCoordsLike) -> None: chunk_shape_parsed = parse_shapelike(chunk_shape) object.__setattr__(self, "chunk_shape", chunk_shape_parsed) @classmethod def _from_dict(cls, data: dict[str, JSON]) -> Self: _, configuration_parsed = parse_named_configuration(data, "regular") return cls(**configuration_parsed) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: return {"name": "regular", "configuration": {"chunk_shape": tuple(self.chunk_shape)}} def all_chunk_coords(self, array_shape: ChunkCoords) -> Iterator[ChunkCoords]: return itertools.product( *(range(ceildiv(s, c)) for s, c in zip(array_shape, self.chunk_shape, strict=False)) ) def get_nchunks(self, array_shape: ChunkCoords) -> int: return reduce( operator.mul, itertools.starmap(ceildiv, zip(array_shape, self.chunk_shape, strict=True)), 1, ) def _auto_partition( *, array_shape: tuple[int, ...], chunk_shape: tuple[int, ...] | Literal["auto"], shard_shape: ShardsLike | None, dtype: np.dtype[Any], ) -> tuple[tuple[int, ...] | None, tuple[int, ...]]: """ Automatically determine the shard shape and chunk shape for an array, given the shape and dtype of the array. 
If `shard_shape` is `None` and the chunk_shape is "auto", the chunks will be set heuristically based on the dtype and shape of the array. If `shard_shape` is "auto", then the shard shape will be set heuristically from the dtype and shape of the array; if the `chunk_shape` is also "auto", then the chunks will be set heuristically as well, given the dtype and shard shape. Otherwise, the chunks will be returned as-is. """ item_size = dtype.itemsize if shard_shape is None: _shards_out: None | tuple[int, ...] = None if chunk_shape == "auto": _chunks_out = _guess_chunks(array_shape, item_size) else: _chunks_out = chunk_shape else: if chunk_shape == "auto": # aim for a 1MiB chunk _chunks_out = _guess_chunks(array_shape, item_size, max_bytes=1024) else: _chunks_out = chunk_shape if shard_shape == "auto": warnings.warn( "Automatic shard shape inference is experimental and may change without notice.", UserWarning, stacklevel=2, ) _shards_out = () for a_shape, c_shape in zip(array_shape, _chunks_out, strict=True): # TODO: make a better heuristic than this. # for each axis, if there are more than 8 chunks along that axis, then put # 2 chunks in each shard for that axis. if a_shape // c_shape > 8: _shards_out += (c_shape * 2,) else: _shards_out += (c_shape,) elif isinstance(shard_shape, dict): _shards_out = tuple(shard_shape["shape"]) else: _shards_out = shard_shape return _shards_out, _chunks_out zarr-python-3.0.6/src/zarr/core/chunk_key_encodings.py000066400000000000000000000071231476711733500231160ustar00rootroot00000000000000from __future__ import annotations from abc import abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Literal, TypeAlias, TypedDict, cast if TYPE_CHECKING: from typing import NotRequired from zarr.abc.metadata import Metadata from zarr.core.common import ( JSON, ChunkCoords, parse_named_configuration, ) SeparatorLiteral = Literal[".", "/"] def parse_separator(data: JSON) -> SeparatorLiteral: if data not in (".", "/"): raise ValueError(f"Expected an '.' or '/' separator. Got {data} instead.") return cast(SeparatorLiteral, data) class ChunkKeyEncodingParams(TypedDict): name: Literal["v2", "default"] separator: NotRequired[SeparatorLiteral] @dataclass(frozen=True) class ChunkKeyEncoding(Metadata): name: str separator: SeparatorLiteral = "." def __init__(self, *, separator: SeparatorLiteral) -> None: separator_parsed = parse_separator(separator) object.__setattr__(self, "separator", separator_parsed) @classmethod def from_dict(cls, data: dict[str, JSON] | ChunkKeyEncodingLike) -> ChunkKeyEncoding: if isinstance(data, ChunkKeyEncoding): return data # handle ChunkKeyEncodingParams if "name" in data and "separator" in data: data = {"name": data["name"], "configuration": {"separator": data["separator"]}} # TODO: remove this cast when we are statically typing the JSON metadata completely. data = cast(dict[str, JSON], data) # configuration is optional for chunk key encodings name_parsed, config_parsed = parse_named_configuration(data, require_configuration=False) if name_parsed == "default": if config_parsed is None: # for default, normalize missing configuration to use the "/" separator. config_parsed = {"separator": "/"} return DefaultChunkKeyEncoding(**config_parsed) # type: ignore[arg-type] if name_parsed == "v2": if config_parsed is None: # for v2, normalize missing configuration to use the "." separator. config_parsed = {"separator": "."} return V2ChunkKeyEncoding(**config_parsed) # type: ignore[arg-type] msg = f"Unknown chunk key encoding. 
Got {name_parsed}, expected one of ('v2', 'default')." raise ValueError(msg) def to_dict(self) -> dict[str, JSON]: return {"name": self.name, "configuration": {"separator": self.separator}} @abstractmethod def decode_chunk_key(self, chunk_key: str) -> ChunkCoords: pass @abstractmethod def encode_chunk_key(self, chunk_coords: ChunkCoords) -> str: pass ChunkKeyEncodingLike: TypeAlias = ChunkKeyEncodingParams | ChunkKeyEncoding @dataclass(frozen=True) class DefaultChunkKeyEncoding(ChunkKeyEncoding): name: Literal["default"] = "default" def decode_chunk_key(self, chunk_key: str) -> ChunkCoords: if chunk_key == "c": return () return tuple(map(int, chunk_key[1:].split(self.separator))) def encode_chunk_key(self, chunk_coords: ChunkCoords) -> str: return self.separator.join(map(str, ("c",) + chunk_coords)) @dataclass(frozen=True) class V2ChunkKeyEncoding(ChunkKeyEncoding): name: Literal["v2"] = "v2" def decode_chunk_key(self, chunk_key: str) -> ChunkCoords: return tuple(map(int, chunk_key.split(self.separator))) def encode_chunk_key(self, chunk_coords: ChunkCoords) -> str: chunk_identifier = self.separator.join(map(str, chunk_coords)) return "0" if chunk_identifier == "" else chunk_identifier zarr-python-3.0.6/src/zarr/core/codec_pipeline.py000066400000000000000000000523761476711733500220610ustar00rootroot00000000000000from __future__ import annotations from dataclasses import dataclass from itertools import islice, pairwise from typing import TYPE_CHECKING, Any, TypeVar from warnings import warn from zarr.abc.codec import ( ArrayArrayCodec, ArrayBytesCodec, ArrayBytesCodecPartialDecodeMixin, ArrayBytesCodecPartialEncodeMixin, BytesBytesCodec, Codec, CodecPipeline, ) from zarr.core.common import ChunkCoords, concurrent_map from zarr.core.config import config from zarr.core.indexing import SelectorTuple, is_scalar from zarr.core.metadata.v2 import _default_fill_value from zarr.registry import register_pipeline if TYPE_CHECKING: from collections.abc import Iterable, Iterator from typing import Self import numpy as np from zarr.abc.store import ByteGetter, ByteSetter from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer, BufferPrototype, NDBuffer from zarr.core.chunk_grids import ChunkGrid T = TypeVar("T") U = TypeVar("U") def _unzip2(iterable: Iterable[tuple[T, U]]) -> tuple[list[T], list[U]]: out0: list[T] = [] out1: list[U] = [] for item0, item1 in iterable: out0.append(item0) out1.append(item1) return (out0, out1) def batched(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]: if n < 1: raise ValueError("n must be at least one") it = iter(iterable) while batch := tuple(islice(it, n)): yield batch def resolve_batched(codec: Codec, chunk_specs: Iterable[ArraySpec]) -> Iterable[ArraySpec]: return [codec.resolve_metadata(chunk_spec) for chunk_spec in chunk_specs] def fill_value_or_default(chunk_spec: ArraySpec) -> Any: fill_value = chunk_spec.fill_value if fill_value is None: # Zarr V2 allowed `fill_value` to be null in the metadata. # Zarr V3 requires it to be set. This has already been # validated when decoding the metadata, but we support reading # Zarr V2 data and need to support the case where fill_value # is None. return _default_fill_value(dtype=chunk_spec.dtype) else: return fill_value @dataclass(frozen=True) class BatchedCodecPipeline(CodecPipeline): """Default codec pipeline. This batched codec pipeline divides the chunk batches into batches of a configurable batch size ("mini-batch"). 
Fetching, decoding, encoding and storing are performed in lock step for each mini-batch. Multiple mini-batches are processing concurrently. """ array_array_codecs: tuple[ArrayArrayCodec, ...] array_bytes_codec: ArrayBytesCodec bytes_bytes_codecs: tuple[BytesBytesCodec, ...] batch_size: int def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self: return type(self).from_codecs(c.evolve_from_array_spec(array_spec=array_spec) for c in self) @classmethod def from_codecs(cls, codecs: Iterable[Codec], *, batch_size: int | None = None) -> Self: array_array_codecs, array_bytes_codec, bytes_bytes_codecs = codecs_from_list(codecs) return cls( array_array_codecs=array_array_codecs, array_bytes_codec=array_bytes_codec, bytes_bytes_codecs=bytes_bytes_codecs, batch_size=batch_size or config.get("codec_pipeline.batch_size"), ) @property def supports_partial_decode(self) -> bool: """Determines whether the codec pipeline supports partial decoding. Currently, only codec pipelines with a single ArrayBytesCodec that supports partial decoding can support partial decoding. This limitation is due to the fact that ArrayArrayCodecs can change the slice selection leading to non-contiguous slices and BytesBytesCodecs can change the chunk bytes in a way that slice selections cannot be attributed to byte ranges anymore which renders partial decoding infeasible. This limitation may softened in the future.""" return (len(self.array_array_codecs) + len(self.bytes_bytes_codecs)) == 0 and isinstance( self.array_bytes_codec, ArrayBytesCodecPartialDecodeMixin ) @property def supports_partial_encode(self) -> bool: """Determines whether the codec pipeline supports partial encoding. Currently, only codec pipelines with a single ArrayBytesCodec that supports partial encoding can support partial encoding. This limitation is due to the fact that ArrayArrayCodecs can change the slice selection leading to non-contiguous slices and BytesBytesCodecs can change the chunk bytes in a way that slice selections cannot be attributed to byte ranges anymore which renders partial encoding infeasible. 
This limitation may softened in the future.""" return (len(self.array_array_codecs) + len(self.bytes_bytes_codecs)) == 0 and isinstance( self.array_bytes_codec, ArrayBytesCodecPartialEncodeMixin ) def __iter__(self) -> Iterator[Codec]: yield from self.array_array_codecs yield self.array_bytes_codec yield from self.bytes_bytes_codecs def validate(self, *, shape: ChunkCoords, dtype: np.dtype[Any], chunk_grid: ChunkGrid) -> None: for codec in self: codec.validate(shape=shape, dtype=dtype, chunk_grid=chunk_grid) def compute_encoded_size(self, byte_length: int, array_spec: ArraySpec) -> int: for codec in self: byte_length = codec.compute_encoded_size(byte_length, array_spec) array_spec = codec.resolve_metadata(array_spec) return byte_length def _codecs_with_resolved_metadata_batched( self, chunk_specs: Iterable[ArraySpec] ) -> tuple[ list[tuple[ArrayArrayCodec, list[ArraySpec]]], tuple[ArrayBytesCodec, list[ArraySpec]], list[tuple[BytesBytesCodec, list[ArraySpec]]], ]: aa_codecs_with_spec: list[tuple[ArrayArrayCodec, list[ArraySpec]]] = [] chunk_specs = list(chunk_specs) for aa_codec in self.array_array_codecs: aa_codecs_with_spec.append((aa_codec, chunk_specs)) chunk_specs = [aa_codec.resolve_metadata(chunk_spec) for chunk_spec in chunk_specs] ab_codec_with_spec = (self.array_bytes_codec, chunk_specs) chunk_specs = [ self.array_bytes_codec.resolve_metadata(chunk_spec) for chunk_spec in chunk_specs ] bb_codecs_with_spec: list[tuple[BytesBytesCodec, list[ArraySpec]]] = [] for bb_codec in self.bytes_bytes_codecs: bb_codecs_with_spec.append((bb_codec, chunk_specs)) chunk_specs = [bb_codec.resolve_metadata(chunk_spec) for chunk_spec in chunk_specs] return (aa_codecs_with_spec, ab_codec_with_spec, bb_codecs_with_spec) async def decode_batch( self, chunk_bytes_and_specs: Iterable[tuple[Buffer | None, ArraySpec]], ) -> Iterable[NDBuffer | None]: chunk_bytes_batch: Iterable[Buffer | None] chunk_bytes_batch, chunk_specs = _unzip2(chunk_bytes_and_specs) ( aa_codecs_with_spec, ab_codec_with_spec, bb_codecs_with_spec, ) = self._codecs_with_resolved_metadata_batched(chunk_specs) for bb_codec, chunk_spec_batch in bb_codecs_with_spec[::-1]: chunk_bytes_batch = await bb_codec.decode( zip(chunk_bytes_batch, chunk_spec_batch, strict=False) ) ab_codec, chunk_spec_batch = ab_codec_with_spec chunk_array_batch = await ab_codec.decode( zip(chunk_bytes_batch, chunk_spec_batch, strict=False) ) for aa_codec, chunk_spec_batch in aa_codecs_with_spec[::-1]: chunk_array_batch = await aa_codec.decode( zip(chunk_array_batch, chunk_spec_batch, strict=False) ) return chunk_array_batch async def decode_partial_batch( self, batch_info: Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]], ) -> Iterable[NDBuffer | None]: assert self.supports_partial_decode assert isinstance(self.array_bytes_codec, ArrayBytesCodecPartialDecodeMixin) return await self.array_bytes_codec.decode_partial(batch_info) async def encode_batch( self, chunk_arrays_and_specs: Iterable[tuple[NDBuffer | None, ArraySpec]], ) -> Iterable[Buffer | None]: chunk_array_batch: Iterable[NDBuffer | None] chunk_specs: Iterable[ArraySpec] chunk_array_batch, chunk_specs = _unzip2(chunk_arrays_and_specs) for aa_codec in self.array_array_codecs: chunk_array_batch = await aa_codec.encode( zip(chunk_array_batch, chunk_specs, strict=False) ) chunk_specs = resolve_batched(aa_codec, chunk_specs) chunk_bytes_batch = await self.array_bytes_codec.encode( zip(chunk_array_batch, chunk_specs, strict=False) ) chunk_specs = resolve_batched(self.array_bytes_codec, chunk_specs) for 
bb_codec in self.bytes_bytes_codecs: chunk_bytes_batch = await bb_codec.encode( zip(chunk_bytes_batch, chunk_specs, strict=False) ) chunk_specs = resolve_batched(bb_codec, chunk_specs) return chunk_bytes_batch async def encode_partial_batch( self, batch_info: Iterable[tuple[ByteSetter, NDBuffer, SelectorTuple, ArraySpec]], ) -> None: assert self.supports_partial_encode assert isinstance(self.array_bytes_codec, ArrayBytesCodecPartialEncodeMixin) await self.array_bytes_codec.encode_partial(batch_info) async def read_batch( self, batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], out: NDBuffer, drop_axes: tuple[int, ...] = (), ) -> None: if self.supports_partial_decode: chunk_array_batch = await self.decode_partial_batch( [ (byte_getter, chunk_selection, chunk_spec) for byte_getter, chunk_spec, chunk_selection, *_ in batch_info ] ) for chunk_array, (_, chunk_spec, _, out_selection, _) in zip( chunk_array_batch, batch_info, strict=False ): if chunk_array is not None: out[out_selection] = chunk_array else: out[out_selection] = fill_value_or_default(chunk_spec) else: chunk_bytes_batch = await concurrent_map( [(byte_getter, array_spec.prototype) for byte_getter, array_spec, *_ in batch_info], lambda byte_getter, prototype: byte_getter.get(prototype), config.get("async.concurrency"), ) chunk_array_batch = await self.decode_batch( [ (chunk_bytes, chunk_spec) for chunk_bytes, (_, chunk_spec, *_) in zip( chunk_bytes_batch, batch_info, strict=False ) ], ) for chunk_array, (_, chunk_spec, chunk_selection, out_selection, _) in zip( chunk_array_batch, batch_info, strict=False ): if chunk_array is not None: tmp = chunk_array[chunk_selection] if drop_axes != (): tmp = tmp.squeeze(axis=drop_axes) out[out_selection] = tmp else: out[out_selection] = fill_value_or_default(chunk_spec) def _merge_chunk_array( self, existing_chunk_array: NDBuffer | None, value: NDBuffer, out_selection: SelectorTuple, chunk_spec: ArraySpec, chunk_selection: SelectorTuple, is_complete_chunk: bool, drop_axes: tuple[int, ...], ) -> NDBuffer: if chunk_selection == () or is_scalar(value.as_ndarray_like(), chunk_spec.dtype): chunk_value = value else: chunk_value = value[out_selection] # handle missing singleton dimensions if drop_axes != (): item = tuple( None # equivalent to np.newaxis if idx in drop_axes else slice(None) for idx in range(chunk_spec.ndim) ) chunk_value = chunk_value[item] if is_complete_chunk and chunk_value.shape == chunk_spec.shape: # TODO: For the last chunk, we could have is_complete_chunk=True # that is smaller than the chunk_spec.shape but this throws # an error in the _decode_single return chunk_value if existing_chunk_array is None: chunk_array = chunk_spec.prototype.nd_buffer.create( shape=chunk_spec.shape, dtype=chunk_spec.dtype, order=chunk_spec.order, fill_value=fill_value_or_default(chunk_spec), ) else: chunk_array = existing_chunk_array.copy() # make a writable copy chunk_array[chunk_selection] = chunk_value return chunk_array async def write_batch( self, batch_info: Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], value: NDBuffer, drop_axes: tuple[int, ...] 
= (), ) -> None: if self.supports_partial_encode: # Pass scalar values as is if len(value.shape) == 0: await self.encode_partial_batch( [ (byte_setter, value, chunk_selection, chunk_spec) for byte_setter, chunk_spec, chunk_selection, out_selection, _ in batch_info ], ) else: await self.encode_partial_batch( [ (byte_setter, value[out_selection], chunk_selection, chunk_spec) for byte_setter, chunk_spec, chunk_selection, out_selection, _ in batch_info ], ) else: # Read existing bytes if not total slice async def _read_key( byte_setter: ByteSetter | None, prototype: BufferPrototype ) -> Buffer | None: if byte_setter is None: return None return await byte_setter.get(prototype=prototype) chunk_bytes_batch: Iterable[Buffer | None] chunk_bytes_batch = await concurrent_map( [ ( None if is_complete_chunk else byte_setter, chunk_spec.prototype, ) for byte_setter, chunk_spec, chunk_selection, _, is_complete_chunk in batch_info ], _read_key, config.get("async.concurrency"), ) chunk_array_decoded = await self.decode_batch( [ (chunk_bytes, chunk_spec) for chunk_bytes, (_, chunk_spec, *_) in zip( chunk_bytes_batch, batch_info, strict=False ) ], ) chunk_array_merged = [ self._merge_chunk_array( chunk_array, value, out_selection, chunk_spec, chunk_selection, is_complete_chunk, drop_axes, ) for chunk_array, ( _, chunk_spec, chunk_selection, out_selection, is_complete_chunk, ) in zip(chunk_array_decoded, batch_info, strict=False) ] chunk_array_batch: list[NDBuffer | None] = [] for chunk_array, (_, chunk_spec, *_) in zip( chunk_array_merged, batch_info, strict=False ): if chunk_array is None: chunk_array_batch.append(None) # type: ignore[unreachable] else: if not chunk_spec.config.write_empty_chunks and chunk_array.all_equal( fill_value_or_default(chunk_spec) ): chunk_array_batch.append(None) else: chunk_array_batch.append(chunk_array) chunk_bytes_batch = await self.encode_batch( [ (chunk_array, chunk_spec) for chunk_array, (_, chunk_spec, *_) in zip( chunk_array_batch, batch_info, strict=False ) ], ) async def _write_key(byte_setter: ByteSetter, chunk_bytes: Buffer | None) -> None: if chunk_bytes is None: await byte_setter.delete() else: await byte_setter.set(chunk_bytes) await concurrent_map( [ (byte_setter, chunk_bytes) for chunk_bytes, (byte_setter, *_) in zip( chunk_bytes_batch, batch_info, strict=False ) ], _write_key, config.get("async.concurrency"), ) async def decode( self, chunk_bytes_and_specs: Iterable[tuple[Buffer | None, ArraySpec]], ) -> Iterable[NDBuffer | None]: output: list[NDBuffer | None] = [] for batch_info in batched(chunk_bytes_and_specs, self.batch_size): output.extend(await self.decode_batch(batch_info)) return output async def encode( self, chunk_arrays_and_specs: Iterable[tuple[NDBuffer | None, ArraySpec]], ) -> Iterable[Buffer | None]: output: list[Buffer | None] = [] for single_batch_info in batched(chunk_arrays_and_specs, self.batch_size): output.extend(await self.encode_batch(single_batch_info)) return output async def read( self, batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], out: NDBuffer, drop_axes: tuple[int, ...] = (), ) -> None: await concurrent_map( [ (single_batch_info, out, drop_axes) for single_batch_info in batched(batch_info, self.batch_size) ], self.read_batch, config.get("async.concurrency"), ) async def write( self, batch_info: Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple, bool]], value: NDBuffer, drop_axes: tuple[int, ...] 
= (),
    ) -> None:
        await concurrent_map(
            [
                (single_batch_info, value, drop_axes)
                for single_batch_info in batched(batch_info, self.batch_size)
            ],
            self.write_batch,
            config.get("async.concurrency"),
        )


def codecs_from_list(
    codecs: Iterable[Codec],
) -> tuple[tuple[ArrayArrayCodec, ...], ArrayBytesCodec, tuple[BytesBytesCodec, ...]]:
    from zarr.codecs.sharding import ShardingCodec

    array_array: tuple[ArrayArrayCodec, ...] = ()
    array_bytes_maybe: ArrayBytesCodec | None = None
    bytes_bytes: tuple[BytesBytesCodec, ...] = ()

    if any(isinstance(codec, ShardingCodec) for codec in codecs) and len(tuple(codecs)) > 1:
        warn(
            "Combining a `sharding_indexed` codec disables partial reads and "
            "writes, which may lead to inefficient performance.",
            stacklevel=3,
        )

    for prev_codec, cur_codec in pairwise((None, *codecs)):
        if isinstance(cur_codec, ArrayArrayCodec):
            if isinstance(prev_codec, ArrayBytesCodec | BytesBytesCodec):
                msg = (
                    f"Invalid codec order. ArrayArrayCodec {cur_codec} "
                    "must be preceded by another ArrayArrayCodec. "
                    f"Got {type(prev_codec)} instead."
                )
                raise TypeError(msg)
            array_array += (cur_codec,)
        elif isinstance(cur_codec, ArrayBytesCodec):
            if isinstance(prev_codec, BytesBytesCodec):
                msg = (
                    f"Invalid codec order. ArrayBytes codec {cur_codec}"
                    f" must be preceded by an ArrayArrayCodec. Got {type(prev_codec)} instead."
                )
                raise TypeError(msg)

            if array_bytes_maybe is not None:
                msg = (
                    f"Got two instances of ArrayBytesCodec: {array_bytes_maybe} and {cur_codec}. "
                    "Only one array-to-bytes codec is allowed."
                )
                raise ValueError(msg)

            array_bytes_maybe = cur_codec
        elif isinstance(cur_codec, BytesBytesCodec):
            if isinstance(prev_codec, ArrayArrayCodec):
                msg = (
                    f"Invalid codec order. BytesBytesCodec {cur_codec} "
                    "must be preceded by either another BytesBytesCodec, or an ArrayBytesCodec. "
                    f"Got {type(prev_codec)} instead."
                )
                raise TypeError(msg)
            bytes_bytes += (cur_codec,)
        else:
            raise TypeError

    if array_bytes_maybe is None:
        raise ValueError("Required ArrayBytesCodec was not found.")
    else:
        return array_array, array_bytes_maybe, bytes_bytes


register_pipeline(BatchedCodecPipeline)
zarr-python-3.0.6/src/zarr/core/common.py000066400000000000000000000142021476711733500203710ustar00rootroot00000000000000
from __future__ import annotations

import asyncio
import functools
import operator
import warnings
from collections.abc import Iterable, Mapping, Sequence
from enum import Enum
from itertools import starmap
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    TypeVar,
    cast,
    overload,
)

import numpy as np

from zarr.core.config import config as zarr_config
from zarr.core.strings import _STRING_DTYPE

if TYPE_CHECKING:
    from collections.abc import Awaitable, Callable, Iterator

ZARR_JSON = "zarr.json"
ZARRAY_JSON = ".zarray"
ZGROUP_JSON = ".zgroup"
ZATTRS_JSON = ".zattrs"
ZMETADATA_V2_JSON = ".zmetadata"

BytesLike = bytes | bytearray | memoryview
ShapeLike = tuple[int, ...] | int
ChunkCoords = tuple[int, ...]
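
# Example (illustrative): these are plain typing aliases, so a chunk coordinate is
# an ordinary tuple of ints, e.g.
#
#     key: ChunkCoords = (0, 1, 2)
#     product((64, 64)) * 8  # -> 32768, the bytes in a 64x64 float64 chunk
#
# ``product`` (defined below) reduces with multiplication and returns 1 for ().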
ChunkCoordsLike = Iterable[int] ZarrFormat = Literal[2, 3] NodeType = Literal["array", "group"] JSON = str | int | float | Mapping[str, "JSON"] | Sequence["JSON"] | None MemoryOrder = Literal["C", "F"] AccessModeLiteral = Literal["r", "r+", "a", "w", "w-"] def product(tup: ChunkCoords) -> int: return functools.reduce(operator.mul, tup, 1) T = TypeVar("T", bound=tuple[Any, ...]) V = TypeVar("V") async def concurrent_map( items: Iterable[T], func: Callable[..., Awaitable[V]], limit: int | None = None, ) -> list[V]: if limit is None: return await asyncio.gather(*list(starmap(func, items))) else: sem = asyncio.Semaphore(limit) async def run(item: tuple[Any]) -> V: async with sem: return await func(*item) return await asyncio.gather(*[asyncio.ensure_future(run(item)) for item in items]) E = TypeVar("E", bound=Enum) def enum_names(enum: type[E]) -> Iterator[str]: for item in enum: yield item.name def parse_enum(data: object, cls: type[E]) -> E: if isinstance(data, cls): return data if not isinstance(data, str): raise TypeError(f"Expected str, got {type(data)}") if data in enum_names(cls): return cls(data) raise ValueError(f"Value must be one of {list(enum_names(cls))!r}. Got {data} instead.") def parse_name(data: JSON, expected: str | None = None) -> str: if isinstance(data, str): if expected is None or data == expected: return data raise ValueError(f"Expected '{expected}'. Got {data} instead.") else: raise TypeError(f"Expected a string, got an instance of {type(data)}.") def parse_configuration(data: JSON) -> JSON: if not isinstance(data, dict): raise TypeError(f"Expected dict, got {type(data)}") return data @overload def parse_named_configuration( data: JSON, expected_name: str | None = None ) -> tuple[str, dict[str, JSON]]: ... @overload def parse_named_configuration( data: JSON, expected_name: str | None = None, *, require_configuration: bool = True ) -> tuple[str, dict[str, JSON] | None]: ... def parse_named_configuration( data: JSON, expected_name: str | None = None, *, require_configuration: bool = True ) -> tuple[str, JSON | None]: if not isinstance(data, dict): raise TypeError(f"Expected dict, got {type(data)}") if "name" not in data: raise ValueError(f"Named configuration does not have a 'name' key. Got {data}.") name_parsed = parse_name(data["name"], expected_name) if "configuration" in data: configuration_parsed = parse_configuration(data["configuration"]) elif require_configuration: raise ValueError(f"Named configuration does not have a 'configuration' key. Got {data}.") else: configuration_parsed = None return name_parsed, configuration_parsed def parse_shapelike(data: int | Iterable[int]) -> tuple[int, ...]: if isinstance(data, int): if data < 0: raise ValueError(f"Expected a non-negative integer. Got {data} instead") return (data,) try: data_tuple = tuple(data) except TypeError as e: msg = f"Expected an integer or an iterable of integers. Got {data} instead." raise TypeError(msg) from e if not all(isinstance(v, int) for v in data_tuple): msg = f"Expected an iterable of integers. Got {data} instead." raise TypeError(msg) if not all(v > -1 for v in data_tuple): msg = f"Expected all values to be non-negative. Got {data} instead." 
raise ValueError(msg) return data_tuple def parse_fill_value(data: Any) -> Any: # todo: real validation return data def parse_order(data: Any) -> Literal["C", "F"]: if data in ("C", "F"): return cast(Literal["C", "F"], data) raise ValueError(f"Expected one of ('C', 'F'), got {data} instead.") def parse_bool(data: Any) -> bool: if isinstance(data, bool): return data raise ValueError(f"Expected bool, got {data} instead.") def parse_dtype(dtype: Any, zarr_format: ZarrFormat) -> np.dtype[Any]: if dtype is str or dtype == "str": if zarr_format == 2: # special case as object return np.dtype("object") else: return _STRING_DTYPE return np.dtype(dtype) def _warn_write_empty_chunks_kwarg() -> None: # TODO: link to docs page on array configuration in this message msg = ( "The `write_empty_chunks` keyword argument is deprecated and will be removed in future versions. " "To control whether empty chunks are written to storage, either use the `config` keyword " "argument, as in `config={'write_empty_chunks: True}`," "or change the global 'array.write_empty_chunks' configuration variable." ) warnings.warn(msg, RuntimeWarning, stacklevel=2) def _warn_order_kwarg() -> None: # TODO: link to docs page on array configuration in this message msg = ( "The `order` keyword argument has no effect for Zarr format 3 arrays. " "To control the memory layout of the array, either use the `config` keyword " "argument, as in `config={'order: 'C'}`," "or change the global 'array.order' configuration variable." ) warnings.warn(msg, RuntimeWarning, stacklevel=2) def _default_zarr_format() -> ZarrFormat: """Return the default zarr_version""" return cast(ZarrFormat, int(zarr_config.get("default_zarr_format", 3))) zarr-python-3.0.6/src/zarr/core/config.py000066400000000000000000000123711476711733500203530ustar00rootroot00000000000000""" The config module is responsible for managing the configuration of zarr and is based on the Donfig python library. For selecting custom implementations of codecs, pipelines, buffers and ndbuffers, first register the implementations in the registry and then select them in the config. Example: An implementation of the bytes codec in a class ``your.module.NewBytesCodec`` requires the value of ``codecs.bytes`` to be ``your.module.NewBytesCodec``. Donfig can be configured programmatically, by environment variables, or from YAML files in standard locations. .. code-block:: python from your.module import NewBytesCodec from zarr.core.config import register_codec, config register_codec("bytes", NewBytesCodec) config.set({"codecs.bytes": "your.module.NewBytesCodec"}) Instead of setting the value programmatically with ``config.set``, you can also set the value with an environment variable. The environment variable ``ZARR_CODECS__BYTES`` can be set to ``your.module.NewBytesCodec``. The double underscore ``__`` is used to indicate nested access. .. code-block:: bash export ZARR_CODECS__BYTES="your.module.NewBytesCodec" For more information, see the Donfig documentation at https://github.com/pytroll/donfig. 
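
Values can be read back with ``config.get`` using the same dotted keys, and
``config.set`` also works as a context manager for temporary overrides. For
example (illustrative; the values shown are the defaults defined below):

.. code-block:: python

    from zarr.core.config import config

    config.get("async.concurrency")  # 10 by default

    with config.set({"array.order": "F"}):
        ...  # the previous configuration is restored on exit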
""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal, cast from donfig import Config as DConfig if TYPE_CHECKING: from donfig.config_obj import ConfigSet class BadConfigError(ValueError): _msg = "bad Config: %r" class Config(DConfig): # type: ignore[misc] """The Config will collect configuration from config files and environment variables Example environment variables: Grabs environment variables of the form "ZARR_FOO__BAR_BAZ=123" and turns these into config variables of the form ``{"foo": {"bar-baz": 123}}`` It transforms the key and value in the following way: - Lower-cases the key text - Treats ``__`` (double-underscore) as nested access - Calls ``ast.literal_eval`` on the value """ def reset(self) -> None: self.clear() self.refresh() def enable_gpu(self) -> ConfigSet: """ Configure Zarr to use GPUs where possible. """ return self.set( {"buffer": "zarr.core.buffer.gpu.Buffer", "ndbuffer": "zarr.core.buffer.gpu.NDBuffer"} ) # The default configuration for zarr config = Config( "zarr", defaults=[ { "default_zarr_format": 3, "array": { "order": "C", "write_empty_chunks": False, "v2_default_compressor": { "numeric": {"id": "zstd", "level": 0, "checksum": False}, "string": {"id": "zstd", "level": 0, "checksum": False}, "bytes": {"id": "zstd", "level": 0, "checksum": False}, }, "v2_default_filters": { "numeric": None, "string": [{"id": "vlen-utf8"}], "bytes": [{"id": "vlen-bytes"}], "raw": None, }, "v3_default_filters": {"numeric": [], "string": [], "bytes": []}, "v3_default_serializer": { "numeric": {"name": "bytes", "configuration": {"endian": "little"}}, "string": {"name": "vlen-utf8"}, "bytes": {"name": "vlen-bytes"}, }, "v3_default_compressors": { "numeric": [ {"name": "zstd", "configuration": {"level": 0, "checksum": False}}, ], "string": [ {"name": "zstd", "configuration": {"level": 0, "checksum": False}}, ], "bytes": [ {"name": "zstd", "configuration": {"level": 0, "checksum": False}}, ], }, }, "async": {"concurrency": 10, "timeout": None}, "threading": {"max_workers": None}, "json_indent": 2, "codec_pipeline": { "path": "zarr.core.codec_pipeline.BatchedCodecPipeline", "batch_size": 1, }, "codecs": { "blosc": "zarr.codecs.blosc.BloscCodec", "gzip": "zarr.codecs.gzip.GzipCodec", "zstd": "zarr.codecs.zstd.ZstdCodec", "bytes": "zarr.codecs.bytes.BytesCodec", "endian": "zarr.codecs.bytes.BytesCodec", # compatibility with earlier versions of ZEP1 "crc32c": "zarr.codecs.crc32c_.Crc32cCodec", "sharding_indexed": "zarr.codecs.sharding.ShardingCodec", "transpose": "zarr.codecs.transpose.TransposeCodec", "vlen-utf8": "zarr.codecs.vlen_utf8.VLenUTF8Codec", "vlen-bytes": "zarr.codecs.vlen_utf8.VLenBytesCodec", }, "buffer": "zarr.core.buffer.cpu.Buffer", "ndbuffer": "zarr.core.buffer.cpu.NDBuffer", } ], ) def parse_indexing_order(data: Any) -> Literal["C", "F"]: if data in ("C", "F"): return cast(Literal["C", "F"], data) msg = f"Expected one of ('C', 'F'), got {data} instead." 
raise ValueError(msg) zarr-python-3.0.6/src/zarr/core/group.py000066400000000000000000004122471476711733500202500ustar00rootroot00000000000000from __future__ import annotations import asyncio import itertools import json import logging import warnings from collections import defaultdict from collections.abc import Iterator, Mapping from dataclasses import asdict, dataclass, field, fields, replace from itertools import accumulate from typing import TYPE_CHECKING, Literal, TypeVar, assert_never, cast, overload import numpy as np import numpy.typing as npt from typing_extensions import deprecated import zarr.api.asynchronous as async_api from zarr._compat import _deprecate_positional_args from zarr.abc.metadata import Metadata from zarr.abc.store import Store, set_or_delete from zarr.core._info import GroupInfo from zarr.core.array import ( Array, AsyncArray, CompressorLike, CompressorsLike, FiltersLike, SerializerLike, ShardsLike, _build_parents, _parse_deprecated_compressor, create_array, ) from zarr.core.attributes import Attributes from zarr.core.buffer import default_buffer_prototype from zarr.core.common import ( JSON, ZARR_JSON, ZARRAY_JSON, ZATTRS_JSON, ZGROUP_JSON, ZMETADATA_V2_JSON, ChunkCoords, NodeType, ShapeLike, ZarrFormat, parse_shapelike, ) from zarr.core.config import config from zarr.core.metadata import ArrayV2Metadata, ArrayV3Metadata from zarr.core.metadata.v3 import V3JsonEncoder from zarr.core.sync import SyncMixin, sync from zarr.errors import ContainsArrayError, ContainsGroupError, MetadataValidationError from zarr.storage import StoreLike, StorePath from zarr.storage._common import ensure_no_existing_node, make_store_path from zarr.storage._utils import _join_paths, _normalize_path_keys, normalize_path if TYPE_CHECKING: from collections.abc import ( AsyncGenerator, AsyncIterator, Coroutine, Generator, Iterable, ) from typing import Any from zarr.core.array_spec import ArrayConfig, ArrayConfigLike from zarr.core.buffer import Buffer, BufferPrototype from zarr.core.chunk_key_encodings import ChunkKeyEncoding, ChunkKeyEncodingLike from zarr.core.common import MemoryOrder logger = logging.getLogger("zarr.group") DefaultT = TypeVar("DefaultT") def parse_zarr_format(data: Any) -> ZarrFormat: """Parse the zarr_format field from metadata.""" if data in (2, 3): return cast(ZarrFormat, data) msg = f"Invalid zarr_format. Expected one of 2 or 3. Got {data}." raise ValueError(msg) def parse_node_type(data: Any) -> NodeType: """Parse the node_type field from metadata.""" if data in ("array", "group"): return cast(Literal["array", "group"], data) raise MetadataValidationError("node_type", "array or group", data) # todo: convert None to empty dict def parse_attributes(data: Any) -> dict[str, Any]: """Parse the attributes field from metadata.""" if data is None: return {} elif isinstance(data, dict) and all(isinstance(k, str) for k in data): return data msg = f"Expected dict with string keys. Got {type(data)} instead." raise TypeError(msg) @overload def _parse_async_node(node: AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]) -> Array: ... @overload def _parse_async_node(node: AsyncGroup) -> Group: ... 
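
# Example (illustrative): the @overload stubs above give static checkers a precise
# mapping from each async node type to its synchronous wrapper, e.g.
#
#     group = _parse_async_node(async_group)  # typed as Group
#     array = _parse_async_node(async_array)  # typed as Array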
def _parse_async_node( node: AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup, ) -> Array | Group: """Wrap an AsyncArray in an Array, or an AsyncGroup in a Group.""" if isinstance(node, AsyncArray): return Array(node) elif isinstance(node, AsyncGroup): return Group(node) else: raise TypeError(f"Unknown node type, got {type(node)}") @dataclass(frozen=True) class ConsolidatedMetadata: """ Consolidated Metadata for this Group. This stores the metadata of child nodes below this group. Any child groups will have their consolidated metadata set appropriately. """ metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] kind: Literal["inline"] = "inline" must_understand: Literal[False] = False def to_dict(self) -> dict[str, JSON]: return { "kind": self.kind, "must_understand": self.must_understand, "metadata": {k: v.to_dict() for k, v in self.flattened_metadata.items()}, } @classmethod def from_dict(cls, data: dict[str, JSON]) -> ConsolidatedMetadata: data = dict(data) kind = data.get("kind") if kind != "inline": raise ValueError(f"Consolidated metadata kind='{kind}' is not supported.") raw_metadata = data.get("metadata") if not isinstance(raw_metadata, dict): raise TypeError(f"Unexpected type for 'metadata': {type(raw_metadata)}") metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] = {} if raw_metadata: for k, v in raw_metadata.items(): if not isinstance(v, dict): raise TypeError( f"Invalid value for metadata items. key='{k}', type='{type(v).__name__}'" ) # zarr_format is present in v2 and v3. zarr_format = parse_zarr_format(v["zarr_format"]) if zarr_format == 3: node_type = parse_node_type(v.get("node_type", None)) if node_type == "group": metadata[k] = GroupMetadata.from_dict(v) elif node_type == "array": metadata[k] = ArrayV3Metadata.from_dict(v) else: assert_never(node_type) elif zarr_format == 2: if "shape" in v: metadata[k] = ArrayV2Metadata.from_dict(v) else: metadata[k] = GroupMetadata.from_dict(v) else: assert_never(zarr_format) cls._flat_to_nested(metadata) return cls(metadata=metadata) @staticmethod def _flat_to_nested( metadata: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata], ) -> None: """ Convert a flat metadata representation to a nested one. Notes ----- Flat metadata is used when persisting the consolidated metadata. The keys include the full path, not just the node name. The key prefixes can be used to determine which nodes are children of which other nodes. Nested metadata is used in-memory. The outermost level will only have the *immediate* children of the Group. All nested child groups will be stored under the consolidated metadata of their immediate parent. """ # We have a flat mapping from {k: v} where the keys include the *full* # path segment: # { # "/a/b": { group_metadata }, # "/a/b/array-0": { array_metadata }, # "/a/b/array-1": { array_metadata }, # } # # We want to reorganize the metadata such that each Group contains the # array metadata of its immediate children. # In the example, the group at `/a/b` will have consolidated metadata # for its children `array-0` and `array-1`. # # metadata = dict(metadata) keys = sorted(metadata, key=lambda k: k.count("/")) grouped = { k: list(v) for k, v in itertools.groupby(keys, key=lambda k: k.rsplit("/", 1)[0]) } # we go top down and directly manipulate metadata. 
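        # For the example mapping above this yields (illustrative):
        #     grouped == {"/a": ["/a/b"], "/a/b": ["/a/b/array-0", "/a/b/array-1"]}
        # i.e. each parent prefix maps to the keys of its immediate children.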
for key, children_keys in grouped.items(): # key is a key like "a", "a/b", "a/b/c" # The basic idea is to find the immediate parent (so "", "a", or "a/b") # and update that node's consolidated metadata to include the metadata # in children_keys *prefixes, name = key.split("/") parent = metadata while prefixes: # e.g. a/b/c has a parent "a/b". Walk through to get # metadata["a"]["b"] part = prefixes.pop(0) # we can assume that parent[part] here is a group # otherwise we wouldn't have a node with this `part` prefix. # We can also assume that the parent node will have consolidated metadata, # because we're walking top to bottom. parent = parent[part].consolidated_metadata.metadata # type: ignore[union-attr] node = parent[name] children_keys = list(children_keys) if isinstance(node, ArrayV2Metadata | ArrayV3Metadata): # These are already present, either thanks to being an array in the # root, or by being collected as a child in the else clause continue # We pop from metadata, since we're *moving* this under group children = { child_key.split("/")[-1]: metadata.pop(child_key) for child_key in children_keys if child_key != key } parent[name] = replace( node, consolidated_metadata=ConsolidatedMetadata(metadata=children) ) @property def flattened_metadata(self) -> dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata]: """ Return the flattened representation of Consolidated Metadata. The returned dictionary will have a key for each child node in the hierarchy under this group. Under the default (nested) representation available through ``self.metadata``, the dictionary only contains keys for immediate children. The keys of the dictionary will include the full path to a child node from the current group, where segments are joined by ``/``. Examples -------- >>> cm = ConsolidatedMetadata( ... metadata={ ... "group-0": GroupMetadata( ... consolidated_metadata=ConsolidatedMetadata( ... { ... "group-0-0": GroupMetadata(), ... } ... ) ... ), ... "group-1": GroupMetadata(), ... } ... ) >>> cm.flattened_metadata {'group-0': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group'), 'group-0/group-0-0': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group'), 'group-1': GroupMetadata(attributes={}, zarr_format=3, consolidated_metadata=None, node_type='group')} """ metadata = {} def flatten( key: str, group: GroupMetadata | ArrayV2Metadata | ArrayV3Metadata ) -> dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata]: children: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] = {} if isinstance(group, ArrayV2Metadata | ArrayV3Metadata): children[key] = group else: if group.consolidated_metadata and group.consolidated_metadata.metadata is not None: children[key] = replace( group, consolidated_metadata=ConsolidatedMetadata(metadata={}) ) for name, val in group.consolidated_metadata.metadata.items(): full_key = f"{key}/{name}" if isinstance(val, GroupMetadata): children.update(flatten(full_key, val)) else: children[full_key] = val else: children[key] = replace(group, consolidated_metadata=None) return children for k, v in self.metadata.items(): metadata.update(flatten(k, v)) return metadata @dataclass(frozen=True) class GroupMetadata(Metadata): """ Metadata for a Group.
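    Attributes are stored as a JSON-serializable mapping, and ``zarr_format``
    selects between the Zarr format 2 and Zarr format 3 on-disk layouts.

    A minimal, illustrative sketch (the attribute values are arbitrary):

    >>> meta = GroupMetadata(attributes={'spam': 'eggs'}, zarr_format=3)
    >>> meta.node_type
    'group'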
""" attributes: dict[str, Any] = field(default_factory=dict) zarr_format: ZarrFormat = 3 consolidated_metadata: ConsolidatedMetadata | None = None node_type: Literal["group"] = field(default="group", init=False) def to_buffer_dict(self, prototype: BufferPrototype) -> dict[str, Buffer]: json_indent = config.get("json_indent") if self.zarr_format == 3: return { ZARR_JSON: prototype.buffer.from_bytes( json.dumps(self.to_dict(), cls=V3JsonEncoder).encode() ) } else: items = { ZGROUP_JSON: prototype.buffer.from_bytes( json.dumps({"zarr_format": self.zarr_format}, indent=json_indent).encode() ), ZATTRS_JSON: prototype.buffer.from_bytes( json.dumps(self.attributes, indent=json_indent).encode() ), } if self.consolidated_metadata: d = { ZGROUP_JSON: {"zarr_format": self.zarr_format}, ZATTRS_JSON: self.attributes, } consolidated_metadata = self.consolidated_metadata.to_dict()["metadata"] assert isinstance(consolidated_metadata, dict) for k, v in consolidated_metadata.items(): attrs = v.pop("attributes", None) d[f"{k}/{ZATTRS_JSON}"] = attrs if "shape" in v: # it's an array d[f"{k}/{ZARRAY_JSON}"] = v else: d[f"{k}/{ZGROUP_JSON}"] = { "zarr_format": self.zarr_format, "consolidated_metadata": { "metadata": {}, "must_understand": False, "kind": "inline", }, } items[ZMETADATA_V2_JSON] = prototype.buffer.from_bytes( json.dumps( {"metadata": d, "zarr_consolidated_format": 1}, cls=V3JsonEncoder, ).encode() ) return items def __init__( self, attributes: dict[str, Any] | None = None, zarr_format: ZarrFormat = 3, consolidated_metadata: ConsolidatedMetadata | None = None, ) -> None: attributes_parsed = parse_attributes(attributes) zarr_format_parsed = parse_zarr_format(zarr_format) object.__setattr__(self, "attributes", attributes_parsed) object.__setattr__(self, "zarr_format", zarr_format_parsed) object.__setattr__(self, "consolidated_metadata", consolidated_metadata) @classmethod def from_dict(cls, data: dict[str, Any]) -> GroupMetadata: data = dict(data) assert data.pop("node_type", None) in ("group", None) consolidated_metadata = data.pop("consolidated_metadata", None) if consolidated_metadata: data["consolidated_metadata"] = ConsolidatedMetadata.from_dict(consolidated_metadata) zarr_format = data.get("zarr_format") if zarr_format == 2 or zarr_format is None: # zarr v2 allowed arbitrary keys here. # We don't want the GroupMetadata constructor to fail just because someone put an # extra key in the metadata. expected = {x.name for x in fields(cls)} data = {k: v for k, v in data.items() if k in expected} return cls(**data) def to_dict(self) -> dict[str, Any]: result = asdict(replace(self, consolidated_metadata=None)) if self.consolidated_metadata: result["consolidated_metadata"] = self.consolidated_metadata.to_dict() return result @dataclass(frozen=True) class ImplicitGroupMarker(GroupMetadata): """ Marker for an implicit group. Instances of this class are only used in the context of group creation as a placeholder to represent groups that should only be created if they do not already exist in storage """ @dataclass(frozen=True) class AsyncGroup: """ Asynchronous Group object. 
""" metadata: GroupMetadata store_path: StorePath # TODO: make this correct and work # TODO: ensure that this can be bound properly to subclass of AsyncGroup @classmethod async def from_store( cls, store: StoreLike, *, attributes: dict[str, Any] | None = None, overwrite: bool = False, zarr_format: ZarrFormat = 3, ) -> AsyncGroup: store_path = await make_store_path(store) if overwrite: if store_path.store.supports_deletes: await store_path.delete_dir() else: await ensure_no_existing_node(store_path, zarr_format=zarr_format) else: await ensure_no_existing_node(store_path, zarr_format=zarr_format) attributes = attributes or {} group = cls( metadata=GroupMetadata(attributes=attributes, zarr_format=zarr_format), store_path=store_path, ) await group._save_metadata(ensure_parents=True) return group @classmethod async def open( cls, store: StoreLike, zarr_format: ZarrFormat | None = 3, use_consolidated: bool | str | None = None, ) -> AsyncGroup: """Open a new AsyncGroup Parameters ---------- store : StoreLike zarr_format : {2, 3}, optional use_consolidated : bool or str, default None Whether to use consolidated metadata. By default, consolidated metadata is used if it's present in the store (in the ``zarr.json`` for Zarr format 3 and in the ``.zmetadata`` file for Zarr format 2). To explicitly require consolidated metadata, set ``use_consolidated=True``, which will raise an exception if consolidated metadata is not found. To explicitly *not* use consolidated metadata, set ``use_consolidated=False``, which will fall back to using the regular, non consolidated metadata. Zarr format 2 allowed configuring the key storing the consolidated metadata (``.zmetadata`` by default). Specify the custom key as ``use_consolidated`` to load consolidated metadata from a non-default key. """ store_path = await make_store_path(store) consolidated_key = ZMETADATA_V2_JSON if (zarr_format == 2 or zarr_format is None) and isinstance(use_consolidated, str): consolidated_key = use_consolidated if zarr_format == 2: paths = [store_path / ZGROUP_JSON, store_path / ZATTRS_JSON] if use_consolidated or use_consolidated is None: paths.append(store_path / consolidated_key) zgroup_bytes, zattrs_bytes, *rest = await asyncio.gather( *[path.get() for path in paths] ) if zgroup_bytes is None: raise FileNotFoundError(store_path) if use_consolidated or use_consolidated is None: maybe_consolidated_metadata_bytes = rest[0] else: maybe_consolidated_metadata_bytes = None elif zarr_format == 3: zarr_json_bytes = await (store_path / ZARR_JSON).get() if zarr_json_bytes is None: raise FileNotFoundError(store_path) elif zarr_format is None: ( zarr_json_bytes, zgroup_bytes, zattrs_bytes, maybe_consolidated_metadata_bytes, ) = await asyncio.gather( (store_path / ZARR_JSON).get(), (store_path / ZGROUP_JSON).get(), (store_path / ZATTRS_JSON).get(), (store_path / str(consolidated_key)).get(), ) if zarr_json_bytes is not None and zgroup_bytes is not None: # warn and favor v3 msg = f"Both zarr.json (Zarr format 3) and .zgroup (Zarr format 2) metadata objects exist at {store_path}. Zarr format 3 will be used." 
warnings.warn(msg, stacklevel=1) if zarr_json_bytes is None and zgroup_bytes is None: raise FileNotFoundError( f"could not find zarr.json or .zgroup objects in {store_path}" ) # set zarr_format based on which keys were found if zarr_json_bytes is not None: zarr_format = 3 else: zarr_format = 2 else: raise MetadataValidationError("zarr_format", "2, 3, or None", zarr_format) if zarr_format == 2: # this is checked above, asserting here for mypy assert zgroup_bytes is not None if use_consolidated and maybe_consolidated_metadata_bytes is None: # the user requested consolidated metadata, but it was missing raise ValueError(consolidated_key) elif use_consolidated is False: # the user explicitly opted out of consolidated_metadata. # Discard anything we might have read. maybe_consolidated_metadata_bytes = None return cls._from_bytes_v2( store_path, zgroup_bytes, zattrs_bytes, maybe_consolidated_metadata_bytes ) else: # V3 groups consist of a zarr.json object assert zarr_json_bytes is not None if not isinstance(use_consolidated, bool | None): raise TypeError("use_consolidated must be a bool or None for Zarr format 3.") return cls._from_bytes_v3( store_path, zarr_json_bytes, use_consolidated=use_consolidated, ) @classmethod def _from_bytes_v2( cls, store_path: StorePath, zgroup_bytes: Buffer, zattrs_bytes: Buffer | None, consolidated_metadata_bytes: Buffer | None, ) -> AsyncGroup: # V2 groups consist of .zgroup and .zattrs objects zgroup = json.loads(zgroup_bytes.to_bytes()) zattrs = json.loads(zattrs_bytes.to_bytes()) if zattrs_bytes is not None else {} group_metadata = {**zgroup, "attributes": zattrs} if consolidated_metadata_bytes is not None: v2_consolidated_metadata = json.loads(consolidated_metadata_bytes.to_bytes()) v2_consolidated_metadata = v2_consolidated_metadata["metadata"] # We already read zattrs and zgroup. Should we ignore these? v2_consolidated_metadata.pop(".zattrs", None) v2_consolidated_metadata.pop(".zgroup", None) consolidated_metadata: defaultdict[str, dict[str, Any]] = defaultdict(dict) # keys like air/.zarray, air/.zattrs for k, v in v2_consolidated_metadata.items(): path, kind = k.rsplit("/.", 1) if kind == "zarray": consolidated_metadata[path].update(v) elif kind == "zattrs": consolidated_metadata[path]["attributes"] = v elif kind == "zgroup": consolidated_metadata[path].update(v) else: raise ValueError(f"Invalid file type '{kind}' at path '{path}'") group_metadata["consolidated_metadata"] = { "metadata": dict(consolidated_metadata), "kind": "inline", "must_understand": False, } return cls.from_dict(store_path, group_metadata) @classmethod def _from_bytes_v3( cls, store_path: StorePath, zarr_json_bytes: Buffer, use_consolidated: bool | None, ) -> AsyncGroup: group_metadata = json.loads(zarr_json_bytes.to_bytes()) if use_consolidated and group_metadata.get("consolidated_metadata") is None: msg = f"Consolidated metadata requested with 'use_consolidated=True' but not found in '{store_path.path}'." raise ValueError(msg) elif use_consolidated is False: # Drop consolidated metadata if it's there. group_metadata.pop("consolidated_metadata", None) return cls.from_dict(store_path, group_metadata) @classmethod def from_dict( cls, store_path: StorePath, data: dict[str, Any], ) -> AsyncGroup: return cls( metadata=GroupMetadata.from_dict(data), store_path=store_path, ) async def setitem(self, key: str, value: Any) -> None: """ Fastpath for creating a new array. New arrays will be created with default array settings for the array type.
Parameters ---------- key : str Array name value : array-like Array data """ path = self.store_path / key await async_api.save_array( store=path, arr=value, zarr_format=self.metadata.zarr_format, overwrite=True ) async def getitem( self, key: str, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup: """ Get a subarray or subgroup from the group. Parameters ---------- key : str Array or group name Returns ------- AsyncArray or AsyncGroup """ store_path = self.store_path / key logger.debug("key=%s, store_path=%s", key, store_path) # Consolidated metadata lets us avoid some I/O operations so try that first. if self.metadata.consolidated_metadata is not None: return self._getitem_consolidated(store_path, key, prefix=self.name) try: return await get_node( store=store_path.store, path=store_path.path, zarr_format=self.metadata.zarr_format ) except FileNotFoundError as e: raise KeyError(key) from e def _getitem_consolidated( self, store_path: StorePath, key: str, prefix: str ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup: # getitem, in the special case where we have consolidated metadata. # Note that this is a regular def (non async) function. # This shouldn't do any additional I/O. # the caller needs to verify this! assert self.metadata.consolidated_metadata is not None # we support nested getitems like group/subgroup/array indexers = key.split("/") indexers.reverse() metadata: ArrayV2Metadata | ArrayV3Metadata | GroupMetadata = self.metadata while indexers: indexer = indexers.pop() if isinstance(metadata, ArrayV2Metadata | ArrayV3Metadata): # we've indexed into an array with group["array/subarray"]. Invalid. raise KeyError(key) if metadata.consolidated_metadata is None: # we've indexed into a group without consolidated metadata. # This isn't normal; typically, consolidated metadata # will include explicit markers for when there are no child # nodes as metadata={}. # We have some freedom in exactly how we interpret this case. # For now, we treat None as the same as {}, i.e. we don't # have any children. raise KeyError(key) try: metadata = metadata.consolidated_metadata.metadata[indexer] except KeyError as e: # The Group Metadata has consolidated metadata, but the key # isn't present. We trust this to mean that the key isn't in # the hierarchy, and *don't* fall back to checking the store. msg = f"'{key}' not found in consolidated metadata." raise KeyError(msg) from e # update store_path to ensure that AsyncArray/Group.name is correct if prefix != "/": key = "/".join([prefix.lstrip("/"), key]) store_path = StorePath(store=store_path.store, path=key) if isinstance(metadata, GroupMetadata): return AsyncGroup(metadata=metadata, store_path=store_path) else: return AsyncArray(metadata=metadata, store_path=store_path) async def delitem(self, key: str) -> None: """Delete a group member. Parameters ---------- key : str Array or group name """ store_path = self.store_path / key await store_path.delete_dir() if self.metadata.consolidated_metadata: self.metadata.consolidated_metadata.metadata.pop(key, None) await self._save_metadata() async def get( self, key: str, default: DefaultT | None = None ) -> AsyncArray[Any] | AsyncGroup | DefaultT | None: """Obtain a group member, returning default if not found. Parameters ---------- key : str Group member name. default : object Default value to return if key is not found (default: None). Returns ------- object Group member (AsyncArray or AsyncGroup) or default if not found. 
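        Examples
        --------
        A small sketch (the in-memory store and member name are illustrative):

        >>> import asyncio
        >>> import zarr.storage
        >>> from zarr.core.group import AsyncGroup
        >>> async def example():
        ...     group = await AsyncGroup.from_store(zarr.storage.MemoryStore())
        ...     return await group.get('nonexistent', default=42)
        >>> asyncio.run(example())
        42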
""" try: return await self.getitem(key) except KeyError: return default async def _save_metadata(self, ensure_parents: bool = False) -> None: to_save = self.metadata.to_buffer_dict(default_buffer_prototype()) awaitables = [set_or_delete(self.store_path / key, value) for key, value in to_save.items()] if ensure_parents: parents = _build_parents(self) for parent in parents: awaitables.extend( [ (parent.store_path / key).set_if_not_exists(value) for key, value in parent.metadata.to_buffer_dict( default_buffer_prototype() ).items() ] ) await asyncio.gather(*awaitables) @property def path(self) -> str: """Storage path.""" return self.store_path.path @property def name(self) -> str: """Group name following h5py convention.""" if self.path: # follow h5py convention: add leading slash name = self.path if name[0] != "/": name = "/" + name return name return "/" @property def basename(self) -> str: """Final component of name.""" return self.name.split("/")[-1] @property def attrs(self) -> dict[str, Any]: return self.metadata.attributes @property def info(self) -> Any: """ Return a visual representation of the statically known information about a group. Note that this doesn't include dynamic information, like the number of child Groups or Arrays. Returns ------- GroupInfo See Also -------- AsyncGroup.info_complete All information about a group, including dynamic information """ if self.metadata.consolidated_metadata: members = list(self.metadata.consolidated_metadata.flattened_metadata.values()) else: members = None return self._info(members=members) async def info_complete(self) -> Any: """ Return all the information for a group. This includes dynamic information like the number of child Groups or Arrays. If this group doesn't contain consolidated metadata then this will need to read from the backing Store. Returns ------- GroupInfo See Also -------- AsyncGroup.info """ members = [x[1].metadata async for x in self.members(max_depth=None)] return self._info(members=members) def _info( self, members: list[ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] | None = None ) -> Any: kwargs = {} if members is not None: kwargs["_count_members"] = len(members) count_arrays = 0 count_groups = 0 for member in members: if isinstance(member, GroupMetadata): count_groups += 1 else: count_arrays += 1 kwargs["_count_arrays"] = count_arrays kwargs["_count_groups"] = count_groups return GroupInfo( _name=self.store_path.path, _read_only=self.read_only, _store_type=type(self.store_path.store).__name__, _zarr_format=self.metadata.zarr_format, # maybe do a typeddict **kwargs, # type: ignore[arg-type] ) @property def store(self) -> Store: return self.store_path.store @property def read_only(self) -> bool: # Backwards compatibility for 2.x return self.store_path.read_only @property def synchronizer(self) -> None: # Backwards compatibility for 2.x # Not implemented in 3.x yet. return None async def create_group( self, name: str, *, overwrite: bool = False, attributes: dict[str, Any] | None = None, ) -> AsyncGroup: """Create a sub-group. Parameters ---------- name : str Group name. overwrite : bool, optional If True, do not raise an error if the group already exists. attributes : dict, optional Group attributes. 
Returns ------- g : AsyncGroup """ attributes = attributes or {} return await type(self).from_store( self.store_path / name, attributes=attributes, overwrite=overwrite, zarr_format=self.metadata.zarr_format, ) async def require_group(self, name: str, overwrite: bool = False) -> AsyncGroup: """Obtain a sub-group, creating one if it doesn't exist. Parameters ---------- name : str Group name. overwrite : bool, optional Overwrite any existing group with given `name` if present. Returns ------- g : AsyncGroup """ if overwrite: # TODO: check that overwrite=True errors if an array exists where the group is being created grp = await self.create_group(name, overwrite=True) else: try: item: ( AsyncGroup | AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] ) = await self.getitem(name) if not isinstance(item, AsyncGroup): raise TypeError( f"Incompatible object ({item.__class__.__name__}) already exists" ) assert isinstance(item, AsyncGroup) # make mypy happy grp = item except KeyError: grp = await self.create_group(name) return grp async def require_groups(self, *names: str) -> tuple[AsyncGroup, ...]: """Convenience method to require multiple groups in a single call. Parameters ---------- *names : str Group names. Returns ------- Tuple[AsyncGroup, ...] """ if not names: return () return tuple(await asyncio.gather(*(self.require_group(name) for name in names))) async def create_array( self, name: str, *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", compressor: CompressorLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = 0, order: MemoryOrder | None = None, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, overwrite: bool = False, config: ArrayConfig | ArrayConfigLike | None = None, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array within this group. This method lightly wraps :func:`zarr.core.array.create_array`. Parameters ---------- name : str The name of the array relative to the group. shape : ChunkCoords Shape of the array. dtype : npt.DTypeLike Data type of the array. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, defaults are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the order of your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used.
These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. compressors : Iterable[Codec], optional List of compressors to apply to the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. compressor : Codec, optional Deprecated in favor of ``compressors``. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory order of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncoding, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. storage_options : dict, optional If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfig or ArrayConfigLike, optional Runtime configuration for the array. Returns ------- AsyncArray
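        Examples
        --------
        A minimal, illustrative sketch (the in-memory store, array name, shape,
        dtype, and chunk shape are all arbitrary choices for this example):

        >>> import asyncio
        >>> import zarr.storage
        >>> from zarr.core.group import AsyncGroup
        >>> async def example():
        ...     group = await AsyncGroup.from_store(zarr.storage.MemoryStore())
        ...     arr = await group.create_array(name='x', shape=(100,), dtype='float64', chunks=(10,))
        ...     return arr.shape
        >>> asyncio.run(example())
        (100,)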
""" compressors = _parse_deprecated_compressor( compressor, compressors, zarr_format=self.metadata.zarr_format ) return await create_array( store=self.store_path, name=name, shape=shape, dtype=dtype, chunks=chunks, shards=shards, filters=filters, compressors=compressors, serializer=serializer, fill_value=fill_value, order=order, zarr_format=self.metadata.zarr_format, attributes=attributes, chunk_key_encoding=chunk_key_encoding, dimension_names=dimension_names, storage_options=storage_options, overwrite=overwrite, config=config, ) @deprecated("Use AsyncGroup.create_array instead.") async def create_dataset( self, name: str, *, shape: ShapeLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array. .. deprecated:: 3.0.0 The h5py compatibility methods will be removed in 3.1.0. Use `AsyncGroup.create_array` instead. Arrays are known as "datasets" in HDF5 terminology. For compatibility with h5py, Zarr groups also implement the :func:`zarr.AsyncGroup.require_dataset` method. Parameters ---------- name : str Array name. **kwargs : dict Additional arguments passed to :func:`zarr.AsyncGroup.create_array`. Returns ------- a : AsyncArray """ data = kwargs.pop("data", None) # create_dataset in zarr 2.x requires shape but not dtype if data is # provided. Allow this configuration by inferring dtype from data if # necessary and passing it to create_array if "dtype" not in kwargs and data is not None: kwargs["dtype"] = data.dtype array = await self.create_array(name, shape=shape, **kwargs) if data is not None: await array.setitem(slice(None), data) return array @deprecated("Use AsyncGroup.require_array instead.") async def require_dataset( self, name: str, *, shape: ChunkCoords, dtype: npt.DTypeLike = None, exact: bool = False, **kwargs: Any, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Obtain an array, creating if it doesn't exist. .. deprecated:: 3.0.0 The h5py compatibility methods will be removed in 3.1.0. Use `AsyncGroup.require_array` instead. Arrays are known as "datasets" in HDF5 terminology. For compatibility with h5py, Zarr groups also implement the :func:`zarr.AsyncGroup.create_dataset` method. Other `kwargs` are as per :func:`zarr.AsyncGroup.create_dataset`. Parameters ---------- name : str Array name. shape : int or tuple of ints Array shape. dtype : str or dtype, optional NumPy dtype. exact : bool, optional If True, require `dtype` to match exactly. If False, require `dtype` can be cast from array dtype. Returns ------- a : AsyncArray """ return await self.require_array(name, shape=shape, dtype=dtype, exact=exact, **kwargs) async def require_array( self, name: str, *, shape: ShapeLike, dtype: npt.DTypeLike = None, exact: bool = False, **kwargs: Any, ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Obtain an array, creating if it doesn't exist. Other `kwargs` are as per :func:`zarr.AsyncGroup.create_array`. Parameters ---------- name : str Array name. shape : int or tuple of ints Array shape. dtype : str or dtype, optional NumPy dtype. exact : bool, optional If True, require `dtype` to match exactly. If False, require `dtype` can be cast from array dtype.
Returns ------- a : AsyncArray """ try: ds = await self.getitem(name) if not isinstance(ds, AsyncArray): raise TypeError(f"Incompatible object ({ds.__class__.__name__}) already exists") shape = parse_shapelike(shape) if shape != ds.shape: raise TypeError(f"Incompatible shape ({ds.shape} vs {shape})") dtype = np.dtype(dtype) if exact: if ds.dtype != dtype: raise TypeError(f"Incompatible dtype ({ds.dtype} vs {dtype})") else: if not np.can_cast(ds.dtype, dtype): raise TypeError(f"Incompatible dtype ({ds.dtype} vs {dtype})") except KeyError: ds = await self.create_array(name, shape=shape, dtype=dtype, **kwargs) return ds async def update_attributes(self, new_attributes: dict[str, Any]) -> AsyncGroup: """Update group attributes. Parameters ---------- new_attributes : dict New attributes to set on the group. Returns ------- self : AsyncGroup """ self.metadata.attributes.update(new_attributes) # Write new metadata await self._save_metadata() return self def __repr__(self) -> str: return f"<AsyncGroup {self.store_path}>" async def nmembers( self, max_depth: int | None = 0, ) -> int: """Count the number of members in this group. Parameters ---------- max_depth : int, default 0 The maximum number of levels of the hierarchy to include. By default (``max_depth=0``), only immediate children are included. Set ``max_depth=None`` to include all nodes, and some positive integer to consider children within that many levels of the root Group. Returns ------- count : int """ # check if we can use consolidated metadata, which requires that we have non-None # consolidated metadata at all points in the hierarchy. if self.metadata.consolidated_metadata is not None: return len(self.metadata.consolidated_metadata.flattened_metadata) # TODO: consider using aioitertools.builtins.sum for this # return await aioitertools.builtins.sum((1 async for _ in self.members()), start=0) n = 0 async for _ in self.members(max_depth=max_depth): n += 1 return n async def members( self, max_depth: int | None = 0, ) -> AsyncGenerator[ tuple[str, AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup], None, ]: """ Returns an AsyncGenerator over the arrays and groups contained in this group. This method requires that `store_path.store` supports directory listing. The results are not guaranteed to be ordered. Parameters ---------- max_depth : int, default 0 The maximum number of levels of the hierarchy to include. By default (``max_depth=0``), only immediate children are included. Set ``max_depth=None`` to include all nodes, and some positive integer to consider children within that many levels of the root Group. Yields ------ path : str A string giving the path to the target, relative to the Group ``self``. value : AsyncArray or AsyncGroup The AsyncArray or AsyncGroup that is a child of ``self``. """ if max_depth is not None and max_depth < 0: raise ValueError(f"max_depth must be None or >= 0. Got '{max_depth}' instead") async for item in self._members(max_depth=max_depth): yield item def _members_consolidated( self, max_depth: int | None, prefix: str = "" ) -> Generator[ tuple[str, AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup], None, ]: consolidated_metadata = self.metadata.consolidated_metadata do_recursion = max_depth is None or max_depth > 0 # we kind of just want the top-level keys.
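# Sketch of the traversal below: each top-level key in the consolidated
# metadata is materialized into a node without any store I/O; when recursion
# is enabled we descend into child groups with max_depth reduced by one
# (None meaning unbounded).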
if consolidated_metadata is not None: for key in consolidated_metadata.metadata: obj = self._getitem_consolidated( self.store_path, key, prefix=self.name ) # Metadata -> Group/Array key = f"{prefix}/{key}".lstrip("/") yield key, obj if do_recursion and isinstance(obj, AsyncGroup): if max_depth is None: new_depth = None else: new_depth = max_depth - 1 yield from obj._members_consolidated(new_depth, prefix=key) async def _members( self, max_depth: int | None ) -> AsyncGenerator[ tuple[str, AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup], None ]: skip_keys: tuple[str, ...] if self.metadata.zarr_format == 2: skip_keys = (".zattrs", ".zgroup", ".zarray", ".zmetadata") elif self.metadata.zarr_format == 3: skip_keys = ("zarr.json",) else: raise ValueError(f"Unknown Zarr format: {self.metadata.zarr_format}") if self.metadata.consolidated_metadata is not None: members = self._members_consolidated(max_depth=max_depth) for member in members: yield member return if not self.store_path.store.supports_listing: msg = ( f"The store associated with this group ({type(self.store_path.store)}) " "does not support listing, " "specifically via the `list_dir` method. " "This function requires a store that supports listing." ) raise ValueError(msg) # enforce a concurrency limit by passing a semaphore to all the recursive functions semaphore = asyncio.Semaphore(config.get("async.concurrency")) async for member in _iter_members_deep( self, max_depth=max_depth, skip_keys=skip_keys, semaphore=semaphore ): yield member async def create_hierarchy( self, nodes: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata], *, overwrite: bool = False, ) -> AsyncIterator[ tuple[str, AsyncGroup | AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]] ]: """ Create a hierarchy of arrays or groups rooted at this group. This function will parse its input to ensure that the hierarchy is complete. Any implicit groups will be inserted as needed. For example, an input like ``{'a/b': GroupMetadata}`` will be parsed to ``{'': GroupMetadata, 'a': GroupMetadata, 'a/b': GroupMetadata}``. Explicitly specifying a root group, e.g. with ``nodes = {'': GroupMetadata()}`` is an error because this group instance is the root group. After input parsing, this function then creates all the nodes in the hierarchy concurrently. Arrays and Groups are yielded in the order they are created. This order is not stable and should not be relied on. Parameters ---------- nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, relative to the path of the group. The values are instances of ``GroupMetadata`` or ``ArrayMetadata``. Note that all values must have the same ``zarr_format`` as the parent group -- it is an error to mix zarr versions in the same hierarchy. Leading "/" characters from keys will be removed. overwrite : bool Whether to overwrite existing nodes. Defaults to ``False``, in which case an error is raised instead of overwriting an existing array or group. This function will not erase an existing group unless that group is explicitly named in ``nodes``. If ``nodes`` defines implicit groups, e.g. ``{'a/b/c': GroupMetadata}``, and a group already exists at path ``a``, then this function will leave the group at ``a`` as-is. Yields ------- tuple[str, AsyncArray | AsyncGroup].
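        Examples
        --------
        A sketch with an in-memory store (the node names are illustrative, and
        because the creation order is not guaranteed the output is not shown):

        >>> import asyncio
        >>> import zarr.storage
        >>> from zarr.core.group import AsyncGroup, GroupMetadata
        >>> async def example():
        ...     root = await AsyncGroup.from_store(zarr.storage.MemoryStore())
        ...     async for key, node in root.create_hierarchy({'a/b': GroupMetadata()}):
        ...         print(key, type(node).__name__)
        >>> asyncio.run(example())  # doctest: +SKIP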
""" # check that all the nodes have the same zarr_format as Self prefix = self.path nodes_parsed = {} for key, value in nodes.items(): if value.zarr_format != self.metadata.zarr_format: msg = ( "The zarr_format of the nodes must be the same as the parent group. " f"The node at {key} has zarr_format {value.zarr_format}, but the parent group" f" has zarr_format {self.metadata.zarr_format}." ) raise ValueError(msg) if normalize_path(key) == "": msg = ( "The input defines a root node, but a root node already exists, namely this Group instance." "It is an error to use this method to create a root node. " "Remove the root node from the input dict, or use a function like " "create_rooted_hierarchy to create a rooted hierarchy." ) raise ValueError(msg) else: nodes_parsed[_join_paths([prefix, key])] = value async for key, node in create_hierarchy( store=self.store, nodes=nodes_parsed, overwrite=overwrite, ): if prefix == "": out_key = key else: out_key = key.removeprefix(prefix + "/") yield out_key, node async def keys(self) -> AsyncGenerator[str, None]: """Iterate over member names.""" async for key, _ in self.members(): yield key async def contains(self, member: str) -> bool: """Check if a member exists in the group. Parameters ---------- member : str Member name. Returns ------- bool """ # TODO: this can be made more efficient. try: await self.getitem(member) except KeyError: return False else: return True async def groups(self) -> AsyncGenerator[tuple[str, AsyncGroup], None]: """Iterate over subgroups.""" async for name, value in self.members(): if isinstance(value, AsyncGroup): yield name, value async def group_keys(self) -> AsyncGenerator[str, None]: """Iterate over group names.""" async for key, _ in self.groups(): yield key async def group_values(self) -> AsyncGenerator[AsyncGroup, None]: """Iterate over group values.""" async for _, group in self.groups(): yield group async def arrays( self, ) -> AsyncGenerator[ tuple[str, AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]], None ]: """Iterate over arrays.""" async for key, value in self.members(): if isinstance(value, AsyncArray): yield key, value async def array_keys(self) -> AsyncGenerator[str, None]: """Iterate over array names.""" async for key, _ in self.arrays(): yield key async def array_values( self, ) -> AsyncGenerator[AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata], None]: """Iterate over array values.""" async for _, array in self.arrays(): yield array async def tree(self, expand: bool | None = None, level: int | None = None) -> Any: """ Return a tree-like representation of a hierarchy. This requires the optional ``rich`` dependency. Parameters ---------- expand : bool, optional This keyword is not yet supported. A NotImplementedError is raised if it's used. level : int, optional The maximum depth below this Group to display in the tree. Returns ------- TreeRepr A pretty-printable object displaying the hierarchy. """ from zarr.core._tree import group_tree_async if expand is not None: raise NotImplementedError("'expand' is not yet implemented.") return await group_tree_async(self, max_depth=level) async def empty( self, *, name: str, shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an empty array with the specified shape in this Group. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. 
**kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return await async_api.empty(shape=shape, store=self.store_path, path=name, **kwargs) async def zeros( self, *, name: str, shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with zero being used as the default value for uninitialized portions of the array. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.zeros(shape=shape, store=self.store_path, path=name, **kwargs) async def ones( self, *, name: str, shape: ChunkCoords, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with one being used as the default value for uninitialized portions of the array. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.ones(shape=shape, store=self.store_path, path=name, **kwargs) async def full( self, *, name: str, shape: ChunkCoords, fill_value: Any | None, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an array, with "fill_value" being used as the default value for uninitialized portions of the array. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. fill_value : scalar Value to fill the array with. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.full( shape=shape, fill_value=fill_value, store=self.store_path, path=name, **kwargs, ) async def empty_like( self, *, name: str, data: async_api.ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create an empty sub-array like `data`. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- name : str Name of the array. data : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.empty_like(a=data, store=self.store_path, path=name, **kwargs) async def zeros_like( self, *, name: str, data: async_api.ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create a sub-array of zeros like `data`. Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.zeros_like(a=data, store=self.store_path, path=name, **kwargs) async def ones_like( self, *, name: str, data: async_api.ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create a sub-array of ones like `data`. Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. 
**kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.ones_like(a=data, store=self.store_path, path=name, **kwargs) async def full_like( self, *, name: str, data: async_api.ArrayLike, **kwargs: Any ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """Create a sub-array like `data` filled with the `fill_value` of `data` . Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- AsyncArray The new array. """ return await async_api.full_like(a=data, store=self.store_path, path=name, **kwargs) async def move(self, source: str, dest: str) -> None: """Move a sub-group or sub-array from one path to another. Notes ----- Not implemented """ raise NotImplementedError @dataclass(frozen=True) class Group(SyncMixin): _async_group: AsyncGroup @classmethod def from_store( cls, store: StoreLike, *, attributes: dict[str, Any] | None = None, zarr_format: ZarrFormat = 3, overwrite: bool = False, ) -> Group: """Instantiate a group from an initialized store. Parameters ---------- store : StoreLike StoreLike containing the Group. attributes : dict, optional A dictionary of JSON-serializable values with user-defined attributes. zarr_format : {2, 3}, optional Zarr storage format version. overwrite : bool, optional If True, do not raise an error if the group already exists. Returns ------- Group Group instantiated from the store. Raises ------ ContainsArrayError, ContainsGroupError, ContainsArrayAndGroupError """ attributes = attributes or {} obj = sync( AsyncGroup.from_store( store, attributes=attributes, overwrite=overwrite, zarr_format=zarr_format, ), ) return cls(obj) @classmethod def open( cls, store: StoreLike, zarr_format: ZarrFormat | None = 3, ) -> Group: """Open a group from an initialized store. Parameters ---------- store : StoreLike Store containing the Group. zarr_format : {2, 3, None}, optional Zarr storage format version. Returns ------- Group Group instantiated from the store. """ obj = sync(AsyncGroup.open(store, zarr_format=zarr_format)) return cls(obj) def __getitem__(self, path: str) -> Array | Group: """Obtain a group member. Parameters ---------- path : str Group member name. Returns ------- Array | Group Group member (Array or Group) at the specified key Examples -------- >>> import zarr >>> group = Group.from_store(zarr.storage.MemoryStore() >>> group.create_array(name="subarray", shape=(10,), chunks=(10,)) >>> group.create_group(name="subgroup").create_array(name="subarray", shape=(10,), chunks=(10,)) >>> group["subarray"] >>> group["subgroup"] >>> group["subgroup"]["subarray"] """ obj = self._sync(self._async_group.getitem(path)) if isinstance(obj, AsyncArray): return Array(obj) else: return Group(obj) def get(self, path: str, default: DefaultT | None = None) -> Array | Group | DefaultT | None: """Obtain a group member, returning default if not found. Parameters ---------- path : str Group member name. default : object Default value to return if key is not found (default: None). Returns ------- object Group member (Array or Group) or default if not found. 
Examples -------- >>> import zarr >>> group = Group.from_store(zarr.storage.MemoryStore()) >>> group.create_array(name="subarray", shape=(10,), chunks=(10,)) >>> group.create_group(name="subgroup") >>> group.get("subarray") >>> group.get("subgroup") >>> group.get("nonexistent", None) """ try: return self[path] except KeyError: return default def __delitem__(self, key: str) -> None: """Delete a group member. Parameters ---------- key : str Group member name. Examples -------- >>> import zarr >>> group = Group.from_store(zarr.storage.MemoryStore()) >>> group.create_array(name="subarray", shape=(10,), chunks=(10,)) >>> del group["subarray"] >>> "subarray" in group False """ self._sync(self._async_group.delitem(key)) def __iter__(self) -> Iterator[str]: """Return an iterator over group member names. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> g3 = g1.create_group('bar') >>> d1 = g1.create_array('baz', shape=(10,), chunks=(10,)) >>> d2 = g1.create_array('quux', shape=(10,), chunks=(10,)) >>> for name in g1: ... print(name) baz bar foo quux """ yield from self.keys() def __len__(self) -> int: """Number of members.""" return self.nmembers() def __setitem__(self, key: str, value: Any) -> None: """Fastpath for creating a new array. New arrays will be created using default settings for the array type. If you need to create an array with custom settings, use the `create_array` method. Parameters ---------- key : str Array name. value : Any Array data. Examples -------- >>> import zarr >>> group = zarr.group() >>> group["foo"] = zarr.zeros((10,)) >>> group["foo"] """ self._sync(self._async_group.setitem(key, value)) def __repr__(self) -> str: return f"<Group {self.store_path}>" async def update_attributes_async(self, new_attributes: dict[str, Any]) -> Group: """Update the attributes of this group. Examples -------- >>> import zarr >>> group = zarr.group() >>> await group.update_attributes_async({"foo": "bar"}) >>> group.attrs.asdict() {'foo': 'bar'} """ new_metadata = replace(self.metadata, attributes=new_attributes) # Write new metadata to_save = new_metadata.to_buffer_dict(default_buffer_prototype()) awaitables = [set_or_delete(self.store_path / key, value) for key, value in to_save.items()] await asyncio.gather(*awaitables) async_group = replace(self._async_group, metadata=new_metadata) return replace(self, _async_group=async_group) @property def store_path(self) -> StorePath: """Path-like interface for the Store.""" return self._async_group.store_path @property def metadata(self) -> GroupMetadata: """Group metadata.""" return self._async_group.metadata @property def path(self) -> str: """Storage path.""" return self._async_group.path @property def name(self) -> str: """Group name following h5py convention.""" return self._async_group.name @property def basename(self) -> str: """Final component of name.""" return self._async_group.basename @property def attrs(self) -> Attributes: """Attributes of this Group""" return Attributes(self) @property def info(self) -> Any: """ Return the statically known information for a group. Returns ------- GroupInfo See Also -------- Group.info_complete All information about a group, including dynamic information like the children members. """ return self._async_group.info def info_complete(self) -> Any: """ Return information for a group. If this group doesn't contain consolidated metadata then this will need to read from the backing Store.
Returns ------- GroupInfo See Also -------- Group.info """ return self._sync(self._async_group.info_complete()) @property def store(self) -> Store: # Backwards compatibility for 2.x return self._async_group.store @property def read_only(self) -> bool: # Backwards compatibility for 2.x return self._async_group.read_only @property def synchronizer(self) -> None: # Backwards compatibility for 2.x # Not implemented in 3.x yet. return self._async_group.synchronizer def update_attributes(self, new_attributes: dict[str, Any]) -> Group: """Update the attributes of this group. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.update_attributes({"foo": "bar"}) >>> group.attrs.asdict() {'foo': 'bar'} """ self._sync(self._async_group.update_attributes(new_attributes)) return self def nmembers(self, max_depth: int | None = 0) -> int: """Count the number of members in this group. Parameters ---------- max_depth : int, default 0 The maximum number of levels of the hierarchy to include. By default, (``max_depth=0``) only immediate children are included. Set ``max_depth=None`` to include all nodes, and some positive integer to consider children within that many levels of the root Group. Returns ------- count : int """ return self._sync(self._async_group.nmembers(max_depth=max_depth)) def members(self, max_depth: int | None = 0) -> tuple[tuple[str, Array | Group], ...]: """ Return the sub-arrays and sub-groups of this group as a tuple of (name, array | group) pairs """ _members = self._sync_iter(self._async_group.members(max_depth=max_depth)) return tuple((kv[0], _parse_async_node(kv[1])) for kv in _members) def create_hierarchy( self, nodes: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata], *, overwrite: bool = False, ) -> Iterator[tuple[str, Group | Array]]: """ Create a hierarchy of arrays or groups rooted at this group. This function will parse its input to ensure that the hierarchy is complete. Any implicit groups will be inserted as needed. For example, an input like ```{'a/b': GroupMetadata}``` will be parsed to ```{'': GroupMetadata, 'a': GroupMetadata, 'b': Groupmetadata}```. Explicitly specifying a root group, e.g. with ``nodes = {'': GroupMetadata()}`` is an error because this group instance is the root group. After input parsing, this function then creates all the nodes in the hierarchy concurrently. Arrays and Groups are yielded in the order they are created. This order is not stable and should not be relied on. Parameters ---------- nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, relative to the path of the group. The values are instances of ``GroupMetadata`` or ``ArrayMetadata``. Note that all values must have the same ``zarr_format`` as the parent group -- it is an error to mix zarr versions in the same hierarchy. Leading "/" characters from keys will be removed. overwrite : bool Whether to overwrite existing nodes. Defaults to ``False``, in which case an error is raised instead of overwriting an existing array or group. This function will not erase an existing group unless that group is explicitly named in ``nodes``. If ``nodes`` defines implicit groups, e.g. ``{`'a/b/c': GroupMetadata}``, and a group already exists at path ``a``, then this function will leave the group at ``a`` as-is. Yields ------- tuple[str, Array | Group]. 
Examples -------- >>> import zarr >>> from zarr.core.group import GroupMetadata >>> root = zarr.create_group(store={}) >>> for key, val in root.create_hierarchy({'a/b/c': GroupMetadata()}): ... print(key, val) ... """ for key, node in self._sync_iter( self._async_group.create_hierarchy(nodes, overwrite=overwrite) ): yield (key, _parse_async_node(node)) def keys(self) -> Generator[str, None]: """Return an iterator over group member names. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> g3 = g1.create_group('bar') >>> d1 = g1.create_array('baz', shape=(10,), chunks=(10,)) >>> d2 = g1.create_array('quux', shape=(10,), chunks=(10,)) >>> for name in g1.keys(): ... print(name) baz bar foo quux """ yield from self._sync_iter(self._async_group.keys()) def __contains__(self, member: str) -> bool: """Test for group membership. Examples -------- >>> import zarr >>> g1 = zarr.group() >>> g2 = g1.create_group('foo') >>> d1 = g1.create_array('bar', shape=(10,), chunks=(10,)) >>> 'foo' in g1 True >>> 'bar' in g1 True >>> 'baz' in g1 False """ return self._sync(self._async_group.contains(member)) def groups(self) -> Generator[tuple[str, Group], None]: """Return the sub-groups of this group as a generator of (name, group) pairs. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_group("subgroup") >>> for name, subgroup in group.groups(): ... print(name, subgroup) subgroup """ for name, async_group in self._sync_iter(self._async_group.groups()): yield name, Group(async_group) def group_keys(self) -> Generator[str, None]: """Return an iterator over group member names. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_group("subgroup") >>> for name in group.group_keys(): ... print(name) subgroup """ for name, _ in self.groups(): yield name def group_values(self) -> Generator[Group, None]: """Return an iterator over group members. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_group("subgroup") >>> for subgroup in group.group_values(): ... print(subgroup) """ for _, group in self.groups(): yield group def arrays(self) -> Generator[tuple[str, Array], None]: """Return the sub-arrays of this group as a generator of (name, array) pairs Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_array("subarray", shape=(10,), chunks=(10,)) >>> for name, subarray in group.arrays(): ... print(name, subarray) subarray """ for name, async_array in self._sync_iter(self._async_group.arrays()): yield name, Array(async_array) def array_keys(self) -> Generator[str, None]: """Return an iterator over group member names. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_array("subarray", shape=(10,), chunks=(10,)) >>> for name in group.array_keys(): ... print(name) subarray """ for name, _ in self.arrays(): yield name def array_values(self) -> Generator[Array, None]: """Return an iterator over group members. Examples -------- >>> import zarr >>> group = zarr.group() >>> group.create_array("subarray", shape=(10,), chunks=(10,)) >>> for subarray in group.array_values(): ... print(subarray) """ for _, array in self.arrays(): yield array def tree(self, expand: bool | None = None, level: int | None = None) -> Any: """ Return a tree-like representation of a hierarchy. This requires the optional ``rich`` dependency. Parameters ---------- expand : bool, optional This keyword is not yet supported. A NotImplementedError is raised if it's used. 
level : int, optional The maximum depth below this Group to display in the tree. Returns ------- TreeRepr A pretty-printable object displaying the hierarchy. """ return self._sync(self._async_group.tree(expand=expand, level=level)) def create_group(self, name: str, **kwargs: Any) -> Group: """Create a sub-group. Parameters ---------- name : str Name of the new subgroup. Returns ------- Group Examples -------- >>> import zarr >>> group = zarr.group() >>> subgroup = group.create_group("subgroup") >>> subgroup """ return Group(self._sync(self._async_group.create_group(name, **kwargs))) def require_group(self, name: str, **kwargs: Any) -> Group: """Obtain a sub-group, creating one if it doesn't exist. Parameters ---------- name : str Group name. Returns ------- g : Group """ return Group(self._sync(self._async_group.require_group(name, **kwargs))) def require_groups(self, *names: str) -> tuple[Group, ...]: """Convenience method to require multiple groups in a single call. Parameters ---------- *names : str Group names. Returns ------- groups : tuple of Groups """ return tuple(map(Group, self._sync(self._async_group.require_groups(*names)))) def create(self, *args: Any, **kwargs: Any) -> Array: # Backwards compatibility for 2.x return self.create_array(*args, **kwargs) @_deprecate_positional_args def create_array( self, name: str, *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", compressor: CompressorLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = 0, order: MemoryOrder | None = "C", attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, overwrite: bool = False, config: ArrayConfig | ArrayConfigLike | None = None, ) -> Array: """Create an array within this group. This method lightly wraps :func:`zarr.core.array.create_array`. Parameters ---------- name : str The name of the array relative to the group. shape : ChunkCoords Shape of the array. dtype : npt.DTypeLike Data type of the array. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, defaults are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the order of your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters.
compressors : Iterable[Codec], optional List of compressors to apply to the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. compressor : Codec, optional Deprecated in favor of ``compressors``. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory order of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncoding, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. storage_options : dict, optional If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfig or ArrayConfigLike, optional Runtime configuration for the array. Returns ------- Array """ compressors = _parse_deprecated_compressor( compressor, compressors, zarr_format=self.metadata.zarr_format ) return Array( self._sync( self._async_group.create_array( name=name, shape=shape, dtype=dtype, chunks=chunks, shards=shards, fill_value=fill_value, attributes=attributes, chunk_key_encoding=chunk_key_encoding, compressors=compressors, serializer=serializer, dimension_names=dimension_names, order=order, filters=filters, overwrite=overwrite, storage_options=storage_options, config=config, ) ) ) @deprecated("Use Group.create_array instead.") def create_dataset(self, name: str, **kwargs: Any) -> Array: """Create an array. .. deprecated:: 3.0.0 The h5py compatibility methods will be removed in 3.1.0. Use `Group.create_array` instead.
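For example, a call like ``group.create_dataset('x', shape=(10,), dtype='uint8')`` can be replaced with ``group.create_array('x', shape=(10,), dtype='uint8')`` (an illustrative equivalence: ``create_dataset`` forwards its keyword arguments to the array-creation machinery).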
Arrays are known as "datasets" in HDF5 terminology. For compatibility with h5py, Zarr groups also implement the :func:`zarr.Group.require_dataset` method. Parameters ---------- name : str Array name. **kwargs : dict Additional arguments passed to :func:`zarr.Group.create_array`. Returns ------- a : Array """ return Array(self._sync(self._async_group.create_dataset(name, **kwargs))) @deprecated("Use Group.require_array instead.") def require_dataset(self, name: str, *, shape: ShapeLike, **kwargs: Any) -> Array: """Obtain an array, creating it if it doesn't exist. .. deprecated:: 3.0.0 The h5py compatibility methods will be removed in 3.1.0. Use `Group.require_array` instead. Arrays are known as "datasets" in HDF5 terminology. For compatibility with h5py, Zarr groups also implement the :func:`zarr.Group.create_dataset` method. Other `kwargs` are as per :func:`zarr.Group.create_dataset`. Parameters ---------- name : str Array name. **kwargs : See :func:`zarr.Group.create_dataset`. Returns ------- a : Array """ return Array(self._sync(self._async_group.require_array(name, shape=shape, **kwargs))) def require_array(self, name: str, *, shape: ShapeLike, **kwargs: Any) -> Array: """Obtain an array, creating it if it doesn't exist. Other `kwargs` are as per :func:`zarr.Group.create_array`. Parameters ---------- name : str Array name. **kwargs : See :func:`zarr.Group.create_array`. Returns ------- a : Array """ return Array(self._sync(self._async_group.require_array(name, shape=shape, **kwargs))) @_deprecate_positional_args def empty(self, *, name: str, shape: ChunkCoords, **kwargs: Any) -> Array: """Create an empty array with the specified shape in this Group. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return Array(self._sync(self._async_group.empty(name=name, shape=shape, **kwargs))) @_deprecate_positional_args def zeros(self, *, name: str, shape: ChunkCoords, **kwargs: Any) -> Array: """Create an array, with zero being used as the default value for uninitialized portions of the array. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(self._sync(self._async_group.zeros(name=name, shape=shape, **kwargs))) @_deprecate_positional_args def ones(self, *, name: str, shape: ChunkCoords, **kwargs: Any) -> Array: """Create an array, with one being used as the default value for uninitialized portions of the array. Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(self._sync(self._async_group.ones(name=name, shape=shape, **kwargs))) @_deprecate_positional_args def full( self, *, name: str, shape: ChunkCoords, fill_value: Any | None, **kwargs: Any ) -> Array: """Create an array, with "fill_value" being used as the default value for uninitialized portions of the array.
Parameters ---------- name : str Name of the array. shape : int or tuple of int Shape of the empty array. fill_value : scalar Value to fill the array with. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array( self._sync( self._async_group.full(name=name, shape=shape, fill_value=fill_value, **kwargs) ) ) @_deprecate_positional_args def empty_like(self, *, name: str, data: async_api.ArrayLike, **kwargs: Any) -> Array: """Create an empty sub-array like `data`. The contents will be filled with the array's fill value or zeros if no fill value is provided. Parameters ---------- name : str Name of the array. data : array-like The array to create an empty array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. Notes ----- The contents of an empty Zarr array are not defined. On attempting to retrieve data from an empty Zarr array, any values may be returned, and these are not guaranteed to be stable from one access to the next. """ return Array(self._sync(self._async_group.empty_like(name=name, data=data, **kwargs))) @_deprecate_positional_args def zeros_like(self, *, name: str, data: async_api.ArrayLike, **kwargs: Any) -> Array: """Create a sub-array of zeros like `data`. Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(self._sync(self._async_group.zeros_like(name=name, data=data, **kwargs))) @_deprecate_positional_args def ones_like(self, *, name: str, data: async_api.ArrayLike, **kwargs: Any) -> Array: """Create a sub-array of ones like `data`. Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(self._sync(self._async_group.ones_like(name=name, data=data, **kwargs))) @_deprecate_positional_args def full_like(self, *, name: str, data: async_api.ArrayLike, **kwargs: Any) -> Array: """Create a sub-array like `data` filled with the `fill_value` of `data`. Parameters ---------- name : str Name of the array. data : array-like The array to create the new array like. **kwargs Keyword arguments passed to :func:`zarr.api.asynchronous.create`. Returns ------- Array The new array. """ return Array(self._sync(self._async_group.full_like(name=name, data=data, **kwargs))) def move(self, source: str, dest: str) -> None: """Move a sub-group or sub-array from one path to another.
Notes ----- Not implemented """ return self._sync(self._async_group.move(source, dest)) @deprecated("Use Group.create_array instead.") @_deprecate_positional_args def array( self, name: str, *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ChunkCoords | Literal["auto"] | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", compressor: CompressorLike = None, serializer: SerializerLike = "auto", fill_value: Any | None = 0, order: MemoryOrder | None = "C", attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, storage_options: dict[str, Any] | None = None, overwrite: bool = False, config: ArrayConfig | ArrayConfigLike | None = None, data: npt.ArrayLike | None = None, ) -> Array: """Create an array within this group. .. deprecated:: 3.0.0 Use `Group.create_array` instead. This method lightly wraps :func:`zarr.core.array.create_array`. Parameters ---------- name : str The name of the array relative to the group. shape : ChunkCoords Shape of the array. dtype : npt.DTypeLike Data type of the array. chunks : ChunkCoords, optional Chunk shape of the array. If not specified, defaults are guessed based on the shape and dtype. shards : ChunkCoords, optional Shard shape of the array. The default value of ``None`` results in no sharding at all. filters : Iterable[Codec], optional Iterable of filters to apply to each chunk of the array, in order, before serializing that chunk to bytes. For Zarr format 3, a "filter" is a codec that takes an array and returns an array, and these values must be instances of ``ArrayArrayCodec``, or dict representations of ``ArrayArrayCodec``. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v3_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the order of your filters is consistent with the behavior of each filter. If no ``filters`` are provided, a default set of filters will be used. These defaults can be changed by modifying the value of ``array.v2_default_filters`` in :mod:`zarr.core.config`. Use ``None`` to omit default filters. compressors : Iterable[Codec], optional List of compressors to apply to the array. Compressors are applied in order, and after any filters are applied (if any are specified) and the data is serialized into bytes. For Zarr format 3, a "compressor" is a codec that takes a bytestream, and returns another bytestream. Multiple compressors may be provided for Zarr format 3. If no ``compressors`` are provided, a default set of compressors will be used. These defaults can be changed by modifying the value of ``array.v3_default_compressors`` in :mod:`zarr.core.config`. Use ``None`` to omit default compressors. For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may be provided for Zarr format 2. If no ``compressor`` is provided, a default compressor will be used. This default can be changed by modifying the value of ``array.v2_default_compressor`` in :mod:`zarr.core.config`. Use ``None`` to omit the default compressor. compressor : Codec, optional Deprecated in favor of ``compressors``. serializer : dict[str, JSON] | ArrayBytesCodec, optional Array-to-bytes codec to use for encoding the array data. Zarr format 3 only.
Zarr format 2 arrays use implicit array-to-bytes conversion. If no ``serializer`` is provided, a default serializer will be used. These defaults can be changed by modifying the value of ``array.v3_default_serializer`` in :mod:`zarr.core.config`. fill_value : Any, optional Fill value for the array. order : {"C", "F"}, optional The memory order of the array (default is "C"). For Zarr format 2, this parameter sets the memory order of the array. For Zarr format 3, this parameter is deprecated, because memory order is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'order': 'C'}``. If no ``order`` is provided, a default order will be used. This default can be changed by modifying the value of ``array.order`` in :mod:`zarr.core.config`. attributes : dict, optional Attributes for the array. chunk_key_encoding : ChunkKeyEncoding, optional A specification of how the chunk keys are represented in storage. For Zarr format 3, the default is ``{"name": "default", "separator": "/"}``. For Zarr format 2, the default is ``{"name": "v2", "separator": "."}``. dimension_names : Iterable[str], optional The names of the dimensions (default is None). Zarr format 3 only. Zarr format 2 arrays should not use this parameter. storage_options : dict, optional If using an fsspec URL to create the store, these will be passed to the backend implementation. Ignored otherwise. overwrite : bool, default False Whether to overwrite an array with the same name in the store, if one exists. config : ArrayConfig or ArrayConfigLike, optional Runtime configuration for the array. data : array_like The data to fill the array with. Returns ------- Array """ compressors = _parse_deprecated_compressor(compressor, compressors) return Array( self._sync( self._async_group.create_dataset( name=name, shape=shape, dtype=dtype, chunks=chunks, shards=shards, fill_value=fill_value, attributes=attributes, chunk_key_encoding=chunk_key_encoding, compressors=compressors, serializer=serializer, dimension_names=dimension_names, order=order, filters=filters, overwrite=overwrite, storage_options=storage_options, config=config, data=data, ) ) ) async def create_hierarchy( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], overwrite: bool = False, ) -> AsyncIterator[ tuple[str, AsyncGroup | AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]] ]: """ Create a complete zarr hierarchy from a collection of metadata objects. This function will parse its input to ensure that the hierarchy is complete. Any implicit groups will be inserted as needed. For example, an input like ```{'a/b': GroupMetadata}``` will be parsed to ```{'': GroupMetadata, 'a': GroupMetadata, 'a/b': GroupMetadata}``` After input parsing, this function then creates all the nodes in the hierarchy concurrently. Arrays and Groups are yielded in the order they are created. This order is not stable and should not be relied on. Parameters ---------- store : Store The storage backend to use. nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, relative to the root of the ``Store``. The root of the store can be specified with the empty string ``''``. The values are instances of ``GroupMetadata`` or ``ArrayMetadata``. Note that all values must have the same ``zarr_format`` -- it is an error to mix zarr versions in the same hierarchy.
Leading "/" characters from keys will be removed. overwrite : bool Whether to overwrite existing nodes. Defaults to ``False``, in which case an error is raised instead of overwriting an existing array or group. This function will not erase an existing group unless that group is explicitly named in ``nodes``. If ``nodes`` defines implicit groups, e.g. ``{`'a/b/c': GroupMetadata}``, and a group already exists at path ``a``, then this function will leave the group at ``a`` as-is. Yields ------ tuple[str, AsyncGroup | AsyncArray] This function yields (path, node) pairs, in the order the nodes were created. Examples -------- >>> from zarr.api.asynchronous import create_hierarchy >>> from zarr.storage import MemoryStore >>> from zarr.core.group import GroupMetadata >>> import asyncio >>> store = MemoryStore() >>> nodes = {'a': GroupMetadata(attributes={'name': 'leaf'})} >>> async def run(): ... print(dict([x async for x in create_hierarchy(store=store, nodes=nodes)])) >>> asyncio.run(run()) # {'a': , '': } """ # normalize the keys to be valid paths nodes_normed_keys = _normalize_path_keys(nodes) # ensure that all nodes have the same zarr_format, and add implicit groups as needed nodes_parsed = _parse_hierarchy_dict(data=nodes_normed_keys) redundant_implicit_groups = [] # empty hierarchies should be a no-op if len(nodes_parsed) > 0: # figure out which zarr format we are using zarr_format = next(iter(nodes_parsed.values())).zarr_format # check which implicit groups will require materialization implicit_group_keys = tuple( filter(lambda k: isinstance(nodes_parsed[k], ImplicitGroupMarker), nodes_parsed) ) # read potential group metadata for each implicit group maybe_extant_group_coros = ( _read_group_metadata(store, k, zarr_format=zarr_format) for k in implicit_group_keys ) maybe_extant_groups = await asyncio.gather( *maybe_extant_group_coros, return_exceptions=True ) for key, value in zip(implicit_group_keys, maybe_extant_groups, strict=True): if isinstance(value, BaseException): if isinstance(value, FileNotFoundError): # this is fine -- there was no group there, so we will create one pass else: raise value else: # a loop exists already at ``key``, so we can avoid creating anything there redundant_implicit_groups.append(key) if overwrite: # we will remove any nodes that collide with arrays and non-implicit groups defined in # nodes # track the keys of nodes we need to delete to_delete_keys = [] to_delete_keys.extend( [k for k, v in nodes_parsed.items() if k not in implicit_group_keys] ) await asyncio.gather(*(store.delete_dir(key) for key in to_delete_keys)) else: # This type is long. 
coros: ( Generator[Coroutine[Any, Any, ArrayV2Metadata | GroupMetadata], None, None] | Generator[Coroutine[Any, Any, ArrayV3Metadata | GroupMetadata], None, None] ) if zarr_format == 2: coros = (_read_metadata_v2(store=store, path=key) for key in nodes_parsed) elif zarr_format == 3: coros = (_read_metadata_v3(store=store, path=key) for key in nodes_parsed) else: # pragma: no cover raise ValueError(f"Invalid zarr_format: {zarr_format}") # pragma: no cover extant_node_query = dict( zip( nodes_parsed.keys(), await asyncio.gather(*coros, return_exceptions=True), strict=False, ) ) # iterate over the existing arrays / groups and figure out which of them conflict # with the arrays / groups we want to create for key, extant_node in extant_node_query.items(): proposed_node = nodes_parsed[key] if isinstance(extant_node, BaseException): if isinstance(extant_node, FileNotFoundError): # ignore FileNotFoundError, because it represents a node we can safely create pass else: # Any other exception is a real error raise extant_node else: # this is a node that already exists, but a node with the same key was specified # in nodes_parsed. if isinstance(extant_node, GroupMetadata): # a group already exists where we want to create a group if isinstance(proposed_node, ImplicitGroupMarker): # we have proposed an implicit group, which is OK -- we will just skip # creating this particular metadata document redundant_implicit_groups.append(key) else: # we have proposed an explicit group, which is an error, given that a # group already exists. raise ContainsGroupError(store, key) elif isinstance(extant_node, ArrayV2Metadata | ArrayV3Metadata): # we are trying to overwrite an existing array. this is an error. raise ContainsArrayError(store, key) nodes_explicit: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata] = {} for k, v in nodes_parsed.items(): if k not in redundant_implicit_groups: if isinstance(v, ImplicitGroupMarker): nodes_explicit[k] = GroupMetadata(zarr_format=v.zarr_format) else: nodes_explicit[k] = v async for key, node in create_nodes(store=store, nodes=nodes_explicit): yield key, node async def create_nodes( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], ) -> AsyncIterator[ tuple[str, AsyncGroup | AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]] ]: """Create a collection of arrays and / or groups concurrently. Note: no attempt is made to validate that these arrays and / or groups collectively form a valid Zarr hierarchy. It is the responsibility of the caller of this function to ensure that the ``nodes`` parameter satisfies any correctness constraints. Parameters ---------- store : Store The storage backend to use. nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, and the values are the metadata of the nodes. The metadata must be either an instance of GroupMetadata, ArrayV3Metadata or ArrayV2Metadata. Yields ------ tuple[str, AsyncGroup | AsyncArray] The created nodes as (path, node) pairs, in the order they are created. """ # Note: the only way to alter this value is via the config.
If that's undesirable for some reason, # then we should consider adding a keyword argument to this function semaphore = asyncio.Semaphore(config.get("async.concurrency")) create_tasks: list[Coroutine[None, None, str]] = [] for key, value in nodes.items(): # make the key absolute create_tasks.extend(_persist_metadata(store, key, value, semaphore=semaphore)) created_object_keys = [] for coro in asyncio.as_completed(create_tasks): created_key = await coro # we need this to track which metadata documents were written so that we can yield a # complete v2 Array / Group class after both .zattrs and the metadata JSON were created. created_object_keys.append(created_key) # get the node name from the object key if len(created_key.split("/")) == 1: # this is the root node meta_out = nodes[""] node_name = "" else: # strip the metadata document name, e.g. turn "foo/zarr.json" into "foo" node_name = created_key[: created_key.rfind("/")] meta_out = nodes[node_name] if meta_out.zarr_format == 3: yield node_name, _build_node(store=store, path=node_name, metadata=meta_out) else: # For zarr v2 # we only want to yield when both the metadata and attributes are created # so we track which keys have been created, and wait for both the meta key and # the attrs key to be created before yielding back the AsyncArray / AsyncGroup attrs_done = _join_paths([node_name, ZATTRS_JSON]) in created_object_keys if isinstance(meta_out, GroupMetadata): meta_done = _join_paths([node_name, ZGROUP_JSON]) in created_object_keys else: meta_done = _join_paths([node_name, ZARRAY_JSON]) in created_object_keys if meta_done and attrs_done: yield node_name, _build_node(store=store, path=node_name, metadata=meta_out) continue def _get_roots( data: Iterable[str], ) -> tuple[str, ...]: """ Return the keys of the root(s) of the hierarchy. A root is a key with the fewest number of path segments. """ if "" in data: return ("",) keys_split = sorted((key.split("/") for key in data), key=len) groups: defaultdict[int, list[str]] = defaultdict(list) for key_split in keys_split: groups[len(key_split)].append("/".join(key_split)) return tuple(groups[min(groups.keys())]) def _parse_hierarchy_dict( *, data: Mapping[str, ImplicitGroupMarker | GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], ) -> dict[str, ImplicitGroupMarker | GroupMetadata | ArrayV2Metadata | ArrayV3Metadata]: """ Take an input with type Mapping[str, ArrayMetadata | GroupMetadata] and parse it into a dict of str: node pairs that models a valid, complete Zarr hierarchy. If the input represents a complete Zarr hierarchy, i.e. one with no implicit groups, then return a dict with the exact same data as the input. Otherwise, return a dict derived from the input with GroupMetadata inserted as needed to make the hierarchy complete. For example, an input of {'a/b': ArrayMetadata} is incomplete, because it references two groups (the root group '' and a group at 'a') that are not specified in the input. Applying this function to that input will result in a return value of {'': GroupMetadata, 'a': GroupMetadata, 'a/b': ArrayMetadata}, i.e. the implied groups were added. The input is also checked for the following conditions; an error is raised if any are violated: - No array can contain groups or arrays (i.e., all arrays must be leaf nodes). - All arrays and groups must have the same ``zarr_format`` value. This function ensures that the input is transformed into a specification of a complete and valid Zarr hierarchy.
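Examples -------- A minimal sketch of the completion behavior described above: >>> from zarr.core.group import GroupMetadata >>> parsed = _parse_hierarchy_dict(data={'a/b': GroupMetadata()}) >>> sorted(parsed) ['', 'a', 'a/b']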
""" # ensure that all nodes have the same zarr format data_purified = _ensure_consistent_zarr_format(data) # ensure that keys are normalized to zarr paths data_normed_keys = _normalize_path_keys(data_purified) # insert an implicit root group if a root was not specified # but not if an empty dict was provided, because any empty hierarchy has no nodes if len(data_normed_keys) > 0 and "" not in data_normed_keys: z_format = next(iter(data_normed_keys.values())).zarr_format data_normed_keys = data_normed_keys | {"": ImplicitGroupMarker(zarr_format=z_format)} out: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata] = {**data_normed_keys} for k, v in data_normed_keys.items(): key_split = k.split("/") # get every parent path *subpaths, _ = accumulate(key_split, lambda a, b: _join_paths([a, b])) for subpath in subpaths: # If a component is not already in the output dict, add ImplicitGroupMetadata if subpath not in out: out[subpath] = ImplicitGroupMarker(zarr_format=v.zarr_format) else: if not isinstance(out[subpath], GroupMetadata | ImplicitGroupMarker): msg = ( f"The node at {subpath} contains other nodes, but it is not a Zarr group. " "This is invalid. Only Zarr groups can contain other nodes." ) raise ValueError(msg) return out def _ensure_consistent_zarr_format( data: Mapping[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], ) -> Mapping[str, GroupMetadata | ArrayV2Metadata] | Mapping[str, GroupMetadata | ArrayV3Metadata]: """ Ensure that all values of the input dict have the same zarr format. If any do not, then a value error is raised. """ observed_zarr_formats: dict[ZarrFormat, list[str]] = {2: [], 3: []} for k, v in data.items(): observed_zarr_formats[v.zarr_format].append(k) if len(observed_zarr_formats[2]) > 0 and len(observed_zarr_formats[3]) > 0: msg = ( "Got data with both Zarr v2 and Zarr v3 nodes, which is invalid. " f"The following keys map to Zarr v2 nodes: {observed_zarr_formats.get(2)}. " f"The following keys map to Zarr v3 nodes: {observed_zarr_formats.get(3)}." "Ensure that all nodes have the same Zarr format." ) raise ValueError(msg) return cast( Mapping[str, GroupMetadata | ArrayV2Metadata] | Mapping[str, GroupMetadata | ArrayV3Metadata], data, ) async def _getitem_semaphore( node: AsyncGroup, key: str, semaphore: asyncio.Semaphore | None ) -> AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup: """ Wrap Group.getitem with an optional semaphore. If the semaphore parameter is an asyncio.Semaphore instance, then the getitem operation is performed inside an async context manager provided by that semaphore. If the semaphore parameter is None, then getitem is invoked without a context manager. """ if semaphore is not None: async with semaphore: return await node.getitem(key) else: return await node.getitem(key) async def _iter_members( node: AsyncGroup, skip_keys: tuple[str, ...], semaphore: asyncio.Semaphore | None, ) -> AsyncGenerator[ tuple[str, AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup], None ]: """ Iterate over the arrays and groups contained in a group. Parameters ---------- node : AsyncGroup The group to traverse. skip_keys : tuple[str, ...] A tuple of keys to skip when iterating over the possible members of the group. semaphore : asyncio.Semaphore | None An optional semaphore to use for concurrency control. 
Yields ------ tuple[str, AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup] """ # retrieve keys from storage keys = [key async for key in node.store.list_dir(node.path)] keys_filtered = tuple(filter(lambda v: v not in skip_keys, keys)) node_tasks = tuple( asyncio.create_task(_getitem_semaphore(node, key, semaphore), name=key) for key in keys_filtered ) for fetched_node_coro in asyncio.as_completed(node_tasks): try: fetched_node = await fetched_node_coro except KeyError as e: # KeyError is raised when `key` names an object (in the object storage sense) # rather than a prefix, in which case `key` cannot be the name of a sub-array # or sub-group. warnings.warn( f"Object at {e.args[0]} is not recognized as a component of a Zarr hierarchy.", UserWarning, stacklevel=1, ) continue match fetched_node: case AsyncArray() | AsyncGroup(): yield fetched_node.basename, fetched_node case _: raise ValueError(f"Unexpected type: {type(fetched_node)}") async def _iter_members_deep( group: AsyncGroup, *, max_depth: int | None, skip_keys: tuple[str, ...], semaphore: asyncio.Semaphore | None = None, ) -> AsyncGenerator[ tuple[str, AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup], None ]: """ Iterate over the arrays and groups contained in a group, and optionally the arrays and groups contained in those groups. Parameters ---------- group : AsyncGroup The group to traverse. max_depth : int | None The maximum depth of recursion. skip_keys : tuple[str, ...] A tuple of keys to skip when iterating over the possible members of the group. semaphore : asyncio.Semaphore | None An optional semaphore to use for concurrency control. Yields ------ tuple[str, AsyncArray[ArrayV3Metadata] | AsyncArray[ArrayV2Metadata] | AsyncGroup] """ to_recurse = {} do_recursion = max_depth is None or max_depth > 0 if max_depth is None: new_depth = None else: new_depth = max_depth - 1 async for name, node in _iter_members(group, skip_keys=skip_keys, semaphore=semaphore): yield name, node if isinstance(node, AsyncGroup) and do_recursion: to_recurse[name] = _iter_members_deep( node, max_depth=new_depth, skip_keys=skip_keys, semaphore=semaphore ) for prefix, subgroup_iter in to_recurse.items(): async for name, node in subgroup_iter: key = f"{prefix}/{name}".lstrip("/") yield key, node async def _read_metadata_v3(store: Store, path: str) -> ArrayV3Metadata | GroupMetadata: """ Given a store and a path, return the ArrayV3Metadata or GroupMetadata defined by the metadata document stored at ``path``/zarr.json. If no such document is found, raise a FileNotFoundError. """ zarr_json_bytes = await store.get( _join_paths([path, ZARR_JSON]), prototype=default_buffer_prototype() ) if zarr_json_bytes is None: raise FileNotFoundError(path) else: zarr_json = json.loads(zarr_json_bytes.to_bytes()) return _build_metadata_v3(zarr_json) async def _read_metadata_v2(store: Store, path: str) -> ArrayV2Metadata | GroupMetadata: """ Given a store and a path, return the ArrayV2Metadata or GroupMetadata defined by the metadata document stored at ``path``/(.zgroup | .zarray). If no such document is found, raise a FileNotFoundError.
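Notes ----- The ``.zarray``, ``.zgroup``, and ``.zattrs`` documents are fetched concurrently; when both array and group metadata are present at the same path, the array metadata takes precedence.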
""" # TODO: consider first fetching array metadata, and only fetching group metadata when we don't # find an array zarray_bytes, zgroup_bytes, zattrs_bytes = await asyncio.gather( store.get(_join_paths([path, ZARRAY_JSON]), prototype=default_buffer_prototype()), store.get(_join_paths([path, ZGROUP_JSON]), prototype=default_buffer_prototype()), store.get(_join_paths([path, ZATTRS_JSON]), prototype=default_buffer_prototype()), ) if zattrs_bytes is None: zattrs = {} else: zattrs = json.loads(zattrs_bytes.to_bytes()) # TODO: decide how to handle finding both array and group metadata. The spec does not seem to # consider this situation. A practical approach would be to ignore that combination, and only # return the array metadata. if zarray_bytes is not None: zmeta = json.loads(zarray_bytes.to_bytes()) else: if zgroup_bytes is None: # neither .zarray or .zgroup were found results in KeyError raise FileNotFoundError(path) else: zmeta = json.loads(zgroup_bytes.to_bytes()) return _build_metadata_v2(zmeta, zattrs) async def _read_group_metadata_v2(store: Store, path: str) -> GroupMetadata: """ Read group metadata or error """ meta = await _read_metadata_v2(store=store, path=path) if not isinstance(meta, GroupMetadata): raise FileNotFoundError(f"Group metadata was not found in {store} at {path}") return meta async def _read_group_metadata_v3(store: Store, path: str) -> GroupMetadata: """ Read group metadata or error """ meta = await _read_metadata_v3(store=store, path=path) if not isinstance(meta, GroupMetadata): raise FileNotFoundError(f"Group metadata was not found in {store} at {path}") return meta async def _read_group_metadata( store: Store, path: str, *, zarr_format: ZarrFormat ) -> GroupMetadata: if zarr_format == 2: return await _read_group_metadata_v2(store=store, path=path) return await _read_group_metadata_v3(store=store, path=path) def _build_metadata_v3(zarr_json: dict[str, JSON]) -> ArrayV3Metadata | GroupMetadata: """ Convert a dict representation of Zarr V3 metadata into the corresponding metadata class. """ if "node_type" not in zarr_json: raise MetadataValidationError("node_type", "array or group", "nothing (the key is missing)") match zarr_json: case {"node_type": "array"}: return ArrayV3Metadata.from_dict(zarr_json) case {"node_type": "group"}: return GroupMetadata.from_dict(zarr_json) case _: # pragma: no cover raise ValueError( "invalid value for `node_type` key in metadata document" ) # pragma: no cover def _build_metadata_v2( zarr_json: dict[str, object], attrs_json: dict[str, JSON] ) -> ArrayV2Metadata | GroupMetadata: """ Convert a dict representation of Zarr V2 metadata into the corresponding metadata class. """ match zarr_json: case {"shape": _}: return ArrayV2Metadata.from_dict(zarr_json | {"attributes": attrs_json}) case _: # pragma: no cover return GroupMetadata.from_dict(zarr_json | {"attributes": attrs_json}) @overload def _build_node( *, store: Store, path: str, metadata: ArrayV2Metadata ) -> AsyncArray[ArrayV2Metadata]: ... @overload def _build_node( *, store: Store, path: str, metadata: ArrayV3Metadata ) -> AsyncArray[ArrayV3Metadata]: ... @overload def _build_node(*, store: Store, path: str, metadata: GroupMetadata) -> AsyncGroup: ... def _build_node( *, store: Store, path: str, metadata: ArrayV3Metadata | ArrayV2Metadata | GroupMetadata ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup: """ Take a metadata object and return a node (AsyncArray or AsyncGroup). 
""" store_path = StorePath(store=store, path=path) match metadata: case ArrayV2Metadata() | ArrayV3Metadata(): return AsyncArray(metadata, store_path=store_path) case GroupMetadata(): return AsyncGroup(metadata, store_path=store_path) case _: # pragma: no cover raise ValueError(f"Unexpected metadata type: {type(metadata)}") # pragma: no cover async def _get_node_v2(store: Store, path: str) -> AsyncArray[ArrayV2Metadata] | AsyncGroup: """ Read a Zarr v2 AsyncArray or AsyncGroup from a path in a Store. Parameters ---------- store : Store The store-like object to read from. path : str The path to the node to read. Returns ------- AsyncArray | AsyncGroup """ metadata = await _read_metadata_v2(store=store, path=path) return _build_node(store=store, path=path, metadata=metadata) async def _get_node_v3(store: Store, path: str) -> AsyncArray[ArrayV3Metadata] | AsyncGroup: """ Read a Zarr v3 AsyncArray or AsyncGroup from a path in a Store. Parameters ---------- store : Store The store-like object to read from. path : str The path to the node to read. Returns ------- AsyncArray | AsyncGroup """ metadata = await _read_metadata_v3(store=store, path=path) return _build_node(store=store, path=path, metadata=metadata) async def get_node( store: Store, path: str, zarr_format: ZarrFormat ) -> AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | AsyncGroup: """ Get an AsyncArray or AsyncGroup from a path in a Store. Parameters ---------- store : Store The store-like object to read from. path : str The path to the node to read. zarr_format : {2, 3} The zarr format of the node to read. Returns ------- AsyncArray | AsyncGroup """ match zarr_format: case 2: return await _get_node_v2(store=store, path=path) case 3: return await _get_node_v3(store=store, path=path) case _: # pragma: no cover raise ValueError(f"Unexpected zarr format: {zarr_format}") # pragma: no cover async def _set_return_key( *, store: Store, key: str, value: Buffer, semaphore: asyncio.Semaphore | None = None ) -> str: """ Write a value to storage at the given key. The key is returned. Useful when saving values via routines that return results in execution order, like asyncio.as_completed, because in this case we need to know which key was saved in order to yield the right object to the caller. Parameters ---------- store : Store The store to save the value to. key : str The key to save the value to. value : Buffer The value to save. semaphore : asyncio.Semaphore | None An optional semaphore to use to limit the number of concurrent writes. """ if semaphore is not None: async with semaphore: await store.set(key, value) else: await store.set(key, value) return key def _persist_metadata( store: Store, path: str, metadata: ArrayV2Metadata | ArrayV3Metadata | GroupMetadata, semaphore: asyncio.Semaphore | None = None, ) -> tuple[Coroutine[None, None, str], ...]: """ Prepare to save a metadata document to storage, returning a tuple of coroutines that must be awaited. """ to_save = metadata.to_buffer_dict(default_buffer_prototype()) return tuple( _set_return_key(store=store, key=_join_paths([path, key]), value=value, semaphore=semaphore) for key, value in to_save.items() ) async def create_rooted_hierarchy( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], overwrite: bool = False, ) -> AsyncGroup | AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata]: """ Create an ``AsyncGroup`` or ``AsyncArray`` from a store and a dict of metadata documents. 
This function ensures that its input contains a specification of a root node, calls ``create_hierarchy`` to create nodes, and returns the root node of the hierarchy. """ roots = _get_roots(nodes.keys()) if len(roots) != 1: msg = ( "The input does not specify a root node. " "This function can only create hierarchies that contain a root node, which is " "defined as a group that is ancestral to all the other arrays and " "groups in the hierarchy, or a single array." ) raise ValueError(msg) else: root_key = roots[0] nodes_created = [ x async for x in create_hierarchy(store=store, nodes=nodes, overwrite=overwrite) ] return dict(nodes_created)[root_key] zarr-python-3.0.6/src/zarr/core/indexing.py000066400000000000000000001434131476711733500207150ustar00rootroot00000000000000from __future__ import annotations import itertools import math import numbers import operator from collections.abc import Iterator, Sequence from dataclasses import dataclass from enum import Enum from functools import reduce from types import EllipsisType from typing import ( TYPE_CHECKING, Any, Literal, NamedTuple, Protocol, TypeAlias, TypeGuard, TypeVar, cast, runtime_checkable, ) import numpy as np import numpy.typing as npt from zarr.core.common import product if TYPE_CHECKING: from zarr.core.array import Array from zarr.core.buffer import NDArrayLike from zarr.core.chunk_grids import ChunkGrid from zarr.core.common import ChunkCoords IntSequence = list[int] | npt.NDArray[np.intp] ArrayOfIntOrBool = npt.NDArray[np.intp] | npt.NDArray[np.bool_] BasicSelector = int | slice | EllipsisType Selector = BasicSelector | ArrayOfIntOrBool BasicSelection = BasicSelector | tuple[BasicSelector, ...] # also used for BlockIndex CoordinateSelection = IntSequence | tuple[IntSequence, ...] MaskSelection = npt.NDArray[np.bool_] OrthogonalSelection = Selector | tuple[Selector, ...] Selection = BasicSelection | CoordinateSelection | MaskSelection | OrthogonalSelection CoordinateSelectionNormalized = tuple[npt.NDArray[np.intp], ...] SelectionNormalized = tuple[Selector, ...] | ArrayOfIntOrBool SelectionWithFields = Selection | str | Sequence[str] SelectorTuple = tuple[Selector, ...] | npt.NDArray[np.intp] | slice Fields = str | list[str] | tuple[str, ...] class ArrayIndexError(IndexError): pass class BoundsCheckError(IndexError): _msg = "" def __init__(self, dim_len: int) -> None: self._msg = f"index out of bounds for dimension with length {dim_len}" class NegativeStepError(IndexError): _msg = "only slices with step >= 1 are supported" class VindexInvalidSelectionError(IndexError): _msg = ( "unsupported selection type for vectorized indexing; only " "coordinate selection (tuple of integer arrays) and mask selection " "(single Boolean array) are supported; got {!r}" ) def err_too_many_indices(selection: Any, shape: ChunkCoords) -> None: raise IndexError(f"too many indices for array; expected {len(shape)}, got {len(selection)}") def _zarr_array_to_int_or_bool_array(arr: Array) -> npt.NDArray[np.intp] | npt.NDArray[np.bool_]: if arr.dtype.kind in ("i", "b"): return np.asarray(arr) else: raise IndexError( f"Invalid array dtype: {arr.dtype}. Arrays used as indices must be of integer or boolean type" ) @runtime_checkable class Indexer(Protocol): shape: ChunkCoords drop_axes: ChunkCoords def __iter__(self) -> Iterator[ChunkProjection]: ... 
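# An illustrative (non-normative) sketch of how ``Indexer`` implementations are consumed: # each ``ChunkProjection`` yielded by an indexer pairs a chunk's grid coordinates with the # selections needed to copy items between that chunk and the output array, roughly: # for proj in indexer: # out[proj.out_selection] = chunk_data(proj.chunk_coords)[proj.chunk_selection] # where ``chunk_data`` stands in for whatever loads that chunk's decoded array.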
def ceildiv(a: float, b: float) -> int: if a == 0: return 0 return math.ceil(a / b) _ArrayIndexingOrder: TypeAlias = Literal["lexicographic"] def _iter_grid( grid_shape: Sequence[int], *, origin: Sequence[int] | None = None, selection_shape: Sequence[int] | None = None, order: _ArrayIndexingOrder = "lexicographic", ) -> Iterator[ChunkCoords]: """ Iterate over the elements of a grid of integers, with the option to restrict the domain of iteration to a contiguous subregion of that grid. Parameters ---------- grid_shape : Sequence[int] The size of the domain to iterate over. origin : Sequence[int] | None, default=None The first coordinate of the domain to return. selection_shape : Sequence[int] | None, default=None The shape of the selection. order : Literal["lexicographic"], default="lexicographic" The linear indexing order to use. Returns ------- itertools.product object An iterator over tuples of integers Examples -------- >>> tuple(_iter_grid((1,))) ((0,),) >>> tuple(_iter_grid((2, 3))) ((0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)) >>> tuple(_iter_grid((2, 3), origin=(1, 1))) ((1, 1), (1, 2)) >>> tuple(_iter_grid((3, 3), origin=(1, 1), selection_shape=(2, 2))) ((1, 1), (1, 2), (2, 1), (2, 2)) """ if origin is None: origin_parsed = (0,) * len(grid_shape) else: if len(origin) != len(grid_shape): msg = ( "Shape and origin parameters must have the same length. " f"Got {len(grid_shape)} elements in shape, but {len(origin)} elements in origin." ) raise ValueError(msg) origin_parsed = tuple(origin) if selection_shape is None: selection_shape_parsed = tuple( g - o for o, g in zip(origin_parsed, grid_shape, strict=True) ) else: selection_shape_parsed = tuple(selection_shape) if order == "lexicographic": dimensions: tuple[range, ...] = () for idx, (o, gs, ss) in enumerate( zip(origin_parsed, grid_shape, selection_shape_parsed, strict=True) ): if o + ss > gs: raise IndexError( f"Invalid selection shape ({selection_shape}) for origin ({origin}) and grid shape ({grid_shape}) at axis {idx}." ) dimensions += (range(o, o + ss),) yield from itertools.product(*(dimensions)) else: msg = f"Indexing order {order} is not supported at this time."
# type: ignore[unreachable] raise NotImplementedError(msg) def is_integer(x: Any) -> TypeGuard[int]: """True if x is an integer (either pure Python or NumPy).""" return isinstance(x, numbers.Integral) and not is_bool(x) def is_bool(x: Any) -> TypeGuard[bool | np.bool_]: """True if x is a boolean (either pure Python or NumPy).""" return type(x) in [bool, np.bool_] def is_integer_list(x: Any) -> TypeGuard[list[int]]: """True if x is a list of integers.""" return isinstance(x, list) and len(x) > 0 and all(is_integer(i) for i in x) def is_bool_list(x: Any) -> TypeGuard[list[bool | np.bool_]]: """True if x is a list of booleans.""" return isinstance(x, list) and len(x) > 0 and all(is_bool(i) for i in x) def is_integer_array(x: Any, ndim: int | None = None) -> TypeGuard[npt.NDArray[np.intp]]: t = not np.isscalar(x) and hasattr(x, "shape") and hasattr(x, "dtype") and x.dtype.kind in "ui" if ndim is not None: t = t and hasattr(x, "shape") and len(x.shape) == ndim return t def is_bool_array(x: Any, ndim: int | None = None) -> TypeGuard[npt.NDArray[np.bool_]]: t = hasattr(x, "shape") and hasattr(x, "dtype") and x.dtype == bool if ndim is not None: t = t and hasattr(x, "shape") and len(x.shape) == ndim return t def is_int_or_bool_iterable(x: Any) -> bool: return is_integer_list(x) or is_integer_array(x) or is_bool_array(x) or is_bool_list(x) def is_scalar(value: Any, dtype: np.dtype[Any]) -> bool: if np.isscalar(value): return True if hasattr(value, "shape") and value.shape == (): return True return isinstance(value, tuple) and dtype.names is not None and len(value) == len(dtype.names) def is_pure_fancy_indexing(selection: Any, ndim: int) -> bool: """Check whether a selection contains only scalars or integer/bool array-likes. Parameters ---------- selection : tuple, slice, or scalar A valid selection value for indexing into arrays. ndim : int Number of dimensions of the array being indexed. Returns ------- is_pure : bool True if the selection is a pure fancy indexing expression (i.e. not mixed with slices). """ if is_bool_array(selection): # is mask selection return True if ndim == 1 and ( is_integer_list(selection) or is_integer_array(selection) or is_bool_list(selection) ): return True # if not, we go through the normal path below, because a 1-tuple # of integers is also allowed. no_slicing = ( isinstance(selection, tuple) and len(selection) == ndim and not (any(isinstance(elem, slice) or elem is Ellipsis for elem in selection)) ) return ( no_slicing and all( is_integer(elem) or is_integer_list(elem) or is_integer_array(elem) for elem in selection ) and any(is_integer_list(elem) or is_integer_array(elem) for elem in selection) ) def is_pure_orthogonal_indexing(selection: Selection, ndim: int) -> TypeGuard[OrthogonalSelection]: if not ndim: return False selection_normalized = (selection,) if not isinstance(selection, tuple) else selection # Case 1: the selection consists entirely of iterables of integers or booleans if len(selection_normalized) == ndim and all( is_int_or_bool_iterable(s) for s in selection_normalized ): return True # Case 2: the selection contains at most one integer or boolean iterable; # all other selection elements are slices or integers return ( len(selection_normalized) <= ndim and sum(is_int_or_bool_iterable(s) for s in selection_normalized) <= 1 and all( is_int_or_bool_iterable(s) or isinstance(s, int | slice) for s in selection_normalized ) ) def get_chunk_shape(chunk_grid: ChunkGrid) -> ChunkCoords: from zarr.core.chunk_grids import RegularChunkGrid assert isinstance(chunk_grid, RegularChunkGrid), ( "Only regular chunk grid is supported, currently."
) return chunk_grid.chunk_shape def normalize_integer_selection(dim_sel: int, dim_len: int) -> int: # normalize type to int dim_sel = int(dim_sel) # handle wraparound if dim_sel < 0: dim_sel = dim_len + dim_sel # handle out of bounds if dim_sel >= dim_len or dim_sel < 0: raise BoundsCheckError(dim_len) return dim_sel class ChunkDimProjection(NamedTuple): """A mapping from chunk to output array for a single dimension. Attributes ---------- dim_chunk_ix Index of chunk. dim_chunk_sel Selection of items from chunk array. dim_out_sel Selection of items in target (output) array. is_complete_chunk True if the projection covers the entire chunk along this dimension. """ dim_chunk_ix: int dim_chunk_sel: Selector dim_out_sel: Selector | None is_complete_chunk: bool @dataclass(frozen=True) class IntDimIndexer: dim_sel: int dim_len: int dim_chunk_len: int nitems: int = 1 def __init__(self, dim_sel: int, dim_len: int, dim_chunk_len: int) -> None: object.__setattr__(self, "dim_sel", normalize_integer_selection(dim_sel, dim_len)) object.__setattr__(self, "dim_len", dim_len) object.__setattr__(self, "dim_chunk_len", dim_chunk_len) def __iter__(self) -> Iterator[ChunkDimProjection]: dim_chunk_ix = self.dim_sel // self.dim_chunk_len dim_offset = dim_chunk_ix * self.dim_chunk_len dim_chunk_sel = self.dim_sel - dim_offset dim_out_sel = None is_complete_chunk = self.dim_chunk_len == 1 yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk) @dataclass(frozen=True) class SliceDimIndexer: dim_len: int dim_chunk_len: int nitems: int nchunks: int start: int stop: int step: int def __init__(self, dim_sel: slice, dim_len: int, dim_chunk_len: int) -> None: # normalize start, stop, step = dim_sel.indices(dim_len) if step < 1: raise NegativeStepError object.__setattr__(self, "start", start) object.__setattr__(self, "stop", stop) object.__setattr__(self, "step", step) object.__setattr__(self, "dim_len", dim_len) object.__setattr__(self, "dim_chunk_len", dim_chunk_len) object.__setattr__(self, "nitems", max(0, ceildiv((stop - start), step))) object.__setattr__(self, "nchunks", ceildiv(dim_len, dim_chunk_len)) def __iter__(self) -> Iterator[ChunkDimProjection]: # figure out the range of chunks we need to visit dim_chunk_ix_from = 0 if self.start == 0 else self.start // self.dim_chunk_len dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len) # iterate over chunks in range for dim_chunk_ix in range(dim_chunk_ix_from, dim_chunk_ix_to): # compute offsets for chunk within overall array dim_offset = dim_chunk_ix * self.dim_chunk_len dim_limit = min(self.dim_len, (dim_chunk_ix + 1) * self.dim_chunk_len) # determine chunk length, accounting for trailing chunk dim_chunk_len = dim_limit - dim_offset if self.start < dim_offset: # selection starts before current chunk dim_chunk_sel_start = 0 remainder = (dim_offset - self.start) % self.step if remainder: dim_chunk_sel_start += self.step - remainder # compute number of previous items, provides offset into output array dim_out_offset = ceildiv((dim_offset - self.start), self.step) else: # selection starts within current chunk dim_chunk_sel_start = self.start - dim_offset dim_out_offset = 0 if self.stop > dim_limit: # selection ends after current chunk dim_chunk_sel_stop = dim_chunk_len else: # selection ends within current chunk dim_chunk_sel_stop = self.stop - dim_offset dim_chunk_sel = slice(dim_chunk_sel_start, dim_chunk_sel_stop, self.step) dim_chunk_nitems = ceildiv((dim_chunk_sel_stop - dim_chunk_sel_start), self.step) # if there are no elements of the selection within this chunk, then skip if dim_chunk_nitems == 0: continue
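# compute the destination of this chunk's selected items in the output array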
dim_out_sel = slice(dim_out_offset, dim_out_offset + dim_chunk_nitems) is_complete_chunk = ( dim_chunk_sel_start == 0 and (self.stop >= dim_limit) and self.step in [1, None] ) yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk) def check_selection_length(selection: SelectionNormalized, shape: ChunkCoords) -> None: if len(selection) > len(shape): err_too_many_indices(selection, shape) def replace_ellipsis(selection: Any, shape: ChunkCoords) -> SelectionNormalized: selection = ensure_tuple(selection) # count number of ellipsis present n_ellipsis = sum(1 for i in selection if i is Ellipsis) if n_ellipsis > 1: # more than 1 is an error raise IndexError("an index can only have a single ellipsis ('...')") elif n_ellipsis == 1: # locate the ellipsis, count how many items to left and right n_items_l = selection.index(Ellipsis) # items to left of ellipsis n_items_r = len(selection) - (n_items_l + 1) # items to right of ellipsis n_items = len(selection) - 1 # all non-ellipsis items if n_items >= len(shape): # ellipsis does nothing, just remove it selection = tuple(i for i in selection if i != Ellipsis) else: # replace ellipsis with as many slices as are needed for number of dims new_item = selection[:n_items_l] + ((slice(None),) * (len(shape) - n_items)) if n_items_r: new_item += selection[-n_items_r:] selection = new_item # fill out selection if not completely specified if len(selection) < len(shape): selection += (slice(None),) * (len(shape) - len(selection)) # check selection not too long check_selection_length(selection, shape) return cast(SelectionNormalized, selection) def replace_lists(selection: SelectionNormalized) -> SelectionNormalized: return tuple( np.asarray(dim_sel) if isinstance(dim_sel, list) else dim_sel for dim_sel in selection ) T = TypeVar("T") def ensure_tuple(v: Any) -> SelectionNormalized: if not isinstance(v, tuple): v = (v,) return cast(SelectionNormalized, v) class ChunkProjection(NamedTuple): """A mapping of items from chunk to output array. Can be used to extract items from the chunk array for loading into an output array. Can also be used to extract items from a value array for setting/updating in a chunk array. Attributes ---------- chunk_coords Indices of chunk. chunk_selection Selection of items from chunk array. out_selection Selection of items in target (output) array. is_complete_chunk True if a complete chunk is indexed. """ chunk_coords: ChunkCoords chunk_selection: tuple[Selector, ...] | npt.NDArray[np.intp] out_selection: tuple[Selector, ...]
| npt.NDArray[np.intp] | slice is_complete_chunk: bool def is_slice(s: Any) -> TypeGuard[slice]: return isinstance(s, slice) def is_contiguous_slice(s: Any) -> TypeGuard[slice]: return is_slice(s) and (s.step is None or s.step == 1) def is_positive_slice(s: Any) -> TypeGuard[slice]: return is_slice(s) and (s.step is None or s.step >= 1) def is_contiguous_selection(selection: Any) -> TypeGuard[slice]: selection = ensure_tuple(selection) return all((is_integer_array(s) or is_contiguous_slice(s) or s == Ellipsis) for s in selection) def is_basic_selection(selection: Any) -> TypeGuard[BasicSelection]: selection = ensure_tuple(selection) return all(is_integer(s) or is_positive_slice(s) for s in selection) @dataclass(frozen=True) class BasicIndexer(Indexer): dim_indexers: list[IntDimIndexer | SliceDimIndexer] shape: ChunkCoords drop_axes: ChunkCoords def __init__( self, selection: BasicSelection, shape: ChunkCoords, chunk_grid: ChunkGrid, ) -> None: chunk_shape = get_chunk_shape(chunk_grid) # handle ellipsis selection_normalized = replace_ellipsis(selection, shape) # setup per-dimension indexers dim_indexers: list[IntDimIndexer | SliceDimIndexer] = [] for dim_sel, dim_len, dim_chunk_len in zip( selection_normalized, shape, chunk_shape, strict=True ): dim_indexer: IntDimIndexer | SliceDimIndexer if is_integer(dim_sel): dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len) elif is_slice(dim_sel): dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len) else: raise IndexError( "unsupported selection item for basic indexing; " f"expected integer or slice, got {type(dim_sel)!r}" ) dim_indexers.append(dim_indexer) object.__setattr__(self, "dim_indexers", dim_indexers) object.__setattr__( self, "shape", tuple(s.nitems for s in self.dim_indexers if not isinstance(s, IntDimIndexer)), ) object.__setattr__(self, "drop_axes", ()) def __iter__(self) -> Iterator[ChunkProjection]: for dim_projections in itertools.product(*self.dim_indexers): chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections) chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections) out_selection = tuple( p.dim_out_sel for p in dim_projections if p.dim_out_sel is not None ) is_complete_chunk = all(p.is_complete_chunk for p in dim_projections) yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk) @dataclass(frozen=True) class BoolArrayDimIndexer: dim_sel: npt.NDArray[np.bool_] dim_len: int dim_chunk_len: int nchunks: int chunk_nitems: npt.NDArray[Any] chunk_nitems_cumsum: npt.NDArray[Any] nitems: int dim_chunk_ixs: npt.NDArray[np.intp] def __init__(self, dim_sel: npt.NDArray[np.bool_], dim_len: int, dim_chunk_len: int) -> None: # check number of dimensions if not is_bool_array(dim_sel, 1): raise IndexError("Boolean arrays in an orthogonal selection must be 1-dimensional only") # check shape if dim_sel.shape[0] != dim_len: raise IndexError( f"Boolean array has the wrong length for dimension; expected {dim_len}, got {dim_sel.shape[0]}" ) # precompute number of selected items for each chunk nchunks = ceildiv(dim_len, dim_chunk_len) chunk_nitems = np.zeros(nchunks, dtype="i8") for dim_chunk_ix in range(nchunks): dim_offset = dim_chunk_ix * dim_chunk_len chunk_nitems[dim_chunk_ix] = np.count_nonzero( dim_sel[dim_offset : dim_offset + dim_chunk_len] ) chunk_nitems_cumsum = np.cumsum(chunk_nitems) nitems = chunk_nitems_cumsum[-1] dim_chunk_ixs = np.nonzero(chunk_nitems)[0] # store attributes object.__setattr__(self, "dim_sel", dim_sel) object.__setattr__(self, "dim_len", dim_len) 
object.__setattr__(self, "dim_chunk_len", dim_chunk_len) object.__setattr__(self, "nchunks", nchunks) object.__setattr__(self, "chunk_nitems", chunk_nitems) object.__setattr__(self, "chunk_nitems_cumsum", chunk_nitems_cumsum) object.__setattr__(self, "nitems", nitems) object.__setattr__(self, "dim_chunk_ixs", dim_chunk_ixs) def __iter__(self) -> Iterator[ChunkDimProjection]: # iterate over chunks with at least one item for dim_chunk_ix in self.dim_chunk_ixs: # find region in chunk dim_offset = dim_chunk_ix * self.dim_chunk_len dim_chunk_sel = self.dim_sel[dim_offset : dim_offset + self.dim_chunk_len] # pad out if final chunk if dim_chunk_sel.shape[0] < self.dim_chunk_len: tmp = np.zeros(self.dim_chunk_len, dtype=bool) tmp[: dim_chunk_sel.shape[0]] = dim_chunk_sel dim_chunk_sel = tmp # find region in output if dim_chunk_ix == 0: start = 0 else: start = self.chunk_nitems_cumsum[dim_chunk_ix - 1] stop = self.chunk_nitems_cumsum[dim_chunk_ix] dim_out_sel = slice(start, stop) is_complete_chunk = False # TODO yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk) class Order(Enum): """ Enum for indexing order. """ UNKNOWN = 0 INCREASING = 1 DECREASING = 2 UNORDERED = 3 @staticmethod def check(a: npt.NDArray[Any]) -> Order: diff = np.diff(a) diff_positive = diff >= 0 n_diff_positive = np.count_nonzero(diff_positive) all_increasing = n_diff_positive == len(diff_positive) any_increasing = n_diff_positive > 0 if all_increasing: order = Order.INCREASING elif any_increasing: order = Order.UNORDERED else: order = Order.DECREASING return order def wraparound_indices(x: npt.NDArray[Any], dim_len: int) -> None: loc_neg = x < 0 if np.any(loc_neg): x[loc_neg] += dim_len def boundscheck_indices(x: npt.NDArray[Any], dim_len: int) -> None: if np.any(x < 0) or np.any(x >= dim_len): raise BoundsCheckError(dim_len) @dataclass(frozen=True) class IntArrayDimIndexer: """Integer array selection against a single dimension.""" dim_len: int dim_chunk_len: int nchunks: int nitems: int order: Order dim_sel: npt.NDArray[np.intp] dim_out_sel: npt.NDArray[np.intp] chunk_nitems: int dim_chunk_ixs: npt.NDArray[np.intp] chunk_nitems_cumsum: npt.NDArray[np.intp] def __init__( self, dim_sel: npt.NDArray[np.intp], dim_len: int, dim_chunk_len: int, wraparound: bool = True, boundscheck: bool = True, order: Order = Order.UNKNOWN, ) -> None: # ensure 1d array dim_sel = np.asanyarray(dim_sel) if not is_integer_array(dim_sel, 1): raise IndexError("integer arrays in an orthogonal selection must be 1-dimensional only") nitems = len(dim_sel) nchunks = ceildiv(dim_len, dim_chunk_len) # handle wraparound if wraparound: wraparound_indices(dim_sel, dim_len) # handle out of bounds if boundscheck: boundscheck_indices(dim_sel, dim_len) # determine which chunk is needed for each selection item # note: for dense integer selections, the division operation here is the # bottleneck dim_sel_chunk = dim_sel // dim_chunk_len # determine order of indices if order == Order.UNKNOWN: order = Order.check(dim_sel) order = Order(order) if order == Order.INCREASING: dim_out_sel = None elif order == Order.DECREASING: dim_sel = dim_sel[::-1] # TODO should be possible to do this without creating an arange dim_out_sel = np.arange(nitems - 1, -1, -1) else: # sort indices to group by chunk dim_out_sel = np.argsort(dim_sel_chunk) dim_sel = np.take(dim_sel, dim_out_sel) # precompute number of selected items for each chunk chunk_nitems = np.bincount(dim_sel_chunk, minlength=nchunks) # find chunks that we need to visit dim_chunk_ixs = 
np.nonzero(chunk_nitems)[0] # compute offsets into the output array chunk_nitems_cumsum = np.cumsum(chunk_nitems) # store attributes object.__setattr__(self, "dim_len", dim_len) object.__setattr__(self, "dim_chunk_len", dim_chunk_len) object.__setattr__(self, "nchunks", nchunks) object.__setattr__(self, "nitems", nitems) object.__setattr__(self, "order", order) object.__setattr__(self, "dim_sel", dim_sel) object.__setattr__(self, "dim_out_sel", dim_out_sel) object.__setattr__(self, "chunk_nitems", chunk_nitems) object.__setattr__(self, "dim_chunk_ixs", dim_chunk_ixs) object.__setattr__(self, "chunk_nitems_cumsum", chunk_nitems_cumsum) def __iter__(self) -> Iterator[ChunkDimProjection]: for dim_chunk_ix in self.dim_chunk_ixs: dim_out_sel: slice | npt.NDArray[np.intp] # find region in output if dim_chunk_ix == 0: start = 0 else: start = self.chunk_nitems_cumsum[dim_chunk_ix - 1] stop = self.chunk_nitems_cumsum[dim_chunk_ix] if self.order == Order.INCREASING: dim_out_sel = slice(start, stop) else: dim_out_sel = self.dim_out_sel[start:stop] # find region in chunk dim_offset = dim_chunk_ix * self.dim_chunk_len dim_chunk_sel = self.dim_sel[start:stop] - dim_offset is_complete_chunk = False # TODO yield ChunkDimProjection(dim_chunk_ix, dim_chunk_sel, dim_out_sel, is_complete_chunk) def slice_to_range(s: slice, length: int) -> range: return range(*s.indices(length)) def ix_(selection: Any, shape: ChunkCoords) -> npt.NDArray[np.intp]: """Convert an orthogonal selection to a numpy advanced (fancy) selection, like ``numpy.ix_`` but with support for slices and single ints.""" # normalisation selection = replace_ellipsis(selection, shape) # replace slice and int as these are not supported by numpy.ix_ selection = [ slice_to_range(dim_sel, dim_len) if isinstance(dim_sel, slice) else [dim_sel] if is_integer(dim_sel) else dim_sel for dim_sel, dim_len in zip(selection, shape, strict=True) ] # now get numpy to convert to a coordinate selection selection = np.ix_(*selection) return cast(npt.NDArray[np.intp], selection) def oindex(a: npt.NDArray[Any], selection: Selection) -> npt.NDArray[Any]: """Implementation of orthogonal indexing with slices and ints.""" selection = replace_ellipsis(selection, a.shape) drop_axes = tuple(i for i, s in enumerate(selection) if is_integer(s)) selection = ix_(selection, a.shape) result = a[selection] if drop_axes: result = result.squeeze(axis=drop_axes) return result def oindex_set(a: npt.NDArray[Any], selection: Selection, value: Any) -> None: selection = replace_ellipsis(selection, a.shape) drop_axes = tuple(i for i, s in enumerate(selection) if is_integer(s)) selection = ix_(selection, a.shape) if not np.isscalar(value) and drop_axes: value = np.asanyarray(value) value_selection: list[Selector | None] = [slice(None)] * len(a.shape) for i in drop_axes: value_selection[i] = np.newaxis value = value[tuple(value_selection)] a[selection] = value @dataclass(frozen=True) class OrthogonalIndexer(Indexer): dim_indexers: list[IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer] shape: ChunkCoords chunk_shape: ChunkCoords is_advanced: bool drop_axes: tuple[int, ...] 
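    # Illustrative example (not part of the original source): for an array of
    # shape (4, 4), the orthogonal selection ([0, 2], [1, 3]) selects the full
    # 2x2 cross-product of rows {0, 2} and columns {1, 3}, whereas plain numpy
    # fancy indexing with the same arrays would return just the two elements
    # at (0, 1) and (2, 3).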
def __init__(self, selection: Selection, shape: ChunkCoords, chunk_grid: ChunkGrid) -> None: chunk_shape = get_chunk_shape(chunk_grid) # handle ellipsis selection = replace_ellipsis(selection, shape) # normalize list to array selection = replace_lists(selection) # setup per-dimension indexers dim_indexers: list[ IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer ] = [] for dim_sel, dim_len, dim_chunk_len in zip(selection, shape, chunk_shape, strict=True): dim_indexer: IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer if is_integer(dim_sel): dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len) elif isinstance(dim_sel, slice): dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len) elif is_integer_array(dim_sel): dim_indexer = IntArrayDimIndexer(dim_sel, dim_len, dim_chunk_len) elif is_bool_array(dim_sel): dim_indexer = BoolArrayDimIndexer(dim_sel, dim_len, dim_chunk_len) else: raise IndexError( "unsupported selection item for orthogonal indexing; " "expected integer, slice, integer array or Boolean " f"array, got {type(dim_sel)!r}" ) dim_indexers.append(dim_indexer) shape = tuple(s.nitems for s in dim_indexers if not isinstance(s, IntDimIndexer)) is_advanced = not is_basic_selection(selection) if is_advanced: drop_axes = tuple( i for i, dim_indexer in enumerate(dim_indexers) if isinstance(dim_indexer, IntDimIndexer) ) else: drop_axes = () object.__setattr__(self, "dim_indexers", dim_indexers) object.__setattr__(self, "shape", shape) object.__setattr__(self, "chunk_shape", chunk_shape) object.__setattr__(self, "is_advanced", is_advanced) object.__setattr__(self, "drop_axes", drop_axes) def __iter__(self) -> Iterator[ChunkProjection]: for dim_projections in itertools.product(*self.dim_indexers): chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections) chunk_selection: tuple[Selector, ...] | npt.NDArray[Any] = tuple( p.dim_chunk_sel for p in dim_projections ) out_selection: tuple[Selector, ...] | npt.NDArray[Any] = tuple( p.dim_out_sel for p in dim_projections if p.dim_out_sel is not None ) # handle advanced indexing arrays orthogonally if self.is_advanced: # N.B., numpy doesn't support orthogonal indexing directly as yet, # so need to work around via np.ix_. Also np.ix_ does not support a # mixture of arrays and slices or integers, so need to convert slices # and integers into ranges. chunk_selection = ix_(chunk_selection, self.chunk_shape) # special case for non-monotonic indices if not is_basic_selection(out_selection): out_selection = ix_(out_selection, self.shape) is_complete_chunk = all(p.is_complete_chunk for p in dim_projections) yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk) @dataclass(frozen=True) class OIndex: array: Array # TODO: develop Array generic and move zarr.Array[np.intp] | zarr.Array[np.bool_] to ArrayOfIntOrBool def __getitem__(self, selection: OrthogonalSelection | Array) -> NDArrayLike: from zarr.core.array import Array # if input is a Zarr array, we materialize it now. 
if isinstance(selection, Array): selection = _zarr_array_to_int_or_bool_array(selection) fields, new_selection = pop_fields(selection) new_selection = ensure_tuple(new_selection) new_selection = replace_lists(new_selection) return self.array.get_orthogonal_selection( cast(OrthogonalSelection, new_selection), fields=fields ) def __setitem__(self, selection: OrthogonalSelection, value: npt.ArrayLike) -> None: fields, new_selection = pop_fields(selection) new_selection = ensure_tuple(new_selection) new_selection = replace_lists(new_selection) return self.array.set_orthogonal_selection( cast(OrthogonalSelection, new_selection), value, fields=fields ) @dataclass(frozen=True) class BlockIndexer(Indexer): dim_indexers: list[SliceDimIndexer] shape: ChunkCoords drop_axes: ChunkCoords def __init__( self, selection: BasicSelection, shape: ChunkCoords, chunk_grid: ChunkGrid ) -> None: chunk_shape = get_chunk_shape(chunk_grid) # handle ellipsis selection_normalized = replace_ellipsis(selection, shape) # normalize list to array selection_normalized = replace_lists(selection_normalized) # setup per-dimension indexers dim_indexers = [] for dim_sel, dim_len, dim_chunk_size in zip( selection_normalized, shape, chunk_shape, strict=True ): dim_numchunks = int(np.ceil(dim_len / dim_chunk_size)) if is_integer(dim_sel): if dim_sel < 0: dim_sel = dim_numchunks + dim_sel start = dim_sel * dim_chunk_size stop = start + dim_chunk_size slice_ = slice(start, stop) elif is_slice(dim_sel): start = dim_sel.start if dim_sel.start is not None else 0 stop = dim_sel.stop if dim_sel.stop is not None else dim_numchunks if dim_sel.step not in {1, None}: raise IndexError( "unsupported selection item for block indexing; " f"expected integer or slice with step=1, got {type(dim_sel)!r}" ) # Can't reuse wraparound_indices because it expects a numpy array # We have integers here. 
if start < 0: start = dim_numchunks + start if stop < 0: stop = dim_numchunks + stop start *= dim_chunk_size stop *= dim_chunk_size slice_ = slice(start, stop) else: raise IndexError( "unsupported selection item for block indexing; " f"expected integer or slice, got {type(dim_sel)!r}" ) dim_indexer = SliceDimIndexer(slice_, dim_len, dim_chunk_size) dim_indexers.append(dim_indexer) if start >= dim_len or start < 0: raise BoundsCheckError(dim_len) shape = tuple(s.nitems for s in dim_indexers) object.__setattr__(self, "dim_indexers", dim_indexers) object.__setattr__(self, "shape", shape) object.__setattr__(self, "drop_axes", ()) def __iter__(self) -> Iterator[ChunkProjection]: for dim_projections in itertools.product(*self.dim_indexers): chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections) chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections) out_selection = tuple( p.dim_out_sel for p in dim_projections if p.dim_out_sel is not None ) is_complete_chunk = all(p.is_complete_chunk for p in dim_projections) yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk) @dataclass(frozen=True) class BlockIndex: array: Array def __getitem__(self, selection: BasicSelection) -> NDArrayLike: fields, new_selection = pop_fields(selection) new_selection = ensure_tuple(new_selection) new_selection = replace_lists(new_selection) return self.array.get_block_selection(cast(BasicSelection, new_selection), fields=fields) def __setitem__(self, selection: BasicSelection, value: npt.ArrayLike) -> None: fields, new_selection = pop_fields(selection) new_selection = ensure_tuple(new_selection) new_selection = replace_lists(new_selection) return self.array.set_block_selection( cast(BasicSelection, new_selection), value, fields=fields ) def is_coordinate_selection( selection: SelectionNormalized, shape: ChunkCoords ) -> TypeGuard[CoordinateSelectionNormalized]: return ( isinstance(selection, tuple) and len(selection) == len(shape) and all(is_integer(dim_sel) or is_integer_array(dim_sel) for dim_sel in selection) ) def is_mask_selection(selection: Selection, shape: ChunkCoords) -> TypeGuard[MaskSelection]: return ( isinstance(selection, tuple) and len(selection) == 1 and is_bool_array(selection[0]) and selection[0].shape == shape ) @dataclass(frozen=True) class CoordinateIndexer(Indexer): sel_shape: ChunkCoords selection: CoordinateSelectionNormalized sel_sort: npt.NDArray[np.intp] | None chunk_nitems_cumsum: npt.NDArray[np.intp] chunk_rixs: npt.NDArray[np.intp] chunk_mixs: tuple[npt.NDArray[np.intp], ...] 
shape: ChunkCoords chunk_shape: ChunkCoords drop_axes: ChunkCoords def __init__( self, selection: CoordinateSelection, shape: ChunkCoords, chunk_grid: ChunkGrid ) -> None: chunk_shape = get_chunk_shape(chunk_grid) cdata_shape: ChunkCoords if shape == (): cdata_shape = (1,) else: cdata_shape = tuple(math.ceil(s / c) for s, c in zip(shape, chunk_shape, strict=True)) nchunks = reduce(operator.mul, cdata_shape, 1) # some initial normalization selection_normalized = cast(CoordinateSelectionNormalized, ensure_tuple(selection)) selection_normalized = tuple( np.asarray([i]) if is_integer(i) else i for i in selection_normalized ) selection_normalized = cast( CoordinateSelectionNormalized, replace_lists(selection_normalized) ) # validation if not is_coordinate_selection(selection_normalized, shape): raise IndexError( "invalid coordinate selection; expected one integer " "(coordinate) array per dimension of the target array, " f"got {selection!r}" ) # handle wraparound, boundscheck for dim_sel, dim_len in zip(selection_normalized, shape, strict=True): # handle wraparound wraparound_indices(dim_sel, dim_len) # handle out of bounds boundscheck_indices(dim_sel, dim_len) # compute chunk index for each point in the selection chunks_multi_index = tuple( dim_sel // dim_chunk_len for (dim_sel, dim_chunk_len) in zip(selection_normalized, chunk_shape, strict=True) ) # broadcast selection - this will raise error if array dimensions don't match selection_broadcast = tuple(np.broadcast_arrays(*selection_normalized)) chunks_multi_index_broadcast = np.broadcast_arrays(*chunks_multi_index) # remember shape of selection, because we will flatten indices for processing sel_shape = selection_broadcast[0].shape or (1,) # flatten selection selection_broadcast = tuple(dim_sel.reshape(-1) for dim_sel in selection_broadcast) chunks_multi_index_broadcast = tuple( dim_chunks.reshape(-1) for dim_chunks in chunks_multi_index_broadcast ) # ravel chunk indices chunks_raveled_indices = np.ravel_multi_index( chunks_multi_index_broadcast, dims=cdata_shape ) # group points by chunk if np.any(np.diff(chunks_raveled_indices) < 0): # optimisation, only sort if needed sel_sort = np.argsort(chunks_raveled_indices) selection_broadcast = tuple(dim_sel[sel_sort] for dim_sel in selection_broadcast) else: sel_sort = None shape = selection_broadcast[0].shape or (1,) # precompute number of selected items for each chunk chunk_nitems = np.bincount(chunks_raveled_indices, minlength=nchunks) chunk_nitems_cumsum = np.cumsum(chunk_nitems) # locate the chunks we need to process chunk_rixs = np.nonzero(chunk_nitems)[0] # unravel chunk indices chunk_mixs = np.unravel_index(chunk_rixs, cdata_shape) object.__setattr__(self, "sel_shape", sel_shape) object.__setattr__(self, "selection", selection_broadcast) object.__setattr__(self, "sel_sort", sel_sort) object.__setattr__(self, "chunk_nitems_cumsum", chunk_nitems_cumsum) object.__setattr__(self, "chunk_rixs", chunk_rixs) object.__setattr__(self, "chunk_mixs", chunk_mixs) object.__setattr__(self, "chunk_shape", chunk_shape) object.__setattr__(self, "shape", shape) object.__setattr__(self, "drop_axes", ()) def __iter__(self) -> Iterator[ChunkProjection]: # iterate over chunks for i, chunk_rix in enumerate(self.chunk_rixs): chunk_coords = tuple(m[i] for m in self.chunk_mixs) if chunk_rix == 0: start = 0 else: start = self.chunk_nitems_cumsum[chunk_rix - 1] stop = self.chunk_nitems_cumsum[chunk_rix] out_selection: slice | npt.NDArray[np.intp] if self.sel_sort is None: out_selection = slice(start, stop) else: 
out_selection = self.sel_sort[start:stop]

            chunk_offsets = tuple(
                dim_chunk_ix * dim_chunk_len
                for dim_chunk_ix, dim_chunk_len in zip(chunk_coords, self.chunk_shape, strict=True)
            )
            chunk_selection = tuple(
                dim_sel[start:stop] - dim_chunk_offset
                for (dim_sel, dim_chunk_offset) in zip(self.selection, chunk_offsets, strict=True)
            )
            is_complete_chunk = False  # TODO
            yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk)


@dataclass(frozen=True)
class MaskIndexer(CoordinateIndexer):
    def __init__(self, selection: MaskSelection, shape: ChunkCoords, chunk_grid: ChunkGrid) -> None:
        # some initial normalization
        selection_normalized = cast(tuple[MaskSelection], ensure_tuple(selection))
        selection_normalized = cast(tuple[MaskSelection], replace_lists(selection_normalized))

        # validation
        if not is_mask_selection(selection_normalized, shape):
            raise IndexError(
                "invalid mask selection; expected one Boolean (mask) "
                f"array with the same shape as the target array, got {selection_normalized!r}"
            )

        # convert to indices
        selection_indices = np.nonzero(selection_normalized[0])

        # delegate the rest to superclass
        super().__init__(selection_indices, shape, chunk_grid)


@dataclass(frozen=True)
class VIndex:
    array: Array

    # TODO: develop Array generic and move zarr.Array[np.intp] | zarr.Array[np.bool_] to ArrayOfIntOrBool
    def __getitem__(self, selection: CoordinateSelection | MaskSelection | Array) -> NDArrayLike:
        from zarr.core.array import Array

        # if input is a Zarr array, we materialize it now.
        if isinstance(selection, Array):
            selection = _zarr_array_to_int_or_bool_array(selection)
        fields, new_selection = pop_fields(selection)
        new_selection = ensure_tuple(new_selection)
        new_selection = replace_lists(new_selection)
        if is_coordinate_selection(new_selection, self.array.shape):
            return self.array.get_coordinate_selection(new_selection, fields=fields)
        elif is_mask_selection(new_selection, self.array.shape):
            return self.array.get_mask_selection(new_selection, fields=fields)
        else:
            raise VindexInvalidSelectionError(new_selection)

    def __setitem__(
        self, selection: CoordinateSelection | MaskSelection, value: npt.ArrayLike
    ) -> None:
        fields, new_selection = pop_fields(selection)
        new_selection = ensure_tuple(new_selection)
        new_selection = replace_lists(new_selection)
        if is_coordinate_selection(new_selection, self.array.shape):
            self.array.set_coordinate_selection(new_selection, value, fields=fields)
        elif is_mask_selection(new_selection, self.array.shape):
            self.array.set_mask_selection(new_selection, value, fields=fields)
        else:
            raise VindexInvalidSelectionError(new_selection)


def check_fields(fields: Fields | None, dtype: np.dtype[Any]) -> np.dtype[Any]:
    # early out
    if fields is None:
        return dtype
    # check type
    if not isinstance(fields, str | list | tuple):
        raise IndexError(
            f"'fields' argument must be a string or list of strings; found {type(fields)!r}"
        )
    if fields:
        if dtype.names is None:
            raise IndexError("invalid 'fields' argument, array does not have any fields")
        try:
            if isinstance(fields, str):
                # single field selection
                out_dtype = dtype[fields]
            else:
                # multiple field selection
                out_dtype = np.dtype([(f, dtype[f]) for f in fields])
        except KeyError as e:
            raise IndexError(f"invalid 'fields' argument, field not found: {e!r}") from e
        else:
            return out_dtype
    else:
        return dtype


def check_no_multi_fields(fields: Fields | None) -> Fields | None:
    if isinstance(fields, list):
        if len(fields) == 1:
            return fields[0]
        elif len(fields) > 1:
            raise IndexError("multiple fields are not supported for this operation")
    return
fields def pop_fields(selection: SelectionWithFields) -> tuple[Fields | None, Selection]: if isinstance(selection, str): # single field selection return selection, () elif not isinstance(selection, tuple): # single selection item, no fields # leave selection as-is return None, cast(Selection, selection) else: # multiple items, split fields from selection items fields: Fields = [f for f in selection if isinstance(f, str)] fields = fields[0] if len(fields) == 1 else fields selection_tuple = tuple(s for s in selection if not isinstance(s, str)) selection = cast( Selection, selection_tuple[0] if len(selection_tuple) == 1 else selection_tuple ) return fields, selection def make_slice_selection(selection: Any) -> list[slice]: ls: list[slice] = [] for dim_selection in selection: if is_integer(dim_selection): ls.append(slice(int(dim_selection), int(dim_selection) + 1, 1)) elif isinstance(dim_selection, np.ndarray): if len(dim_selection) == 1: ls.append(slice(int(dim_selection[0]), int(dim_selection[0]) + 1, 1)) else: raise ArrayIndexError else: ls.append(dim_selection) return ls def decode_morton(z: int, chunk_shape: ChunkCoords) -> ChunkCoords: # Inspired by compressed morton code as implemented in Neuroglancer # https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/volume.md#compressed-morton-code bits = tuple(math.ceil(math.log2(c)) for c in chunk_shape) max_coords_bits = max(bits) input_bit = 0 input_value = z out = [0] * len(chunk_shape) for coord_bit in range(max_coords_bits): for dim in range(len(chunk_shape)): if coord_bit < bits[dim]: bit = (input_value >> input_bit) & 1 out[dim] |= bit << coord_bit input_bit += 1 return tuple(out) def morton_order_iter(chunk_shape: ChunkCoords) -> Iterator[ChunkCoords]: i = 0 order: list[ChunkCoords] = [] while len(order) < product(chunk_shape): m = decode_morton(i, chunk_shape) if m not in order and all(x < y for x, y in zip(m, chunk_shape, strict=False)): order.append(m) i += 1 for j in range(product(chunk_shape)): yield order[j] def c_order_iter(chunks_per_shard: ChunkCoords) -> Iterator[ChunkCoords]: return itertools.product(*(range(x) for x in chunks_per_shard)) def get_indexer( selection: SelectionWithFields, shape: ChunkCoords, chunk_grid: ChunkGrid ) -> Indexer: _, pure_selection = pop_fields(selection) if is_pure_fancy_indexing(pure_selection, len(shape)): new_selection = ensure_tuple(selection) new_selection = replace_lists(new_selection) if is_coordinate_selection(new_selection, shape): return CoordinateIndexer(cast(CoordinateSelection, selection), shape, chunk_grid) elif is_mask_selection(new_selection, shape): return MaskIndexer(cast(MaskSelection, selection), shape, chunk_grid) else: raise VindexInvalidSelectionError(new_selection) elif is_pure_orthogonal_indexing(pure_selection, len(shape)): return OrthogonalIndexer(cast(OrthogonalSelection, selection), shape, chunk_grid) else: return BasicIndexer(cast(BasicSelection, selection), shape, chunk_grid) zarr-python-3.0.6/src/zarr/core/metadata/000077500000000000000000000000001476711733500203105ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/core/metadata/__init__.py000066400000000000000000000010101476711733500224110ustar00rootroot00000000000000from typing import TypeAlias, TypeVar from .v2 import ArrayV2Metadata, ArrayV2MetadataDict from .v3 import ArrayV3Metadata, ArrayV3MetadataDict ArrayMetadata: TypeAlias = ArrayV2Metadata | ArrayV3Metadata ArrayMetadataDict: TypeAlias = ArrayV2MetadataDict | ArrayV3MetadataDict T_ArrayMetadata = 
TypeVar("T_ArrayMetadata", ArrayV2Metadata, ArrayV3Metadata) __all__ = [ "ArrayMetadata", "ArrayMetadataDict", "ArrayV2Metadata", "ArrayV2MetadataDict", "ArrayV3Metadata", "ArrayV3MetadataDict", ] zarr-python-3.0.6/src/zarr/core/metadata/common.py000066400000000000000000000003771476711733500221610ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: from zarr.core.common import JSON def parse_attributes(data: dict[str, JSON] | None) -> dict[str, JSON]: if data is None: return {} return data zarr-python-3.0.6/src/zarr/core/metadata/v2.py000066400000000000000000000357531476711733500212260ustar00rootroot00000000000000from __future__ import annotations import base64 import warnings from collections.abc import Iterable from enum import Enum from functools import cached_property from typing import TYPE_CHECKING, TypedDict, cast import numcodecs.abc from zarr.abc.metadata import Metadata if TYPE_CHECKING: from typing import Any, Literal, Self import numpy.typing as npt from zarr.core.buffer import Buffer, BufferPrototype from zarr.core.common import ChunkCoords import json from dataclasses import dataclass, field, fields, replace import numcodecs import numpy as np from zarr.core.array_spec import ArrayConfig, ArraySpec from zarr.core.chunk_grids import RegularChunkGrid from zarr.core.chunk_key_encodings import parse_separator from zarr.core.common import JSON, ZARRAY_JSON, ZATTRS_JSON, MemoryOrder, parse_shapelike from zarr.core.config import config, parse_indexing_order from zarr.core.metadata.common import parse_attributes class ArrayV2MetadataDict(TypedDict): """ A typed dictionary model for Zarr format 2 metadata. """ zarr_format: Literal[2] attributes: dict[str, JSON] @dataclass(frozen=True, kw_only=True) class ArrayV2Metadata(Metadata): shape: ChunkCoords chunks: ChunkCoords dtype: np.dtype[Any] fill_value: int | float | str | bytes | None = 0 order: MemoryOrder = "C" filters: tuple[numcodecs.abc.Codec, ...] | None = None dimension_separator: Literal[".", "/"] = "." compressor: numcodecs.abc.Codec | None = None attributes: dict[str, JSON] = field(default_factory=dict) zarr_format: Literal[2] = field(init=False, default=2) def __init__( self, *, shape: ChunkCoords, dtype: npt.DTypeLike, chunks: ChunkCoords, fill_value: Any, order: MemoryOrder, dimension_separator: Literal[".", "/"] = ".", compressor: numcodecs.abc.Codec | dict[str, JSON] | None = None, filters: Iterable[numcodecs.abc.Codec | dict[str, JSON]] | None = None, attributes: dict[str, JSON] | None = None, ) -> None: """ Metadata for a Zarr format 2 array. 
""" shape_parsed = parse_shapelike(shape) dtype_parsed = parse_dtype(dtype) chunks_parsed = parse_shapelike(chunks) compressor_parsed = parse_compressor(compressor) order_parsed = parse_indexing_order(order) dimension_separator_parsed = parse_separator(dimension_separator) filters_parsed = parse_filters(filters) fill_value_parsed = parse_fill_value(fill_value, dtype=dtype_parsed) attributes_parsed = parse_attributes(attributes) object.__setattr__(self, "shape", shape_parsed) object.__setattr__(self, "dtype", dtype_parsed) object.__setattr__(self, "chunks", chunks_parsed) object.__setattr__(self, "compressor", compressor_parsed) object.__setattr__(self, "order", order_parsed) object.__setattr__(self, "dimension_separator", dimension_separator_parsed) object.__setattr__(self, "filters", filters_parsed) object.__setattr__(self, "fill_value", fill_value_parsed) object.__setattr__(self, "attributes", attributes_parsed) # ensure that the metadata document is consistent _ = parse_metadata(self) @property def ndim(self) -> int: return len(self.shape) @cached_property def chunk_grid(self) -> RegularChunkGrid: return RegularChunkGrid(chunk_shape=self.chunks) @property def shards(self) -> ChunkCoords | None: return None def to_buffer_dict(self, prototype: BufferPrototype) -> dict[str, Buffer]: def _json_convert( o: Any, ) -> Any: if isinstance(o, np.dtype): if o.fields is None: return o.str else: return o.descr if isinstance(o, numcodecs.abc.Codec): codec_config = o.get_config() # Hotfix for https://github.com/zarr-developers/zarr-python/issues/2647 if codec_config["id"] == "zstd" and not codec_config.get("checksum", False): codec_config.pop("checksum", None) return codec_config if np.isscalar(o): out: Any if hasattr(o, "dtype") and o.dtype.kind == "M" and hasattr(o, "view"): # https://github.com/zarr-developers/zarr-python/issues/2119 # `.item()` on a datetime type might or might not return an # integer, depending on the value. # Explicitly cast to an int first, and then grab .item() out = o.view("i8").item() else: # convert numpy scalar to python type, and pass # python types through out = getattr(o, "item", lambda: o)() if isinstance(out, complex): # python complex types are not JSON serializable, so we use the # serialization defined in the zarr v3 spec return [out.real, out.imag] return out if isinstance(o, Enum): return o.name raise TypeError zarray_dict = self.to_dict() zattrs_dict = zarray_dict.pop("attributes", {}) json_indent = config.get("json_indent") return { ZARRAY_JSON: prototype.buffer.from_bytes( json.dumps(zarray_dict, default=_json_convert, indent=json_indent).encode() ), ZATTRS_JSON: prototype.buffer.from_bytes( json.dumps(zattrs_dict, indent=json_indent).encode() ), } @classmethod def from_dict(cls, data: dict[str, Any]) -> ArrayV2Metadata: # make a copy to protect the original from modification _data = data.copy() # check that the zarr_format attribute is correct _ = parse_zarr_format(_data.pop("zarr_format")) dtype = parse_dtype(_data["dtype"]) if dtype.kind in "SV": fill_value_encoded = _data.get("fill_value") if fill_value_encoded is not None: fill_value = base64.standard_b64decode(fill_value_encoded) _data["fill_value"] = fill_value # zarr v2 allowed arbitrary keys here. # We don't want the ArrayV2Metadata constructor to fail just because someone put an # extra key in the metadata. 
expected = {x.name for x in fields(cls)} # https://github.com/zarr-developers/zarr-python/issues/2269 # handle the renames expected |= {"dtype", "chunks"} # check if `filters` is an empty sequence; if so use None instead and raise a warning if _data["filters"] is not None and len(_data["filters"]) == 0: msg = ( "Found an empty list of filters in the array metadata document. " "This is contrary to the Zarr V2 specification, and will cause an error in the future. " "Use None (or Null in a JSON document) instead of an empty list of filters." ) warnings.warn(msg, UserWarning, stacklevel=1) _data["filters"] = None _data = {k: v for k, v in _data.items() if k in expected} return cls(**_data) def to_dict(self) -> dict[str, JSON]: zarray_dict = super().to_dict() if self.dtype.kind in "SV" and self.fill_value is not None: # There's a relationship between self.dtype and self.fill_value # that mypy isn't aware of. The fact that we have S or V dtype here # means we should have a bytes-type fill_value. fill_value = base64.standard_b64encode(cast(bytes, self.fill_value)).decode("ascii") zarray_dict["fill_value"] = fill_value _ = zarray_dict.pop("dtype") dtype_json: JSON # In the case of zarr v2, the simplest i.e., '|VXX' dtype is represented as a string dtype_descr = self.dtype.descr if self.dtype.kind == "V" and dtype_descr[0][0] != "" and len(dtype_descr) != 0: dtype_json = tuple(self.dtype.descr) else: dtype_json = self.dtype.str zarray_dict["dtype"] = dtype_json return zarray_dict def get_chunk_spec( self, _chunk_coords: ChunkCoords, array_config: ArrayConfig, prototype: BufferPrototype ) -> ArraySpec: return ArraySpec( shape=self.chunks, dtype=self.dtype, fill_value=self.fill_value, config=array_config, prototype=prototype, ) def encode_chunk_key(self, chunk_coords: ChunkCoords) -> str: chunk_identifier = self.dimension_separator.join(map(str, chunk_coords)) return "0" if chunk_identifier == "" else chunk_identifier def update_shape(self, shape: ChunkCoords) -> Self: return replace(self, shape=shape) def update_attributes(self, attributes: dict[str, JSON]) -> Self: return replace(self, attributes=attributes) def parse_dtype(data: npt.DTypeLike) -> np.dtype[Any]: if isinstance(data, list): # this is a valid _VoidDTypeLike check data = [tuple(d) for d in data] return np.dtype(data) def parse_zarr_format(data: object) -> Literal[2]: if data == 2: return 2 raise ValueError(f"Invalid value. Expected 2. Got {data}.") def parse_filters(data: object) -> tuple[numcodecs.abc.Codec, ...] | None: """ Parse a potential tuple of filters """ out: list[numcodecs.abc.Codec] = [] if data is None: return data if isinstance(data, Iterable): for idx, val in enumerate(data): if isinstance(val, numcodecs.abc.Codec): out.append(val) elif isinstance(val, dict): out.append(numcodecs.get_codec(val)) else: msg = f"Invalid filter at index {idx}. Expected a numcodecs.abc.Codec or a dict representation of numcodecs.abc.Codec. Got {type(val)} instead." raise TypeError(msg) if len(out) == 0: # Per the v2 spec, an empty tuple is not allowed -- use None to express "no filters" return None else: return tuple(out) # take a single codec instance and wrap it in a tuple if isinstance(data, numcodecs.abc.Codec): return (data,) msg = f"Invalid filters. Expected None, an iterable of numcodecs.abc.Codec or dict representations of numcodecs.abc.Codec. Got {type(data)} instead." raise TypeError(msg) def parse_compressor(data: object) -> numcodecs.abc.Codec | None: """ Parse a potential compressor. 
""" if data is None or isinstance(data, numcodecs.abc.Codec): return data if isinstance(data, dict): return numcodecs.get_codec(data) msg = f"Invalid compressor. Expected None, a numcodecs.abc.Codec, or a dict representation of a numcodecs.abc.Codec. Got {type(data)} instead." raise ValueError(msg) def parse_metadata(data: ArrayV2Metadata) -> ArrayV2Metadata: if (l_chunks := len(data.chunks)) != (l_shape := len(data.shape)): msg = ( f"The `shape` and `chunks` attributes must have the same length. " f"`chunks` has length {l_chunks}, but `shape` has length {l_shape}." ) raise ValueError(msg) return data def parse_fill_value(fill_value: object, dtype: np.dtype[Any]) -> Any: """ Parse a potential fill value into a value that is compatible with the provided dtype. Parameters ---------- fill_value : Any A potential fill value. dtype : np.dtype[Any] A numpy dtype. Returns ------- An instance of `dtype`, or `None`, or any python object (in the case of an object dtype) """ if fill_value is None or dtype.hasobject: # no fill value pass elif not isinstance(fill_value, np.void) and fill_value == 0: # this should be compatible across numpy versions for any array type, including # structured arrays fill_value = np.zeros((), dtype=dtype)[()] elif dtype.kind == "U": # special case unicode because of encoding issues on Windows if passed through numpy # https://github.com/alimanfoo/zarr/pull/172#issuecomment-343782713 if not isinstance(fill_value, str): raise ValueError( f"fill_value {fill_value!r} is not valid for dtype {dtype}; must be a unicode string" ) else: try: if isinstance(fill_value, bytes) and dtype.kind == "V": # special case for numpy 1.14 compatibility fill_value = np.array(fill_value, dtype=dtype.str).view(dtype)[()] else: fill_value = np.array(fill_value, dtype=dtype)[()] except Exception as e: msg = f"Fill_value {fill_value} is not valid for dtype {dtype}." raise ValueError(msg) from e return fill_value def _default_fill_value(dtype: np.dtype[Any]) -> Any: """ Get the default fill value for a type. Notes ----- This differs from :func:`parse_fill_value`, which parses a fill value stored in the Array metadata into an in-memory value. This only gives the default fill value for some type. This is useful for reading Zarr format 2 arrays, which allow the fill value to be unspecified. """ if dtype.kind == "S": return b"" elif dtype.kind in "UO": return "" elif dtype.kind in "Mm": return dtype.type("nat") elif dtype.kind == "V": if dtype.fields is not None: default = tuple(_default_fill_value(field[0]) for field in dtype.fields.values()) return np.array([default], dtype=dtype) else: return np.zeros(1, dtype=dtype) else: return dtype.type(0) def _default_compressor( dtype: np.dtype[Any], ) -> dict[str, JSON] | None: """Get the default filters and compressor for a dtype. https://numpy.org/doc/2.1/reference/generated/numpy.dtype.kind.html """ default_compressor = config.get("array.v2_default_compressor") if dtype.kind in "biufcmM": dtype_key = "numeric" elif dtype.kind in "U": dtype_key = "string" elif dtype.kind in "OSV": dtype_key = "bytes" else: raise ValueError(f"Unsupported dtype kind {dtype.kind}") return cast(dict[str, JSON] | None, default_compressor.get(dtype_key, None)) def _default_filters( dtype: np.dtype[Any], ) -> list[dict[str, JSON]] | None: """Get the default filters and compressor for a dtype. 
https://numpy.org/doc/2.1/reference/generated/numpy.dtype.kind.html """ default_filters = config.get("array.v2_default_filters") if dtype.kind in "biufcmM": dtype_key = "numeric" elif dtype.kind in "U": dtype_key = "string" elif dtype.kind in "OS": dtype_key = "bytes" elif dtype.kind == "V": dtype_key = "raw" else: raise ValueError(f"Unsupported dtype kind {dtype.kind}") return cast(list[dict[str, JSON]] | None, default_filters.get(dtype_key, None)) zarr-python-3.0.6/src/zarr/core/metadata/v3.py000066400000000000000000000635401476711733500212220ustar00rootroot00000000000000from __future__ import annotations import warnings from typing import TYPE_CHECKING, TypedDict, overload from zarr.abc.metadata import Metadata from zarr.core.buffer.core import default_buffer_prototype if TYPE_CHECKING: from collections.abc import Callable from typing import Self from zarr.core.buffer import Buffer, BufferPrototype from zarr.core.chunk_grids import ChunkGrid from zarr.core.common import JSON, ChunkCoords import json from collections.abc import Iterable, Sequence from dataclasses import dataclass, field, replace from enum import Enum from typing import Any, Literal, cast import numcodecs.abc import numpy as np import numpy.typing as npt from zarr.abc.codec import ArrayArrayCodec, ArrayBytesCodec, BytesBytesCodec, Codec from zarr.core.array_spec import ArrayConfig, ArraySpec from zarr.core.chunk_grids import ChunkGrid, RegularChunkGrid from zarr.core.chunk_key_encodings import ChunkKeyEncoding, ChunkKeyEncodingLike from zarr.core.common import ( JSON, ZARR_JSON, ChunkCoords, parse_named_configuration, parse_shapelike, ) from zarr.core.config import config from zarr.core.metadata.common import parse_attributes from zarr.core.strings import _NUMPY_SUPPORTS_VLEN_STRING from zarr.core.strings import _STRING_DTYPE as STRING_NP_DTYPE from zarr.errors import MetadataValidationError, NodeTypeValidationError from zarr.registry import get_codec_class DEFAULT_DTYPE = "float64" # Keep in sync with _replace_special_floats SPECIAL_FLOATS_ENCODED = { "Infinity": np.inf, "-Infinity": -np.inf, "NaN": np.nan, } def parse_zarr_format(data: object) -> Literal[3]: if data == 3: return 3 raise MetadataValidationError("zarr_format", 3, data) def parse_node_type_array(data: object) -> Literal["array"]: if data == "array": return "array" raise NodeTypeValidationError("node_type", "array", data) def parse_codecs(data: object) -> tuple[Codec, ...]: out: tuple[Codec, ...] 
= () if not isinstance(data, Iterable): raise TypeError(f"Expected iterable, got {type(data)}") for c in data: if isinstance( c, ArrayArrayCodec | ArrayBytesCodec | BytesBytesCodec ): # Can't use Codec here because of mypy limitation out += (c,) else: name_parsed, _ = parse_named_configuration(c, require_configuration=False) out += (get_codec_class(name_parsed).from_dict(c),) return out def validate_array_bytes_codec(codecs: tuple[Codec, ...]) -> ArrayBytesCodec: # ensure that we have at least one ArrayBytesCodec abcs: list[ArrayBytesCodec] = [codec for codec in codecs if isinstance(codec, ArrayBytesCodec)] if len(abcs) == 0: raise ValueError("At least one ArrayBytesCodec is required.") elif len(abcs) > 1: raise ValueError("Only one ArrayBytesCodec is allowed.") return abcs[0] def validate_codecs(codecs: tuple[Codec, ...], dtype: DataType) -> None: """Check that the codecs are valid for the given dtype""" from zarr.codecs.sharding import ShardingCodec abc = validate_array_bytes_codec(codecs) # Recursively resolve array-bytes codecs within sharding codecs while isinstance(abc, ShardingCodec): abc = validate_array_bytes_codec(abc.codecs) # we need to have special codecs if we are decoding vlen strings or bytestrings # TODO: use codec ID instead of class name codec_class_name = abc.__class__.__name__ if dtype == DataType.string and not codec_class_name == "VLenUTF8Codec": raise ValueError( f"For string dtype, ArrayBytesCodec must be `VLenUTF8Codec`, got `{codec_class_name}`." ) if dtype == DataType.bytes and not codec_class_name == "VLenBytesCodec": raise ValueError( f"For bytes dtype, ArrayBytesCodec must be `VLenBytesCodec`, got `{codec_class_name}`." ) def parse_dimension_names(data: object) -> tuple[str | None, ...] | None: if data is None: return data elif isinstance(data, Iterable) and all(isinstance(x, type(None) | str) for x in data): return tuple(data) else: msg = f"Expected either None or a iterable of str, got {type(data)}" raise TypeError(msg) def parse_storage_transformers(data: object) -> tuple[dict[str, JSON], ...]: """ Parse storage_transformers. Zarr python cannot use storage transformers at this time, so this function doesn't attempt to validate them. """ if data is None: return () if isinstance(data, Iterable): if len(tuple(data)) >= 1: return data # type: ignore[return-value] else: return () raise TypeError( f"Invalid storage_transformers. Expected an iterable of dicts. Got {type(data)} instead." ) class V3JsonEncoder(json.JSONEncoder): def __init__( self, *, skipkeys: bool = False, ensure_ascii: bool = True, check_circular: bool = True, allow_nan: bool = True, sort_keys: bool = False, indent: int | None = None, separators: tuple[str, str] | None = None, default: Callable[[object], object] | None = None, ) -> None: if indent is None: indent = config.get("json_indent") super().__init__( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, sort_keys=sort_keys, indent=indent, separators=separators, default=default, ) def default(self, o: object) -> Any: if isinstance(o, np.dtype): return str(o) if np.isscalar(o): out: Any if hasattr(o, "dtype") and o.dtype.kind == "M" and hasattr(o, "view"): # https://github.com/zarr-developers/zarr-python/issues/2119 # `.item()` on a datetime type might or might not return an # integer, depending on the value. 
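                # (Illustrative, not in the original source: with second
                # precision, np.datetime64(1, "s").item() returns a
                # datetime.datetime, while np.datetime64(1, "ns").item()
                # returns a plain int, because datetime cannot represent
                # nanosecond precision.)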
# Explicitly cast to an int first, and then grab .item() out = o.view("i8").item() else: # convert numpy scalar to python type, and pass # python types through out = getattr(o, "item", lambda: o)() if isinstance(out, complex): # python complex types are not JSON serializable, so we use the # serialization defined in the zarr v3 spec return _replace_special_floats([out.real, out.imag]) elif np.isnan(out): return "NaN" elif np.isinf(out): return "Infinity" if out > 0 else "-Infinity" return out elif isinstance(o, Enum): return o.name # this serializes numcodecs compressors # todo: implement to_dict for codecs elif isinstance(o, numcodecs.abc.Codec): config: dict[str, Any] = o.get_config() return config else: return super().default(o) def _replace_special_floats(obj: object) -> Any: """Helper function to replace NaN/Inf/-Inf values with special strings Note: this cannot be done in the V3JsonEncoder because Python's `json.dumps` optimistically converts NaN/Inf values to special types outside of the encoding step. """ if isinstance(obj, float): if np.isnan(obj): return "NaN" elif np.isinf(obj): return "Infinity" if obj > 0 else "-Infinity" elif isinstance(obj, dict): # Recursively replace in dictionaries return {k: _replace_special_floats(v) for k, v in obj.items()} elif isinstance(obj, list): # Recursively replace in lists return [_replace_special_floats(item) for item in obj] return obj class ArrayV3MetadataDict(TypedDict): """ A typed dictionary model for zarr v3 metadata. """ zarr_format: Literal[3] attributes: dict[str, JSON] @dataclass(frozen=True, kw_only=True) class ArrayV3Metadata(Metadata): shape: ChunkCoords data_type: DataType chunk_grid: ChunkGrid chunk_key_encoding: ChunkKeyEncoding fill_value: Any codecs: tuple[Codec, ...] attributes: dict[str, Any] = field(default_factory=dict) dimension_names: tuple[str, ...] | None = None zarr_format: Literal[3] = field(default=3, init=False) node_type: Literal["array"] = field(default="array", init=False) storage_transformers: tuple[dict[str, JSON], ...] def __init__( self, *, shape: Iterable[int], data_type: npt.DTypeLike | DataType, chunk_grid: dict[str, JSON] | ChunkGrid, chunk_key_encoding: ChunkKeyEncodingLike, fill_value: Any, codecs: Iterable[Codec | dict[str, JSON]], attributes: dict[str, JSON] | None, dimension_names: Iterable[str] | None, storage_transformers: Iterable[dict[str, JSON]] | None = None, ) -> None: """ Because the class is a frozen dataclass, we set attributes using object.__setattr__ """ shape_parsed = parse_shapelike(shape) data_type_parsed = DataType.parse(data_type) chunk_grid_parsed = ChunkGrid.from_dict(chunk_grid) chunk_key_encoding_parsed = ChunkKeyEncoding.from_dict(chunk_key_encoding) dimension_names_parsed = parse_dimension_names(dimension_names) if fill_value is None: fill_value = default_fill_value(data_type_parsed) # we pass a string here rather than an enum to make mypy happy fill_value_parsed = parse_fill_value( fill_value, dtype=cast(ALL_DTYPES, data_type_parsed.value) ) attributes_parsed = parse_attributes(attributes) codecs_parsed_partial = parse_codecs(codecs) storage_transformers_parsed = parse_storage_transformers(storage_transformers) array_spec = ArraySpec( shape=shape_parsed, dtype=data_type_parsed.to_numpy(), fill_value=fill_value_parsed, config=ArrayConfig.from_dict({}), # TODO: config is not needed here. prototype=default_buffer_prototype(), # TODO: prototype is not needed here. 
        )
        codecs_parsed = tuple(c.evolve_from_array_spec(array_spec) for c in codecs_parsed_partial)
        validate_codecs(codecs_parsed_partial, data_type_parsed)

        object.__setattr__(self, "shape", shape_parsed)
        object.__setattr__(self, "data_type", data_type_parsed)
        object.__setattr__(self, "chunk_grid", chunk_grid_parsed)
        object.__setattr__(self, "chunk_key_encoding", chunk_key_encoding_parsed)
        object.__setattr__(self, "codecs", codecs_parsed)
        object.__setattr__(self, "dimension_names", dimension_names_parsed)
        object.__setattr__(self, "fill_value", fill_value_parsed)
        object.__setattr__(self, "attributes", attributes_parsed)
        object.__setattr__(self, "storage_transformers", storage_transformers_parsed)

        self._validate_metadata()

    def _validate_metadata(self) -> None:
        if isinstance(self.chunk_grid, RegularChunkGrid) and len(self.shape) != len(
            self.chunk_grid.chunk_shape
        ):
            raise ValueError(
                "`chunk_shape` and `shape` need to have the same number of dimensions."
            )
        if self.dimension_names is not None and len(self.shape) != len(self.dimension_names):
            raise ValueError(
                "`dimension_names` and `shape` need to have the same number of dimensions."
            )
        if self.fill_value is None:
            raise ValueError("`fill_value` is required.")
        for codec in self.codecs:
            codec.validate(
                shape=self.shape, dtype=self.data_type.to_numpy(), chunk_grid=self.chunk_grid
            )

    @property
    def dtype(self) -> np.dtype[Any]:
        """Interpret Zarr dtype as NumPy dtype"""
        return self.data_type.to_numpy()

    @property
    def ndim(self) -> int:
        return len(self.shape)

    @property
    def chunks(self) -> ChunkCoords:
        if isinstance(self.chunk_grid, RegularChunkGrid):
            from zarr.codecs.sharding import ShardingCodec

            if len(self.codecs) == 1 and isinstance(self.codecs[0], ShardingCodec):
                sharding_codec = self.codecs[0]
                assert isinstance(sharding_codec, ShardingCodec)  # for mypy
                return sharding_codec.chunk_shape
            else:
                return self.chunk_grid.chunk_shape

        msg = (
            f"The `chunks` attribute is only defined for arrays using `RegularChunkGrid`. "
            f"This array has a {self.chunk_grid} instead."
        )
        raise NotImplementedError(msg)

    @property
    def shards(self) -> ChunkCoords | None:
        if isinstance(self.chunk_grid, RegularChunkGrid):
            from zarr.codecs.sharding import ShardingCodec

            if len(self.codecs) == 1 and isinstance(self.codecs[0], ShardingCodec):
                return self.chunk_grid.chunk_shape
            else:
                return None

        msg = (
            f"The `shards` attribute is only defined for arrays using `RegularChunkGrid`. "
            f"This array has a {self.chunk_grid} instead."
) raise NotImplementedError(msg) @property def inner_codecs(self) -> tuple[Codec, ...]: if isinstance(self.chunk_grid, RegularChunkGrid): from zarr.codecs.sharding import ShardingCodec if len(self.codecs) == 1 and isinstance(self.codecs[0], ShardingCodec): return self.codecs[0].codecs return self.codecs def get_chunk_spec( self, _chunk_coords: ChunkCoords, array_config: ArrayConfig, prototype: BufferPrototype ) -> ArraySpec: assert isinstance(self.chunk_grid, RegularChunkGrid), ( "Currently, only regular chunk grid is supported" ) return ArraySpec( shape=self.chunk_grid.chunk_shape, dtype=self.dtype, fill_value=self.fill_value, config=array_config, prototype=prototype, ) def encode_chunk_key(self, chunk_coords: ChunkCoords) -> str: return self.chunk_key_encoding.encode_chunk_key(chunk_coords) def to_buffer_dict(self, prototype: BufferPrototype) -> dict[str, Buffer]: d = _replace_special_floats(self.to_dict()) return {ZARR_JSON: prototype.buffer.from_bytes(json.dumps(d, cls=V3JsonEncoder).encode())} @classmethod def from_dict(cls, data: dict[str, JSON]) -> Self: # make a copy because we are modifying the dict _data = data.copy() # check that the zarr_format attribute is correct _ = parse_zarr_format(_data.pop("zarr_format")) # check that the node_type attribute is correct _ = parse_node_type_array(_data.pop("node_type")) # check that the data_type attribute is valid data_type = DataType.parse(_data.pop("data_type")) # dimension_names key is optional, normalize missing to `None` _data["dimension_names"] = _data.pop("dimension_names", None) # attributes key is optional, normalize missing to `None` _data["attributes"] = _data.pop("attributes", None) return cls(**_data, data_type=data_type) # type: ignore[arg-type] def to_dict(self) -> dict[str, JSON]: out_dict = super().to_dict() if not isinstance(out_dict, dict): raise TypeError(f"Expected dict. Got {type(out_dict)}.") # if `dimension_names` is `None`, we do not include it in # the metadata document if out_dict["dimension_names"] is None: out_dict.pop("dimension_names") return out_dict def update_shape(self, shape: ChunkCoords) -> Self: return replace(self, shape=shape) def update_attributes(self, attributes: dict[str, JSON]) -> Self: return replace(self, attributes=attributes) # enum Literals can't be used in typing, so we have to restate all of the V3 dtypes as types # https://github.com/python/typing/issues/781 BOOL_DTYPE = Literal["bool"] BOOL = np.bool_ INTEGER_DTYPE = Literal["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"] INTEGER = np.int8 | np.int16 | np.int32 | np.int64 | np.uint8 | np.uint16 | np.uint32 | np.uint64 FLOAT_DTYPE = Literal["float16", "float32", "float64"] FLOAT = np.float16 | np.float32 | np.float64 COMPLEX_DTYPE = Literal["complex64", "complex128"] COMPLEX = np.complex64 | np.complex128 STRING_DTYPE = Literal["string"] STRING = np.str_ BYTES_DTYPE = Literal["bytes"] BYTES = np.bytes_ ALL_DTYPES = BOOL_DTYPE | INTEGER_DTYPE | FLOAT_DTYPE | COMPLEX_DTYPE | STRING_DTYPE | BYTES_DTYPE @overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: BOOL_DTYPE, ) -> BOOL: ... @overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: INTEGER_DTYPE, ) -> INTEGER: ... @overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: FLOAT_DTYPE, ) -> FLOAT: ... 
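# Illustrative behaviour of the implementation below (not in the original
# source), assuming the v3 JSON encoding of special floats: complex fill
# values arrive as two-element sequences, so
# parse_fill_value(["Infinity", 0.0], "complex64") decodes the strings via
# SPECIAL_FLOATS_ENCODED and returns np.complex64(complex(np.inf, 0.0)).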
@overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: COMPLEX_DTYPE, ) -> COMPLEX: ... @overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: STRING_DTYPE, ) -> STRING: ... @overload def parse_fill_value( fill_value: complex | str | bytes | np.generic | Sequence[Any] | bool, dtype: BYTES_DTYPE, ) -> BYTES: ... def parse_fill_value( fill_value: Any, dtype: ALL_DTYPES, ) -> Any: """ Parse `fill_value`, a potential fill value, into an instance of `dtype`, a data type. If `fill_value` is `None`, then this function will return the result of casting the value 0 to the provided data type. Otherwise, `fill_value` will be cast to the provided data type. Note that some numpy dtypes use very permissive casting rules. For example, `np.bool_({'not remotely a bool'})` returns `True`. Thus this function should not be used for validating that the provided fill value is a valid instance of the data type. Parameters ---------- fill_value : Any A potential fill value. dtype : str A valid Zarr format 3 DataType. Returns ------- A scalar instance of `dtype` """ data_type = DataType(dtype) if fill_value is None: raise ValueError("Fill value cannot be None") if data_type == DataType.string: return np.str_(fill_value) if data_type == DataType.bytes: return np.bytes_(fill_value) # the rest are numeric types np_dtype = cast(np.dtype[Any], data_type.to_numpy()) if isinstance(fill_value, Sequence) and not isinstance(fill_value, str): if data_type in (DataType.complex64, DataType.complex128): if len(fill_value) == 2: decoded_fill_value = tuple( SPECIAL_FLOATS_ENCODED.get(value, value) for value in fill_value ) # complex datatypes serialize to JSON arrays with two elements return np_dtype.type(complex(*decoded_fill_value)) else: msg = ( f"Got an invalid fill value for complex data type {data_type.value}." f"Expected a sequence with 2 elements, but {fill_value!r} has " f"length {len(fill_value)}." ) raise ValueError(msg) msg = f"Cannot parse non-string sequence {fill_value!r} as a scalar with type {data_type.value}." raise TypeError(msg) # Cast the fill_value to the given dtype try: # This warning filter can be removed after Zarr supports numpy>=2.0 # The warning is saying that the future behavior of out of bounds casting will be to raise # an OverflowError. In the meantime, we allow overflow and catch cases where # fill_value != casted_value below. 
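        # Illustrative example (not in the original source):
        # parse_fill_value(300, "uint8") overflows; the silenced cast yields
        # 44, which then fails the fill_value == casted_value comparison below
        # and raises a ValueError instead of silently wrapping.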
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            casted_value = np.dtype(np_dtype).type(fill_value)
    except (ValueError, OverflowError, TypeError) as e:
        raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}") from e
    # Check if the value is still representable by the dtype
    if (fill_value == "NaN" and np.isnan(casted_value)) or (
        fill_value in ["Infinity", "-Infinity"] and not np.isfinite(casted_value)
    ):
        pass
    elif np_dtype.kind == "f":
        # float comparison is not exact, especially when dtype <float64,
        # so we use np.isclose for this comparison
        if not np.isclose(fill_value, casted_value, equal_nan=True):
            raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}")
    else:
        if fill_value != casted_value:
            raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}")

    return casted_value


def default_fill_value(dtype: DataType) -> str | bytes | np.generic:
    if dtype == DataType.string:
        return ""
    elif dtype == DataType.bytes:
        return b""
    else:
        np_dtype = dtype.to_numpy()
        np_dtype = cast(np.dtype[Any], np_dtype)
        return np_dtype.type(0)  # type: ignore[misc]


# For type checking
_bool = bool


class DataType(Enum):
    bool = "bool"
    int8 = "int8"
    int16 = "int16"
    int32 = "int32"
    int64 = "int64"
    uint8 = "uint8"
    uint16 = "uint16"
    uint32 = "uint32"
    uint64 = "uint64"
    float16 = "float16"
    float32 = "float32"
    float64 = "float64"
    complex64 = "complex64"
    complex128 = "complex128"
    string = "string"
    bytes = "bytes"

    @property
    def byte_count(self) -> int | None:
        data_type_byte_counts = {
            DataType.bool: 1,
            DataType.int8: 1,
            DataType.int16: 2,
            DataType.int32: 4,
            DataType.int64: 8,
            DataType.uint8: 1,
            DataType.uint16: 2,
            DataType.uint32: 4,
            DataType.uint64: 8,
            DataType.float16: 2,
            DataType.float32: 4,
            DataType.float64: 8,
            DataType.complex64: 8,
            DataType.complex128: 16,
        }
        try:
            return data_type_byte_counts[self]
        except KeyError:
            # string and bytes have variable length
            return None

    @property
    def has_endianness(self) -> _bool:
        return self.byte_count is not None and self.byte_count != 1

    def to_numpy_shortname(self) -> str:
        data_type_to_numpy = {
            DataType.bool: "bool",
            DataType.int8: "i1",
            DataType.int16: "i2",
            DataType.int32: "i4",
            DataType.int64: "i8",
            DataType.uint8: "u1",
            DataType.uint16: "u2",
            DataType.uint32: "u4",
            DataType.uint64: "u8",
            DataType.float16: "f2",
            DataType.float32: "f4",
            DataType.float64: "f8",
            DataType.complex64: "c8",
            DataType.complex128: "c16",
        }
        return data_type_to_numpy[self]

    def to_numpy(self) -> np.dtypes.StringDType | np.dtypes.ObjectDType | np.dtype[Any]:
        # note: it is not possible to round trip DataType <-> np.dtype
        # due to the fact that DataType.string and DataType.bytes both
        # generally return np.dtype("O") from this function, even though
        # they can originate as fixed-length types (e.g. "<U10", "|S5")
        if self == DataType.string:
            return STRING_NP_DTYPE
        elif self == DataType.bytes:
            # for now, always use object dtype for bytestrings
            return np.dtype("O")
        else:
            return np.dtype(self.to_numpy_shortname())

    @classmethod
    def from_numpy(cls, dtype: np.dtype[Any]) -> DataType:
        if dtype.kind in "UT":
            return DataType.string
        elif dtype.kind == "S":
            return DataType.bytes
        elif not _NUMPY_SUPPORTS_VLEN_STRING and dtype.kind == "O":
            # numpy < 2.0 does not support vlen string dtype
            # so we fall back on object array of strings
            return DataType.string
        dtype_to_data_type = {
            "|b1": "bool",
            "bool": "bool",
            "|i1": "int8",
            "<i2": "int16",
            "<i4": "int32",
            "<i8": "int64",
            "|u1": "uint8",
            "<u2": "uint16",
            "<u4": "uint32",
            "<u8": "uint64",
            "<f2": "float16",
            "<f4": "float32",
            "<f8": "float64",
            "<c8": "complex64",
            "<c16": "complex128",
        }
        return DataType[dtype_to_data_type[dtype.str]]

    @classmethod
    def parse(cls, dtype: npt.DTypeLike | DataType | None) -> DataType:
        if dtype is None:
            return DataType[DEFAULT_DTYPE]
        if isinstance(dtype, DataType):
            return dtype
        try:
            return DataType(dtype)
        except ValueError:
            pass
        try:
            dtype = np.dtype(dtype)
        except (ValueError, TypeError) as e:
            raise ValueError(f"Invalid Zarr format 3 data_type: {dtype}") from e
        # check that this is a valid v3 data_type
        try:
            data_type = DataType.from_numpy(dtype)
        except KeyError as e:
            raise ValueError(f"Invalid Zarr format 3 data_type: {dtype}") from e
        return data_type

zarr-python-3.0.6/src/zarr/core/strings.py000066400000000000000000000067151476711733500206040ustar00rootroot00000000000000
"""This module contains utilities for working with string arrays across
different versions of Numpy.
""" from typing import Any, Union, cast from warnings import warn import numpy as np # _STRING_DTYPE is the in-memory datatype that will be used for V3 string arrays # when reading data back from Zarr. # Any valid string-like datatype should be fine for *setting* data. _STRING_DTYPE: Union["np.dtypes.StringDType", "np.dtypes.ObjectDType"] _NUMPY_SUPPORTS_VLEN_STRING: bool def cast_array( data: np.ndarray[Any, np.dtype[Any]], ) -> np.ndarray[Any, Union["np.dtypes.StringDType", "np.dtypes.ObjectDType"]]: raise NotImplementedError try: # this new vlen string dtype was added in NumPy 2.0 _STRING_DTYPE = np.dtypes.StringDType() _NUMPY_SUPPORTS_VLEN_STRING = True def cast_array( data: np.ndarray[Any, np.dtype[Any]], ) -> np.ndarray[Any, np.dtypes.StringDType | np.dtypes.ObjectDType]: out = data.astype(_STRING_DTYPE, copy=False) return cast(np.ndarray[Any, np.dtypes.StringDType], out) except AttributeError: # if not available, we fall back on an object array of strings, as in Zarr < 3 _STRING_DTYPE = np.dtypes.ObjectDType() _NUMPY_SUPPORTS_VLEN_STRING = False def cast_array( data: np.ndarray[Any, np.dtype[Any]], ) -> np.ndarray[Any, Union["np.dtypes.StringDType", "np.dtypes.ObjectDType"]]: out = data.astype(_STRING_DTYPE, copy=False) return cast(np.ndarray[Any, np.dtypes.ObjectDType], out) def cast_to_string_dtype( data: np.ndarray[Any, np.dtype[Any]], safe: bool = False ) -> np.ndarray[Any, Union["np.dtypes.StringDType", "np.dtypes.ObjectDType"]]: """Take any data and attempt to cast to to our preferred string dtype. data : np.ndarray The data to cast safe : bool If True, do not issue a warning if the data is cast from object to string dtype. """ if np.issubdtype(data.dtype, np.str_): # legacy fixed-width string type (e.g. "= 2.", stacklevel=2, ) return cast_array(data) raise ValueError(f"Cannot cast dtype {data.dtype} to string dtype") zarr-python-3.0.6/src/zarr/core/sync.py000066400000000000000000000163451476711733500200670ustar00rootroot00000000000000from __future__ import annotations import asyncio import atexit import logging import os import threading from concurrent.futures import ThreadPoolExecutor, wait from typing import TYPE_CHECKING, Any, TypeVar from typing_extensions import ParamSpec from zarr.core.config import config if TYPE_CHECKING: from collections.abc import AsyncIterator, Awaitable, Callable, Coroutine from typing import Any logger = logging.getLogger(__name__) P = ParamSpec("P") T = TypeVar("T") # From https://github.com/fsspec/filesystem_spec/blob/master/fsspec/asyn.py iothread: list[threading.Thread | None] = [None] # dedicated IO thread loop: list[asyncio.AbstractEventLoop | None] = [ None ] # global event loop for any non-async instance _lock: threading.Lock | None = None # global lock placeholder _executor: ThreadPoolExecutor | None = None # global executor placeholder class SyncError(Exception): pass def _get_lock() -> threading.Lock: """Allocate or return a threading lock. The lock is allocated on first use to allow setting one lock per forked process. """ global _lock if not _lock: _lock = threading.Lock() return _lock def _get_executor() -> ThreadPoolExecutor: """Return Zarr Thread Pool Executor The executor is allocated on first use. 
""" global _executor if not _executor: max_workers = config.get("threading.max_workers", None) logger.debug("Creating Zarr ThreadPoolExecutor with max_workers=%s", max_workers) _executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="zarr_pool") _get_loop().set_default_executor(_executor) return _executor def cleanup_resources() -> None: global _executor if _executor: _executor.shutdown(wait=True, cancel_futures=True) _executor = None if loop[0] is not None: with _get_lock(): # Stop the event loop safely loop[0].call_soon_threadsafe(loop[0].stop) # Stop loop from another thread if iothread[0] is not None: iothread[0].join(timeout=0.2) # Add a timeout to avoid hanging if iothread[0].is_alive(): logger.warning( "Thread did not finish cleanly; forcefully closing the event loop." ) # Forcefully close the event loop to release resources loop[0].close() # dereference the loop and iothread loop[0] = None iothread[0] = None atexit.register(cleanup_resources) def reset_resources_after_fork() -> None: """ Ensure that global resources are reset after a fork. Without this function, forked processes will retain invalid references to the parent process's resources. """ global loop, iothread, _executor # These lines are excluded from coverage because this function only runs in a child process, # which is not observed by the test coverage instrumentation. Despite the apparent lack of # test coverage, this function should be adequately tested by any test that uses Zarr IO with # multiprocessing. loop[0] = None # pragma: no cover iothread[0] = None # pragma: no cover _executor = None # pragma: no cover # this is only available on certain operating systems if hasattr(os, "register_at_fork"): os.register_at_fork(after_in_child=reset_resources_after_fork) async def _runner(coro: Coroutine[Any, Any, T]) -> T | BaseException: """ Await a coroutine and return the result of running it. If awaiting the coroutine raises an exception, the exception will be returned. """ try: return await coro except Exception as ex: return ex def sync( coro: Coroutine[Any, Any, T], loop: asyncio.AbstractEventLoop | None = None, timeout: float | None = None, ) -> T: """ Make loop run coroutine until it returns. Runs in other thread Examples -------- >>> sync(async_function(), existing_loop) """ if loop is None: # NB: if the loop is not running *yet*, it is OK to submit work # and we will wait for it loop = _get_loop() if _executor is None and config.get("threading.max_workers", None) is not None: # trigger executor creation and attach to loop _ = _get_executor() if not isinstance(loop, asyncio.AbstractEventLoop): raise TypeError(f"loop cannot be of type {type(loop)}") if loop.is_closed(): raise RuntimeError("Loop is not running") try: loop0 = asyncio.events.get_running_loop() if loop0 is loop: raise SyncError("Calling sync() from within a running loop") except RuntimeError: pass future = asyncio.run_coroutine_threadsafe(_runner(coro), loop) finished, unfinished = wait([future], return_when=asyncio.ALL_COMPLETED, timeout=timeout) if len(unfinished) > 0: raise TimeoutError(f"Coroutine {coro} failed to finish within {timeout} s") assert len(finished) == 1 return_result = next(iter(finished)).result() if isinstance(return_result, BaseException): raise return_result else: return return_result def _get_loop() -> asyncio.AbstractEventLoop: """Create or return the default fsspec IO loop The loop will be running on a separate thread. 
""" if loop[0] is None: with _get_lock(): # repeat the check just in case the loop got filled between the # previous two calls from another thread if loop[0] is None: logger.debug("Creating Zarr event loop") new_loop = asyncio.new_event_loop() loop[0] = new_loop iothread[0] = threading.Thread(target=new_loop.run_forever, name="zarr_io") assert iothread[0] is not None iothread[0].daemon = True iothread[0].start() assert loop[0] is not None return loop[0] async def _collect_aiterator(data: AsyncIterator[T]) -> tuple[T, ...]: """ Collect an entire async iterator into a tuple """ result = [x async for x in data] return tuple(result) def collect_aiterator(data: AsyncIterator[T]) -> tuple[T, ...]: """ Synchronously collect an entire async iterator into a tuple. """ return sync(_collect_aiterator(data)) class SyncMixin: def _sync(self, coroutine: Coroutine[Any, Any, T]) -> T: # TODO: refactor this to to take *args and **kwargs and pass those to the method # this should allow us to better type the sync wrapper return sync( coroutine, timeout=config.get("async.timeout"), ) def _sync_iter(self, async_iterator: AsyncIterator[T]) -> list[T]: async def iter_to_list() -> list[T]: return [item async for item in async_iterator] return self._sync(iter_to_list()) async def _with_semaphore( func: Callable[[], Awaitable[T]], semaphore: asyncio.Semaphore | None = None ) -> T: """ Await the result of invoking the no-argument-callable ``func`` within the context manager provided by a Semaphore, if one is provided. Otherwise, just await the result of invoking ``func``. """ if semaphore is None: return await func() async with semaphore: return await func() zarr-python-3.0.6/src/zarr/core/sync_group.py000066400000000000000000000136531476711733500213020ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING from zarr.core.group import Group, GroupMetadata, _parse_async_node from zarr.core.group import create_hierarchy as create_hierarchy_async from zarr.core.group import create_nodes as create_nodes_async from zarr.core.group import create_rooted_hierarchy as create_rooted_hierarchy_async from zarr.core.group import get_node as get_node_async from zarr.core.sync import _collect_aiterator, sync if TYPE_CHECKING: from collections.abc import Iterator from zarr.abc.store import Store from zarr.core.array import Array from zarr.core.common import ZarrFormat from zarr.core.metadata import ArrayV2Metadata, ArrayV3Metadata def create_nodes( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata] ) -> Iterator[tuple[str, Group | Array]]: """Create a collection of arrays and / or groups concurrently. Note: no attempt is made to validate that these arrays and / or groups collectively form a valid Zarr hierarchy. It is the responsibility of the caller of this function to ensure that the ``nodes`` parameter satisfies any correctness constraints. Parameters ---------- store : Store The storage backend to use. nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, and the values are the metadata of the nodes. The metadata must be either an instance of GroupMetadata, ArrayV3Metadata or ArrayV2Metadata. Yields ------ Group | Array The created nodes. 
""" coro = create_nodes_async(store=store, nodes=nodes) for key, value in sync(_collect_aiterator(coro)): yield key, _parse_async_node(value) def create_hierarchy( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], overwrite: bool = False, ) -> Iterator[tuple[str, Group | Array]]: """ Create a complete zarr hierarchy from a collection of metadata objects. This function will parse its input to ensure that the hierarchy is complete. Any implicit groups will be inserted as needed. For example, an input like ```{'a/b': GroupMetadata}``` will be parsed to ```{'': GroupMetadata, 'a': GroupMetadata, 'b': Groupmetadata}``` After input parsing, this function then creates all the nodes in the hierarchy concurrently. Arrays and Groups are yielded in the order they are created. This order is not stable and should not be relied on. Parameters ---------- store : Store The storage backend to use. nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, relative to the root of the ``Store``. The root of the store can be specified with the empty string ``''``. The values are instances of ``GroupMetadata`` or ``ArrayMetadata``. Note that all values must have the same ``zarr_format`` -- it is an error to mix zarr versions in the same hierarchy. Leading "/" characters from keys will be removed. overwrite : bool Whether to overwrite existing nodes. Defaults to ``False``, in which case an error is raised instead of overwriting an existing array or group. This function will not erase an existing group unless that group is explicitly named in ``nodes``. If ``nodes`` defines implicit groups, e.g. ``{`'a/b/c': GroupMetadata}``, and a group already exists at path ``a``, then this function will leave the group at ``a`` as-is. Yields ------ tuple[str, Group | Array] This function yields (path, node) pairs, in the order the nodes were created. Examples -------- >>> from zarr import create_hierarchy >>> from zarr.storage import MemoryStore >>> from zarr.core.group import GroupMetadata >>> store = MemoryStore() >>> nodes = {'a': GroupMetadata(attributes={'name': 'leaf'})} >>> nodes_created = dict(create_hierarchy(store=store, nodes=nodes)) >>> print(nodes) # {'a': GroupMetadata(attributes={'name': 'leaf'}, zarr_format=3, consolidated_metadata=None, node_type='group')} """ coro = create_hierarchy_async(store=store, nodes=nodes, overwrite=overwrite) for key, value in sync(_collect_aiterator(coro)): yield key, _parse_async_node(value) def create_rooted_hierarchy( *, store: Store, nodes: dict[str, GroupMetadata | ArrayV2Metadata | ArrayV3Metadata], overwrite: bool = False, ) -> Group | Array: """ Create a Zarr hierarchy with a root, and return the root node, which could be a ``Group`` or ``Array`` instance. Parameters ---------- store : Store The storage backend to use. nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata] A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy, and the values are the metadata of the nodes. The metadata must be either an instance of GroupMetadata, ArrayV3Metadata or ArrayV2Metadata. overwrite : bool Whether to overwrite existing nodes. Default is ``False``. 
Returns ------- Group | Array """ async_node = sync(create_rooted_hierarchy_async(store=store, nodes=nodes, overwrite=overwrite)) return _parse_async_node(async_node) def get_node(store: Store, path: str, zarr_format: ZarrFormat) -> Array | Group: """ Get an Array or Group from a path in a Store. Parameters ---------- store : Store The store-like object to read from. path : str The path to the node to read. zarr_format : {2, 3} The zarr format of the node to read. Returns ------- Array | Group """ return _parse_async_node(sync(get_node_async(store=store, path=path, zarr_format=zarr_format))) zarr-python-3.0.6/src/zarr/creation.py000066400000000000000000000013471476711733500177630ustar00rootroot00000000000000""" Helpers for creating arrays. .. warning:: This sub-module is deprecated. All functions here are defined in the top level zarr namespace instead. """ import warnings from zarr.api.synchronous import ( array, create, empty, empty_like, full, full_like, ones, ones_like, open_array, open_like, zeros, zeros_like, ) __all__ = [ "array", "create", "empty", "empty_like", "full", "full_like", "ones", "ones_like", "open_array", "open_like", "zeros", "zeros_like", ] warnings.warn( "zarr.creation is deprecated. " "Import these functions from the top level zarr namespace instead.", DeprecationWarning, stacklevel=2, ) zarr-python-3.0.6/src/zarr/errors.py000066400000000000000000000031651476711733500174730ustar00rootroot00000000000000from typing import Any __all__ = [ "BaseZarrError", "ContainsArrayAndGroupError", "ContainsArrayError", "ContainsGroupError", "MetadataValidationError", "NodeTypeValidationError", ] class BaseZarrError(ValueError): """ Base error which all zarr errors are sub-classed from. """ _msg = "" def __init__(self, *args: Any) -> None: super().__init__(self._msg.format(*args)) class ContainsGroupError(BaseZarrError): """Raised when a group already exists at a certain path.""" _msg = "A group exists in store {!r} at path {!r}." class ContainsArrayError(BaseZarrError): """Raised when an array already exists at a certain path.""" _msg = "An array exists in store {!r} at path {!r}." class ContainsArrayAndGroupError(BaseZarrError): """Raised when both array and group metadata are found at the same path.""" _msg = ( "Array and group metadata documents (.zarray and .zgroup) were both found in store " "{!r} at path {!r}. " "Only one of these files may be present in a given directory / prefix. " "Remove the .zarray file, or the .zgroup file, or both." ) class MetadataValidationError(BaseZarrError): """Raised when the Zarr metadata is invalid in some way""" _msg = "Invalid value for '{}'. Expected '{}'. Got '{}'." class NodeTypeValidationError(MetadataValidationError): """ Specialized exception when the node_type of the metadata document is incorrect. This can be raised when the value is invalid or unexpected given the context, for example an 'array' node when we expected a 'group'.
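A short sketch of the message formatting inherited from ``MetadataValidationError`` (the arguments here are illustrative): >>> str(NodeTypeValidationError("node_type", "group", "array")) "Invalid value for 'node_type'. Expected 'group'. Got 'array'."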
""" zarr-python-3.0.6/src/zarr/py.typed000066400000000000000000000000001476711733500172650ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/registry.py000066400000000000000000000215551476711733500200320ustar00rootroot00000000000000from __future__ import annotations import warnings from collections import defaultdict from importlib.metadata import entry_points as get_entry_points from typing import TYPE_CHECKING, Any, Generic, TypeVar from zarr.core.config import BadConfigError, config if TYPE_CHECKING: from importlib.metadata import EntryPoint from zarr.abc.codec import ( ArrayArrayCodec, ArrayBytesCodec, BytesBytesCodec, Codec, CodecPipeline, ) from zarr.core.buffer import Buffer, NDBuffer from zarr.core.common import JSON __all__ = [ "Registry", "get_buffer_class", "get_codec_class", "get_ndbuffer_class", "get_pipeline_class", "register_buffer", "register_codec", "register_ndbuffer", "register_pipeline", ] T = TypeVar("T") class Registry(dict[str, type[T]], Generic[T]): def __init__(self) -> None: super().__init__() self.lazy_load_list: list[EntryPoint] = [] def lazy_load(self) -> None: for e in self.lazy_load_list: self.register(e.load()) self.lazy_load_list.clear() def register(self, cls: type[T]) -> None: self[fully_qualified_name(cls)] = cls __codec_registries: dict[str, Registry[Codec]] = defaultdict(Registry) __pipeline_registry: Registry[CodecPipeline] = Registry() __buffer_registry: Registry[Buffer] = Registry() __ndbuffer_registry: Registry[NDBuffer] = Registry() """ The registry module is responsible for managing implementations of codecs, pipelines, buffers and ndbuffers and collecting them from entrypoints. The implementation used is determined by the config. """ def _collect_entrypoints() -> list[Registry[Any]]: """ Collects codecs, pipelines, buffers and ndbuffers from entrypoints. Entry points can either be single items or groups of items. Allowed syntax for entry_points.txt is e.g. [zarr.codecs] gzip = package:EntrypointGzipCodec1 [zarr.codecs.gzip] some_name = package:EntrypointGzipCodec2 another = package:EntrypointGzipCodec3 [zarr] buffer = package:TestBuffer1 [zarr.buffer] xyz = package:TestBuffer2 abc = package:TestBuffer3 ... """ entry_points = get_entry_points() __buffer_registry.lazy_load_list.extend(entry_points.select(group="zarr.buffer")) __buffer_registry.lazy_load_list.extend(entry_points.select(group="zarr", name="buffer")) __ndbuffer_registry.lazy_load_list.extend(entry_points.select(group="zarr.ndbuffer")) __ndbuffer_registry.lazy_load_list.extend(entry_points.select(group="zarr", name="ndbuffer")) __pipeline_registry.lazy_load_list.extend(entry_points.select(group="zarr.codec_pipeline")) __pipeline_registry.lazy_load_list.extend( entry_points.select(group="zarr", name="codec_pipeline") ) for e in entry_points.select(group="zarr.codecs"): __codec_registries[e.name].lazy_load_list.append(e) for group in entry_points.groups: if group.startswith("zarr.codecs."): codec_name = group.split(".")[2] __codec_registries[codec_name].lazy_load_list.extend(entry_points.select(group=group)) return [ *__codec_registries.values(), __pipeline_registry, __buffer_registry, __ndbuffer_registry, ] def _reload_config() -> None: config.refresh() def fully_qualified_name(cls: type) -> str: module = cls.__module__ return module + "." 
+ cls.__qualname__ def register_codec(key: str, codec_cls: type[Codec]) -> None: if key not in __codec_registries: __codec_registries[key] = Registry() __codec_registries[key].register(codec_cls) def register_pipeline(pipe_cls: type[CodecPipeline]) -> None: __pipeline_registry.register(pipe_cls) def register_ndbuffer(cls: type[NDBuffer]) -> None: __ndbuffer_registry.register(cls) def register_buffer(cls: type[Buffer]) -> None: __buffer_registry.register(cls) def get_codec_class(key: str, reload_config: bool = False) -> type[Codec]: if reload_config: _reload_config() if key in __codec_registries: # logger.debug("Auto loading codec '%s' from entrypoint", codec_id) __codec_registries[key].lazy_load() codec_classes = __codec_registries[key] if not codec_classes: raise KeyError(key) config_entry = config.get("codecs", {}).get(key) if config_entry is None: if len(codec_classes) == 1: return next(iter(codec_classes.values())) warnings.warn( f"Codec '{key}' not configured in config. Selecting any implementation.", stacklevel=2 ) return list(codec_classes.values())[-1] selected_codec_cls = codec_classes[config_entry] if selected_codec_cls: return selected_codec_cls raise KeyError(key) def _resolve_codec(data: dict[str, JSON]) -> Codec: """ Get a codec instance from a dict representation of that codec. """ # TODO: narrow the type of the input to only those dicts that map on to codec class instances. return get_codec_class(data["name"]).from_dict(data) # type: ignore[arg-type] def _parse_bytes_bytes_codec(data: dict[str, JSON] | Codec) -> BytesBytesCodec: """ Normalize the input to a ``BytesBytesCodec`` instance. If the input is already a ``BytesBytesCodec``, it is returned as is. If the input is a dict, it is converted to a ``BytesBytesCodec`` instance via the ``_resolve_codec`` function. """ from zarr.abc.codec import BytesBytesCodec if isinstance(data, dict): result = _resolve_codec(data) if not isinstance(result, BytesBytesCodec): msg = f"Expected a dict representation of a BytesBytesCodec; got a dict representation of a {type(result)} instead." raise TypeError(msg) else: if not isinstance(data, BytesBytesCodec): raise TypeError(f"Expected a BytesBytesCodec. Got {type(data)} instead.") result = data return result def _parse_array_bytes_codec(data: dict[str, JSON] | Codec) -> ArrayBytesCodec: """ Normalize the input to a ``ArrayBytesCodec`` instance. If the input is already a ``ArrayBytesCodec``, it is returned as is. If the input is a dict, it is converted to a ``ArrayBytesCodec`` instance via the ``_resolve_codec`` function. """ from zarr.abc.codec import ArrayBytesCodec if isinstance(data, dict): result = _resolve_codec(data) if not isinstance(result, ArrayBytesCodec): msg = f"Expected a dict representation of a ArrayBytesCodec; got a dict representation of a {type(result)} instead." raise TypeError(msg) else: if not isinstance(data, ArrayBytesCodec): raise TypeError(f"Expected a ArrayBytesCodec. Got {type(data)} instead.") result = data return result def _parse_array_array_codec(data: dict[str, JSON] | Codec) -> ArrayArrayCodec: """ Normalize the input to a ``ArrayArrayCodec`` instance. If the input is already a ``ArrayArrayCodec``, it is returned as is. If the input is a dict, it is converted to a ``ArrayArrayCodec`` instance via the ``_resolve_codec`` function. 
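A hedged sketch of the dict form this accepts, assuming the built-in ``transpose`` codec is registered (it is by default): >>> _parse_array_array_codec({"name": "transpose", "configuration": {"order": [1, 0]}}) # doctest: +SKIP TransposeCodec(order=(1, 0))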
""" from zarr.abc.codec import ArrayArrayCodec if isinstance(data, dict): result = _resolve_codec(data) if not isinstance(result, ArrayArrayCodec): msg = f"Expected a dict representation of a ArrayArrayCodec; got a dict representation of a {type(result)} instead." raise TypeError(msg) else: if not isinstance(data, ArrayArrayCodec): raise TypeError(f"Expected a ArrayArrayCodec. Got {type(data)} instead.") result = data return result def get_pipeline_class(reload_config: bool = False) -> type[CodecPipeline]: if reload_config: _reload_config() __pipeline_registry.lazy_load() path = config.get("codec_pipeline.path") pipeline_class = __pipeline_registry.get(path) if pipeline_class: return pipeline_class raise BadConfigError( f"Pipeline class '{path}' not found in registered pipelines: {list(__pipeline_registry)}." ) def get_buffer_class(reload_config: bool = False) -> type[Buffer]: if reload_config: _reload_config() __buffer_registry.lazy_load() path = config.get("buffer") buffer_class = __buffer_registry.get(path) if buffer_class: return buffer_class raise BadConfigError( f"Buffer class '{path}' not found in registered buffers: {list(__buffer_registry)}." ) def get_ndbuffer_class(reload_config: bool = False) -> type[NDBuffer]: if reload_config: _reload_config() __ndbuffer_registry.lazy_load() path = config.get("ndbuffer") ndbuffer_class = __ndbuffer_registry.get(path) if ndbuffer_class: return ndbuffer_class raise BadConfigError( f"NDBuffer class '{path}' not found in registered buffers: {list(__ndbuffer_registry)}." ) _collect_entrypoints() zarr-python-3.0.6/src/zarr/storage/000077500000000000000000000000001476711733500172445ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/storage/__init__.py000066400000000000000000000022771476711733500213650ustar00rootroot00000000000000import sys import warnings from types import ModuleType from typing import Any from zarr.storage._common import StoreLike, StorePath from zarr.storage._fsspec import FsspecStore from zarr.storage._local import LocalStore from zarr.storage._logging import LoggingStore from zarr.storage._memory import GpuMemoryStore, MemoryStore from zarr.storage._wrapper import WrapperStore from zarr.storage._zip import ZipStore __all__ = [ "FsspecStore", "GpuMemoryStore", "LocalStore", "LoggingStore", "MemoryStore", "StoreLike", "StorePath", "WrapperStore", "ZipStore", ] class VerboseModule(ModuleType): def __setattr__(self, attr: str, value: Any) -> None: if attr == "default_compressor": warnings.warn( "setting zarr.storage.default_compressor is deprecated, use " "zarr.config to configure array.v2_default_compressor " "e.g. 
config.set({'codecs.zstd':'numcodecs.Zstd', 'array.v2_default_compressor.numeric': 'zstd'})", DeprecationWarning, stacklevel=1, ) else: super().__setattr__(attr, value) sys.modules[__name__].__class__ = VerboseModule zarr-python-3.0.6/src/zarr/storage/_common.py000066400000000000000000000406671476711733500212560ustar00rootroot00000000000000from __future__ import annotations import json from pathlib import Path from typing import TYPE_CHECKING, Any, Literal from zarr.abc.store import ByteRequest, Store from zarr.core.buffer import Buffer, default_buffer_prototype from zarr.core.common import ZARR_JSON, ZARRAY_JSON, ZGROUP_JSON, AccessModeLiteral, ZarrFormat from zarr.errors import ContainsArrayAndGroupError, ContainsArrayError, ContainsGroupError from zarr.storage._local import LocalStore from zarr.storage._memory import MemoryStore from zarr.storage._utils import normalize_path if TYPE_CHECKING: from zarr.core.buffer import BufferPrototype def _dereference_path(root: str, path: str) -> str: assert isinstance(root, str) assert isinstance(path, str) root = root.rstrip("/") path = f"{root}/{path}" if root else path return path.rstrip("/") class StorePath: """ Path-like interface for a Store. Parameters ---------- store : Store The store to use. path : str The path within the store. """ store: Store path: str def __init__(self, store: Store, path: str = "") -> None: self.store = store self.path = normalize_path(path) @property def read_only(self) -> bool: return self.store.read_only @classmethod async def open( cls, store: Store, path: str, mode: AccessModeLiteral | None = None ) -> StorePath: """ Open StorePath based on the provided mode. * If the mode is 'w-' and the StorePath contains keys, raise a FileExistsError. * If the mode is 'w', delete all keys nested within the StorePath * If the mode is 'a', 'r', or 'r+', do nothing Parameters ---------- mode : AccessModeLiteral The mode to use when initializing the store path. Raises ------ FileExistsError If the mode is 'w-' and the store path already exists. """ await store._ensure_open() self = cls(store, path) # fastpath if mode is None if mode is None: return self if store.read_only and mode != "r": raise ValueError(f"Store is read-only but mode is '{mode}'") match mode: case "w-": if not await self.is_empty(): msg = ( f"{self} is not empty, but `mode` is set to 'w-'. " "Either remove the existing objects in storage, " "or set `mode` to a value that handles pre-existing objects " "in storage, like `a` or `w`." ) raise FileExistsError(msg) case "w": await self.delete_dir() case "a" | "r" | "r+": # No init action pass case _: raise ValueError(f"Invalid mode: {mode}") return self async def get( self, prototype: BufferPrototype | None = None, byte_range: ByteRequest | None = None, ) -> Buffer | None: """ Read bytes from the store. Parameters ---------- prototype : BufferPrototype, optional The buffer prototype to use when reading the bytes. byte_range : ByteRequest, optional The range of bytes to read. Returns ------- buffer : Buffer or None The read bytes, or None if the key does not exist. """ if prototype is None: prototype = default_buffer_prototype() return await self.store.get(self.path, prototype=prototype, byte_range=byte_range) async def set(self, value: Buffer, byte_range: ByteRequest | None = None) -> None: """ Write bytes to the store. Parameters ---------- value : Buffer The buffer to write. byte_range : ByteRequest, optional The range of bytes to write. If None, the entire buffer is written.
Raises ------ NotImplementedError If `byte_range` is not None, because Store.set does not support partial writes yet. """ if byte_range is not None: raise NotImplementedError("Store.set does not have partial writes yet") await self.store.set(self.path, value) async def delete(self) -> None: """ Delete the key from the store. Raises ------ NotImplementedError If the store does not support deletion. """ await self.store.delete(self.path) async def delete_dir(self) -> None: """ Delete all keys with the given prefix from the store. """ await self.store.delete_dir(self.path) async def set_if_not_exists(self, default: Buffer) -> None: """ Store ``default`` at this key if the key is not already present. Parameters ---------- default : Buffer The buffer to store if the key is not already present. """ await self.store.set_if_not_exists(self.path, default) async def exists(self) -> bool: """ Check if the key exists in the store. Returns ------- bool True if the key exists in the store, False otherwise. """ return await self.store.exists(self.path) async def is_empty(self) -> bool: """ Check if any keys exist in the store with the given prefix. Returns ------- bool True if no keys exist in the store with the given prefix, False otherwise. """ return await self.store.is_empty(self.path) def __truediv__(self, other: str) -> StorePath: """Combine this store path with another path""" return self.__class__(self.store, _dereference_path(self.path, other)) def __str__(self) -> str: return _dereference_path(str(self.store), self.path) def __repr__(self) -> str: return f"StorePath({self.store.__class__.__name__}, '{self}')" def __eq__(self, other: object) -> bool: """ Check if two StorePath objects are equal. Returns ------- bool True if the two objects are equal, False otherwise. Notes ----- Two StorePath objects are considered equal if their stores are equal and their paths are equal. """ try: return self.store == other.store and self.path == other.path # type: ignore[attr-defined, no-any-return] except Exception: pass return False StoreLike = Store | StorePath | Path | str | dict[str, Buffer] async def make_store_path( store_like: StoreLike | None, *, path: str | None = "", mode: AccessModeLiteral | None = None, storage_options: dict[str, Any] | None = None, ) -> StorePath: """ Convert a `StoreLike` object into a StorePath object. This function takes a `StoreLike` object and returns a `StorePath` object. The `StoreLike` object can be a `Store`, `StorePath`, `Path`, `str`, or `dict[str, Buffer]`. If the `StoreLike` object is a Store or `StorePath`, it is converted to a `StorePath` object. If the `StoreLike` object is a Path or str, it is converted to a LocalStore object and then to a `StorePath` object. If the `StoreLike` object is a dict[str, Buffer], it is converted to a `MemoryStore` object and then to a `StorePath` object. If the `StoreLike` object is None, a `MemoryStore` object is created and converted to a `StorePath` object. If the `StoreLike` object is a str and starts with a protocol, it is converted to a `FsspecStore` object and then to a `StorePath` object. If the `StoreLike` object is a dict[str, Buffer] and the mode is not None, the `MemoryStore` object is created with the given mode. If the `StoreLike` object is a str and starts with a protocol, the `FsspecStore` object is created with the given mode and storage options. Parameters ---------- store_like : StoreLike | None The object to convert to a `StorePath` object. path : str | None, optional The path to use when creating the `StorePath` object.
If None, the default path is the empty string. mode : AccessModeLiteral | None, optional The mode to use when creating the `StorePath` object. If None, the default mode is 'r'. storage_options : dict[str, Any] | None, optional The storage options to use when creating the `FsspecStore` object. If None, the default storage options are used. Returns ------- StorePath The converted StorePath object. Raises ------ TypeError If the StoreLike object is not one of the supported types. """ from zarr.storage._fsspec import FsspecStore # circular import used_storage_options = False path_normalized = normalize_path(path) if isinstance(store_like, StorePath): result = store_like / path_normalized else: assert mode in (None, "r", "r+", "a", "w", "w-") # if mode 'r' was provided, we'll open any new stores as read-only _read_only = mode == "r" if isinstance(store_like, Store): store = store_like elif store_like is None: store = await MemoryStore.open(read_only=_read_only) elif isinstance(store_like, Path): store = await LocalStore.open(root=store_like, read_only=_read_only) elif isinstance(store_like, str): storage_options = storage_options or {} if _is_fsspec_uri(store_like): used_storage_options = True store = FsspecStore.from_url( store_like, storage_options=storage_options, read_only=_read_only ) else: store = await LocalStore.open(root=Path(store_like), read_only=_read_only) elif isinstance(store_like, dict): # We deliberately only consider dict[str, Buffer] here, and not arbitrary mutable mappings. # By only allowing dictionaries, which are in-memory, we know that MemoryStore is appropriate. store = await MemoryStore.open(store_dict=store_like, read_only=_read_only) else: msg = f"Unsupported type for store_like: '{type(store_like).__name__}'" # type: ignore[unreachable] raise TypeError(msg) result = await StorePath.open(store, path=path_normalized, mode=mode) if storage_options and not used_storage_options: msg = "'storage_options' was provided but unused. 'storage_options' is only used for fsspec filesystem stores." raise TypeError(msg) return result def _is_fsspec_uri(uri: str) -> bool: """ Check if a URI looks like a non-local fsspec URI. Examples -------- >>> _is_fsspec_uri("s3://bucket") True >>> _is_fsspec_uri("my-directory") False >>> _is_fsspec_uri("local://my-directory") False """ return "://" in uri or ("::" in uri and "local://" not in uri) async def ensure_no_existing_node(store_path: StorePath, zarr_format: ZarrFormat) -> None: """ Check if a store_path is safe for array / group creation. Returns `None` or raises an exception. Parameters ---------- store_path : StorePath The storage location to check. zarr_format : ZarrFormat The Zarr format to check. Raises ------ ContainsArrayError, ContainsGroupError, ContainsArrayAndGroupError """ if zarr_format == 2: extant_node = await _contains_node_v2(store_path) elif zarr_format == 3: extant_node = await _contains_node_v3(store_path) if extant_node == "array": raise ContainsArrayError(store_path.store, store_path.path) elif extant_node == "group": raise ContainsGroupError(store_path.store, store_path.path) elif extant_node == "nothing": return msg = f"Invalid value for extant_node: {extant_node}" # type: ignore[unreachable] raise ValueError(msg) async def _contains_node_v3(store_path: StorePath) -> Literal["array", "group", "nothing"]: """ Check if a store_path contains nothing, an array, or a group. This function returns the string "array", "group", or "nothing" to denote containing an array, a group, or nothing.
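Only the ``node_type`` field of the ``zarr.json`` document is inspected; the metadata is not fully parsed. A hedged sketch of driving this coroutine from synchronous code (the store is assumed to exist): >>> from zarr.core.sync import sync # doctest: +SKIP >>> sync(_contains_node_v3(StorePath(store, "foo"))) # doctest: +SKIP 'nothing'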
Parameters ---------- store_path : StorePath The location in storage to check. Returns ------- Literal["array", "group", "nothing"] A string representing the zarr node found at store_path. """ result: Literal["array", "group", "nothing"] = "nothing" extant_meta_bytes = await (store_path / ZARR_JSON).get() # if no metadata document could be loaded, then we just return "nothing" if extant_meta_bytes is not None: try: extant_meta_json = json.loads(extant_meta_bytes.to_bytes()) # avoid constructing a full metadata document here in the name of speed. if extant_meta_json["node_type"] == "array": result = "array" elif extant_meta_json["node_type"] == "group": result = "group" except (KeyError, json.JSONDecodeError): # either of these errors is consistent with no array or group present. pass return result async def _contains_node_v2(store_path: StorePath) -> Literal["array", "group", "nothing"]: """ Check if a store_path contains nothing, an array, a group, or both. If both an array and a group are detected, a `ContainsArrayAndGroup` exception is raised. Otherwise, this function returns the string "array", "group", or "nothing" to denote containing an array, a group, or nothing. Parameters ---------- store_path : StorePath The location in storage to check. Returns ------- Literal["array", "group", "nothing"] A string representing the zarr node found at store_path. """ _array = await contains_array(store_path=store_path, zarr_format=2) _group = await contains_group(store_path=store_path, zarr_format=2) if _array and _group: raise ContainsArrayAndGroupError(store_path.store, store_path.path) elif _array: return "array" elif _group: return "group" else: return "nothing" async def contains_array(store_path: StorePath, zarr_format: ZarrFormat) -> bool: """ Check if an array exists at a given StorePath. Parameters ---------- store_path : StorePath The StorePath to check for an existing group. zarr_format : The zarr format to check for. Returns ------- bool True if the StorePath contains a group, False otherwise. """ if zarr_format == 3: extant_meta_bytes = await (store_path / ZARR_JSON).get() if extant_meta_bytes is None: return False else: try: extant_meta_json = json.loads(extant_meta_bytes.to_bytes()) # we avoid constructing a full metadata document here in the name of speed. if extant_meta_json["node_type"] == "array": return True except (ValueError, KeyError): return False elif zarr_format == 2: return await (store_path / ZARRAY_JSON).exists() msg = f"Invalid zarr_format provided. Got {zarr_format}, expected 2 or 3" raise ValueError(msg) async def contains_group(store_path: StorePath, zarr_format: ZarrFormat) -> bool: """ Check if a group exists at a given StorePath. Parameters ---------- store_path : StorePath The StorePath to check for an existing group. zarr_format : The zarr format to check for. Returns ------- bool True if the StorePath contains a group, False otherwise """ if zarr_format == 3: extant_meta_bytes = await (store_path / ZARR_JSON).get() if extant_meta_bytes is None: return False else: try: extant_meta_json = json.loads(extant_meta_bytes.to_bytes()) # we avoid constructing a full metadata document here in the name of speed. result: bool = extant_meta_json["node_type"] == "group" except (ValueError, KeyError): return False else: return result elif zarr_format == 2: return await (store_path / ZGROUP_JSON).exists() msg = f"Invalid zarr_format provided. 
Got {zarr_format}, expected 2 or 3" # type: ignore[unreachable] raise ValueError(msg) zarr-python-3.0.6/src/zarr/storage/_fsspec.py000066400000000000000000000321311476711733500212400ustar00rootroot00000000000000from __future__ import annotations import warnings from contextlib import suppress from typing import TYPE_CHECKING, Any from zarr.abc.store import ( ByteRequest, OffsetByteRequest, RangeByteRequest, Store, SuffixByteRequest, ) from zarr.core.buffer import Buffer from zarr.storage._common import _dereference_path if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterable from fsspec.asyn import AsyncFileSystem from zarr.core.buffer import BufferPrototype from zarr.core.common import BytesLike ALLOWED_EXCEPTIONS: tuple[type[Exception], ...] = ( FileNotFoundError, IsADirectoryError, NotADirectoryError, ) class FsspecStore(Store): """ A remote Store based on FSSpec Parameters ---------- fs : AsyncFileSystem The Async FSSpec filesystem to use with this store. read_only : bool Whether the store is read-only path : str The root path of the store. This should be a relative path and must not include the filesystem scheme. allowed_exceptions : tuple[type[Exception], ...] When fetching data, these cases will be deemed to correspond to missing keys. Attributes ---------- fs allowed_exceptions supports_writes supports_deletes supports_partial_writes supports_listing Raises ------ TypeError If the Filesystem does not support async operations. ValueError If the path argument includes a scheme. Warns ----- UserWarning If the file system (fs) was not created with `asynchronous=True`. See Also -------- FsspecStore.from_upath FsspecStore.from_url """ # based on FSSpec supports_writes: bool = True supports_deletes: bool = True supports_partial_writes: bool = False supports_listing: bool = True fs: AsyncFileSystem allowed_exceptions: tuple[type[Exception], ...] def __init__( self, fs: AsyncFileSystem, read_only: bool = False, path: str = "/", allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS, ) -> None: super().__init__(read_only=read_only) self.fs = fs self.path = path self.allowed_exceptions = allowed_exceptions if not self.fs.async_impl: raise TypeError("Filesystem needs to support async operations.") if not self.fs.asynchronous: warnings.warn( f"fs ({fs}) was not created with `asynchronous=True`, this may lead to surprising behavior", stacklevel=2, ) if "://" in path and not path.startswith("http"): # `not path.startswith("http")` is a special case for the http filesystem (¯\_(ツ)_/¯) scheme, _ = path.split("://", maxsplit=1) raise ValueError(f"path argument to FsspecStore must not include scheme ({scheme}://)") @classmethod def from_upath( cls, upath: Any, read_only: bool = False, allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS, ) -> FsspecStore: """ Create a FsspecStore from an upath object. Parameters ---------- upath : UPath The upath to the root of the store. read_only : bool Whether the store is read-only, defaults to False. allowed_exceptions : tuple, optional The exceptions that are allowed to be raised when accessing the store. Defaults to ALLOWED_EXCEPTIONS. Returns ------- FsspecStore """ return cls( fs=upath.fs, path=upath.path.rstrip("/"), read_only=read_only, allowed_exceptions=allowed_exceptions, ) @classmethod def from_url( cls, url: str, storage_options: dict[str, Any] | None = None, read_only: bool = False, allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS, ) -> FsspecStore: """ Create a FsspecStore from a URL. 
Parameters ---------- url : str The URL to the root of the store. storage_options : dict, optional The options to pass to fsspec when creating the filesystem. read_only : bool Whether the store is read-only, defaults to False. allowed_exceptions : tuple, optional The exceptions that are allowed to be raised when accessing the store. Defaults to ALLOWED_EXCEPTIONS. Returns ------- FsspecStore """ try: from fsspec import url_to_fs except ImportError: # before fsspec==2024.3.1 from fsspec.core import url_to_fs opts = storage_options or {} opts = {"asynchronous": True, **opts} fs, path = url_to_fs(url, **opts) if not fs.async_impl: try: from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper fs = AsyncFileSystemWrapper(fs, asynchronous=True) except ImportError as e: raise ImportError( f"The filesystem for URL '{url}' is synchronous, and the required " "AsyncFileSystemWrapper is not available. Upgrade fsspec to version " "2024.12.0 or later to enable this functionality." ) from e # fsspec is not consistent about removing the scheme from the path, so check and strip it here # https://github.com/fsspec/filesystem_spec/issues/1722 if "://" in path and not path.startswith("http"): # `not path.startswith("http")` is a special case for the http filesystem (¯\_(ツ)_/¯) path = fs._strip_protocol(path) return cls(fs=fs, path=path, read_only=read_only, allowed_exceptions=allowed_exceptions) async def clear(self) -> None: # docstring inherited try: for subpath in await self.fs._find(self.path, withdirs=True): if subpath != self.path: await self.fs._rm(subpath, recursive=True) except FileNotFoundError: pass def __repr__(self) -> str: return f"" def __eq__(self, other: object) -> bool: return ( isinstance(other, type(self)) and self.path == other.path and self.read_only == other.read_only and self.fs == other.fs ) async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited if not self._is_open: await self._open() path = _dereference_path(self.path, key) try: if byte_range is None: value = prototype.buffer.from_bytes(await self.fs._cat_file(path)) elif isinstance(byte_range, RangeByteRequest): value = prototype.buffer.from_bytes( await self.fs._cat_file( path, start=byte_range.start, end=byte_range.end, ) ) elif isinstance(byte_range, OffsetByteRequest): value = prototype.buffer.from_bytes( await self.fs._cat_file(path, start=byte_range.offset, end=None) ) elif isinstance(byte_range, SuffixByteRequest): value = prototype.buffer.from_bytes( await self.fs._cat_file(path, start=-byte_range.suffix, end=None) ) else: raise ValueError(f"Unexpected byte_range, got {byte_range}.") except self.allowed_exceptions: return None except OSError as e: if "not satisfiable" in str(e): # this is an s3-specific condition we probably don't want to leak return prototype.buffer.from_bytes(b"") raise else: return value async def set( self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None, ) -> None: # docstring inherited if not self._is_open: await self._open() self._check_writable() if not isinstance(value, Buffer): raise TypeError( f"FsspecStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." 
) path = _dereference_path(self.path, key) # write data if byte_range: raise NotImplementedError await self.fs._pipe_file(path, value.to_bytes()) async def delete(self, key: str) -> None: # docstring inherited self._check_writable() path = _dereference_path(self.path, key) try: await self.fs._rm(path) except FileNotFoundError: pass except self.allowed_exceptions: pass async def delete_dir(self, prefix: str) -> None: # docstring inherited if not self.supports_deletes: raise NotImplementedError( "This method is only available for stores that support deletes." ) self._check_writable() path_to_delete = _dereference_path(self.path, prefix) with suppress(*self.allowed_exceptions): await self.fs._rm(path_to_delete, recursive=True) async def exists(self, key: str) -> bool: # docstring inherited path = _dereference_path(self.path, key) exists: bool = await self.fs._exists(path) return exists async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited if key_ranges: # _cat_ranges expects a list of paths, start, and end ranges, so we need to reformat each ByteRequest. key_ranges = list(key_ranges) paths: list[str] = [] starts: list[int | None] = [] stops: list[int | None] = [] for key, byte_range in key_ranges: paths.append(_dereference_path(self.path, key)) if byte_range is None: starts.append(None) stops.append(None) elif isinstance(byte_range, RangeByteRequest): starts.append(byte_range.start) stops.append(byte_range.end) elif isinstance(byte_range, OffsetByteRequest): starts.append(byte_range.offset) stops.append(None) elif isinstance(byte_range, SuffixByteRequest): starts.append(-byte_range.suffix) stops.append(None) else: raise ValueError(f"Unexpected byte_range, got {byte_range}.") else: return [] # TODO: expectations for exceptions or missing keys? res = await self.fs._cat_ranges(paths, starts, stops, on_error="return") # the following is an s3-specific condition we probably don't want to leak res = [b"" if (isinstance(r, OSError) and "not satisfiable" in str(r)) else r for r in res] for r in res: if isinstance(r, Exception) and not isinstance(r, self.allowed_exceptions): raise r return [None if isinstance(r, Exception) else prototype.buffer.from_bytes(r) for r in res] async def set_partial_values( self, key_start_values: Iterable[tuple[str, int, BytesLike]] ) -> None: # docstring inherited raise NotImplementedError async def list(self) -> AsyncIterator[str]: # docstring inherited allfiles = await self.fs._find(self.path, detail=False, withdirs=False) for onefile in (a.removeprefix(self.path + "/") for a in allfiles): yield onefile async def list_dir(self, prefix: str) -> AsyncIterator[str]: # docstring inherited prefix = f"{self.path}/{prefix.rstrip('/')}" try: allfiles = await self.fs._ls(prefix, detail=False) except FileNotFoundError: return for onefile in (a.replace(prefix + "/", "") for a in allfiles): yield onefile.removeprefix(self.path).removeprefix("/") async def list_prefix(self, prefix: str) -> AsyncIterator[str]: # docstring inherited for onefile in await self.fs._find( f"{self.path}/{prefix}", detail=False, maxdepth=None, withdirs=False ): yield onefile.removeprefix(f"{self.path}/") async def getsize(self, key: str) -> int: path = _dereference_path(self.path, key) info = await self.fs._info(path) size = info.get("size") if size is None: # Not all filesystems support size. 
Fall back to reading the entire object return await super().getsize(key) else: # fsspec doesn't have typing. We'll need to assume or verify this is true return int(size) zarr-python-3.0.6/src/zarr/storage/_local.py000066400000000000000000000176531476711733500210630ustar00rootroot00000000000000from __future__ import annotations import asyncio import io import os import shutil from pathlib import Path from typing import TYPE_CHECKING from zarr.abc.store import ( ByteRequest, OffsetByteRequest, RangeByteRequest, Store, SuffixByteRequest, ) from zarr.core.buffer import Buffer from zarr.core.buffer.core import default_buffer_prototype from zarr.core.common import concurrent_map if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterable from zarr.core.buffer import BufferPrototype def _get(path: Path, prototype: BufferPrototype, byte_range: ByteRequest | None) -> Buffer: if byte_range is None: return prototype.buffer.from_bytes(path.read_bytes()) with path.open("rb") as f: size = f.seek(0, io.SEEK_END) if isinstance(byte_range, RangeByteRequest): f.seek(byte_range.start) return prototype.buffer.from_bytes(f.read(byte_range.end - f.tell())) elif isinstance(byte_range, OffsetByteRequest): f.seek(byte_range.offset) elif isinstance(byte_range, SuffixByteRequest): f.seek(max(0, size - byte_range.suffix)) else: raise TypeError(f"Unexpected byte_range, got {byte_range}.") return prototype.buffer.from_bytes(f.read()) def _put( path: Path, value: Buffer, start: int | None = None, exclusive: bool = False, ) -> int | None: path.parent.mkdir(parents=True, exist_ok=True) if start is not None: with path.open("r+b") as f: f.seek(start) f.write(value.as_numpy_array().tobytes()) return None else: view = memoryview(value.as_numpy_array().tobytes()) if exclusive: mode = "xb" else: mode = "wb" with path.open(mode=mode) as f: return f.write(view) class LocalStore(Store): """ Local file system store. Parameters ---------- root : str or Path Directory to use as root of store. read_only : bool Whether the store is read-only Attributes ---------- supports_writes supports_deletes supports_partial_writes supports_listing root """ supports_writes: bool = True supports_deletes: bool = True supports_partial_writes: bool = True supports_listing: bool = True root: Path def __init__(self, root: Path | str, *, read_only: bool = False) -> None: super().__init__(read_only=read_only) if isinstance(root, str): root = Path(root) if not isinstance(root, Path): raise TypeError( f"'root' must be a string or Path instance. Got an instance of {type(root)} instead." 
) self.root = root async def _open(self) -> None: if not self.read_only: self.root.mkdir(parents=True, exist_ok=True) return await super()._open() async def clear(self) -> None: # docstring inherited self._check_writable() shutil.rmtree(self.root) self.root.mkdir() def __str__(self) -> str: return f"file://{self.root.as_posix()}" def __repr__(self) -> str: return f"LocalStore('{self}')" def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) and self.root == other.root async def get( self, key: str, prototype: BufferPrototype | None = None, byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited if prototype is None: prototype = default_buffer_prototype() if not self._is_open: await self._open() assert isinstance(key, str) path = self.root / key try: return await asyncio.to_thread(_get, path, prototype, byte_range) except (FileNotFoundError, IsADirectoryError, NotADirectoryError): return None async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited args = [] for key, byte_range in key_ranges: assert isinstance(key, str) path = self.root / key args.append((_get, path, prototype, byte_range)) return await concurrent_map(args, asyncio.to_thread, limit=None) # TODO: fix limit async def set(self, key: str, value: Buffer) -> None: # docstring inherited return await self._set(key, value) async def set_if_not_exists(self, key: str, value: Buffer) -> None: # docstring inherited try: return await self._set(key, value, exclusive=True) except FileExistsError: pass async def _set(self, key: str, value: Buffer, exclusive: bool = False) -> None: if not self._is_open: await self._open() self._check_writable() assert isinstance(key, str) if not isinstance(value, Buffer): raise TypeError( f"LocalStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." ) path = self.root / key await asyncio.to_thread(_put, path, value, start=None, exclusive=exclusive) async def set_partial_values( self, key_start_values: Iterable[tuple[str, int, bytes | bytearray | memoryview]] ) -> None: # docstring inherited self._check_writable() args = [] for key, start, value in key_start_values: assert isinstance(key, str) path = self.root / key args.append((_put, path, value, start)) await concurrent_map(args, asyncio.to_thread, limit=None) # TODO: fix limit async def delete(self, key: str) -> None: """ Remove a key from the store. Parameters ---------- key : str Notes ----- If ``key`` is a directory within this store, the entire directory at ``store.root / key`` is deleted. """ # docstring inherited self._check_writable() path = self.root / key if path.is_dir(): # TODO: support deleting directories? shutil.rmtree? 
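# Note: when `key` names a directory, the whole tree beneath it is removed, # which is how array / group prefixes are cleared on the local filesystem; # plain file keys fall through to the unlink below.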
shutil.rmtree(path) else: await asyncio.to_thread(path.unlink, True) # Q: we may want to raise if path is missing async def delete_dir(self, prefix: str) -> None: # docstring inherited self._check_writable() path = self.root / prefix if path.is_dir(): shutil.rmtree(path) elif path.is_file(): raise ValueError(f"delete_dir was passed a {prefix=!r} that is a file") else: # Non-existent directory # This path is tested by test_group:test_create_creates_parents for one pass async def exists(self, key: str) -> bool: # docstring inherited path = self.root / key return await asyncio.to_thread(path.is_file) async def list(self) -> AsyncIterator[str]: # docstring inherited to_strip = self.root.as_posix() + "/" for p in list(self.root.rglob("*")): if p.is_file(): yield p.as_posix().replace(to_strip, "") async def list_prefix(self, prefix: str) -> AsyncIterator[str]: # docstring inherited to_strip = self.root.as_posix() + "/" prefix = prefix.rstrip("/") for p in (self.root / prefix).rglob("*"): if p.is_file(): yield p.as_posix().replace(to_strip, "") async def list_dir(self, prefix: str) -> AsyncIterator[str]: # docstring inherited base = self.root / prefix try: key_iter = base.iterdir() for key in key_iter: yield key.relative_to(base).as_posix() except (FileNotFoundError, NotADirectoryError): pass async def getsize(self, key: str) -> int: return os.path.getsize(self.root / key) zarr-python-3.0.6/src/zarr/storage/_logging.py000066400000000000000000000173251476711733500214130ustar00rootroot00000000000000from __future__ import annotations import inspect import logging import sys import time from collections import defaultdict from contextlib import contextmanager from typing import TYPE_CHECKING, Any, Self, TypeVar from zarr.abc.store import Store from zarr.storage._wrapper import WrapperStore if TYPE_CHECKING: from collections.abc import AsyncGenerator, Generator, Iterable from zarr.abc.store import ByteRequest from zarr.core.buffer import Buffer, BufferPrototype counter: defaultdict[str, int] T_Store = TypeVar("T_Store", bound=Store) class LoggingStore(WrapperStore[T_Store]): """ Store wrapper that logs all calls to the wrapped store. 
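Each delegated call is logged together with its duration, and per-method call counts accumulate in ``counter``. A hedged usage sketch (in-memory store assumed): >>> from zarr.storage import LoggingStore, MemoryStore # doctest: +SKIP >>> store = LoggingStore(MemoryStore(), log_level="INFO") # doctest: +SKIP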
Parameters ---------- store : Store Store to wrap log_level : str Log level log_handler : logging.Handler Log handler Attributes ---------- counter : dict Counter of number of times each method has been called """ counter: defaultdict[str, int] def __init__( self, store: T_Store, log_level: str = "DEBUG", log_handler: logging.Handler | None = None, ) -> None: super().__init__(store) self.counter = defaultdict(int) self.log_level = log_level self.log_handler = log_handler self._configure_logger(log_level, log_handler) def _configure_logger( self, log_level: str = "DEBUG", log_handler: logging.Handler | None = None ) -> None: self.log_level = log_level self.logger = logging.getLogger(f"LoggingStore({self._store})") self.logger.setLevel(log_level) if not self.logger.hasHandlers(): if not log_handler: log_handler = self._default_handler() # Add handler to logger self.logger.addHandler(log_handler) def _default_handler(self) -> logging.Handler: """Define a default log handler""" handler = logging.StreamHandler(stream=sys.stdout) handler.setLevel(self.log_level) handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) return handler @contextmanager def log(self, hint: Any = "") -> Generator[None, None, None]: """Context manager to log method calls Each call to the wrapped store is logged to the configured logger and added to the counter dict. """ method = inspect.stack()[2].function op = f"{type(self._store).__name__}.{method}" if hint: op = f"{op}({hint})" self.logger.info(" Calling %s", op) start_time = time.time() try: self.counter[method] += 1 yield finally: end_time = time.time() self.logger.info("Finished %s [%.2f s]", op, end_time - start_time) @classmethod async def open(cls: type[Self], store_cls: type[T_Store], *args: Any, **kwargs: Any) -> Self: log_level = kwargs.pop("log_level", "DEBUG") log_handler = kwargs.pop("log_handler", None) store = store_cls(*args, **kwargs) await store._open() return cls(store=store, log_level=log_level, log_handler=log_handler) @property def supports_writes(self) -> bool: with self.log(): return self._store.supports_writes @property def supports_deletes(self) -> bool: with self.log(): return self._store.supports_deletes @property def supports_partial_writes(self) -> bool: with self.log(): return self._store.supports_partial_writes @property def supports_listing(self) -> bool: with self.log(): return self._store.supports_listing @property def read_only(self) -> bool: with self.log(): return self._store.read_only @property def _is_open(self) -> bool: with self.log(): return self._store._is_open @_is_open.setter def _is_open(self, value: bool) -> None: raise NotImplementedError("LoggingStore must be opened via the `_open` method") async def _open(self) -> None: with self.log(): return await self._store._open() async def _ensure_open(self) -> None: with self.log(): return await self._store._ensure_open() async def is_empty(self, prefix: str = "") -> bool: # docstring inherited with self.log(): return await self._store.is_empty(prefix=prefix) async def clear(self) -> None: # docstring inherited with self.log(): return await self._store.clear() def __str__(self) -> str: return f"logging-{self._store}" def __repr__(self) -> str: return f"LoggingStore({self._store.__class__.__name__}, '{self._store}')" def __eq__(self, other: object) -> bool: with self.log(other): return type(self) is type(other) and self._store.__eq__(other._store) # type: ignore[attr-defined] async def get( self, key: str, prototype: BufferPrototype, 
byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited with self.log(key): return await self._store.get(key=key, prototype=prototype, byte_range=byte_range) async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited keys = ",".join([k[0] for k in key_ranges]) with self.log(keys): return await self._store.get_partial_values(prototype=prototype, key_ranges=key_ranges) async def exists(self, key: str) -> bool: # docstring inherited with self.log(key): return await self._store.exists(key) async def set(self, key: str, value: Buffer) -> None: # docstring inherited with self.log(key): return await self._store.set(key=key, value=value) async def set_if_not_exists(self, key: str, value: Buffer) -> None: # docstring inherited with self.log(key): return await self._store.set_if_not_exists(key=key, value=value) async def delete(self, key: str) -> None: # docstring inherited with self.log(key): return await self._store.delete(key=key) async def set_partial_values( self, key_start_values: Iterable[tuple[str, int, bytes | bytearray | memoryview]] ) -> None: # docstring inherited keys = ",".join([k[0] for k in key_start_values]) with self.log(keys): return await self._store.set_partial_values(key_start_values=key_start_values) async def list(self) -> AsyncGenerator[str, None]: # docstring inherited with self.log(): async for key in self._store.list(): yield key async def list_prefix(self, prefix: str) -> AsyncGenerator[str, None]: # docstring inherited with self.log(prefix): async for key in self._store.list_prefix(prefix=prefix): yield key async def list_dir(self, prefix: str) -> AsyncGenerator[str, None]: # docstring inherited with self.log(prefix): async for key in self._store.list_dir(prefix=prefix): yield key async def delete_dir(self, prefix: str) -> None: # docstring inherited with self.log(prefix): await self._store.delete_dir(prefix=prefix) async def getsize(self, key: str) -> int: with self.log(key): return await self._store.getsize(key) async def getsize_prefix(self, prefix: str) -> int: with self.log(prefix): return await self._store.getsize_prefix(prefix) zarr-python-3.0.6/src/zarr/storage/_memory.py000066400000000000000000000170611476711733500212720ustar00rootroot00000000000000from __future__ import annotations from logging import getLogger from typing import TYPE_CHECKING, Self from zarr.abc.store import ByteRequest, Store from zarr.core.buffer import Buffer, gpu from zarr.core.common import concurrent_map from zarr.storage._utils import _normalize_byte_range_index if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterable, MutableMapping from zarr.core.buffer import BufferPrototype logger = getLogger(__name__) class MemoryStore(Store): """ In-memory store. 
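For example (an illustrative sketch)::

    import zarr
    from zarr.storage import MemoryStore

    store = MemoryStore()
    z = zarr.create_array(store=store, shape=(2, 2), chunks=(1, 1), dtype="float64")
    z[:] = 42.0  # chunks live in an in-process dict; nothing touches disk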
Parameters ---------- store_dict : dict Initial data read_only : bool Whether the store is read-only Attributes ---------- supports_writes supports_deletes supports_partial_writes supports_listing """ supports_writes: bool = True supports_deletes: bool = True supports_partial_writes: bool = True supports_listing: bool = True _store_dict: MutableMapping[str, Buffer] def __init__( self, store_dict: MutableMapping[str, Buffer] | None = None, *, read_only: bool = False, ) -> None: super().__init__(read_only=read_only) if store_dict is None: store_dict = {} self._store_dict = store_dict async def clear(self) -> None: # docstring inherited self._store_dict.clear() def __str__(self) -> str: return f"memory://{id(self._store_dict)}" def __repr__(self) -> str: return f"MemoryStore('{self}')" def __eq__(self, other: object) -> bool: return ( isinstance(other, type(self)) and self._store_dict == other._store_dict and self.read_only == other.read_only ) async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited if not self._is_open: await self._open() assert isinstance(key, str) try: value = self._store_dict[key] start, stop = _normalize_byte_range_index(value, byte_range) return prototype.buffer.from_buffer(value[start:stop]) except KeyError: return None async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited # All the key-ranges arguments goes with the same prototype async def _get(key: str, byte_range: ByteRequest | None) -> Buffer | None: return await self.get(key, prototype=prototype, byte_range=byte_range) return await concurrent_map(key_ranges, _get, limit=None) async def exists(self, key: str) -> bool: # docstring inherited return key in self._store_dict async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None: # docstring inherited self._check_writable() await self._ensure_open() assert isinstance(key, str) if not isinstance(value, Buffer): raise TypeError( f"MemoryStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." ) if byte_range is not None: buf = self._store_dict[key] buf[byte_range[0] : byte_range[1]] = value self._store_dict[key] = buf else: self._store_dict[key] = value async def set_if_not_exists(self, key: str, value: Buffer) -> None: # docstring inherited self._check_writable() await self._ensure_open() self._store_dict.setdefault(key, value) async def delete(self, key: str) -> None: # docstring inherited self._check_writable() try: del self._store_dict[key] except KeyError: logger.debug("Key %s does not exist.", key) async def set_partial_values(self, key_start_values: Iterable[tuple[str, int, bytes]]) -> None: # docstring inherited raise NotImplementedError async def list(self) -> AsyncIterator[str]: # docstring inherited for key in self._store_dict: yield key async def list_prefix(self, prefix: str) -> AsyncIterator[str]: # docstring inherited # note: we materialize all dict keys into a list here so we can mutate the dict in-place (e.g. 
in delete_prefix) for key in list(self._store_dict): if key.startswith(prefix): yield key async def list_dir(self, prefix: str) -> AsyncIterator[str]: # docstring inherited prefix = prefix.rstrip("/") if prefix == "": keys_unique = {k.split("/")[0] for k in self._store_dict} else: # Our dictionary doesn't contain directory markers, but we want to include # a pseudo directory when there's a nested item and we're listing an # intermediate level. keys_unique = { key.removeprefix(prefix + "/").split("/")[0] for key in self._store_dict if key.startswith(prefix + "/") and key != prefix } for key in keys_unique: yield key class GpuMemoryStore(MemoryStore): """A GPU only memory store that stores every chunk in GPU memory irrespective of the original location. The dictionary of buffers to initialize this memory store with *must* be GPU Buffers. Writing data to this store through ``.set`` will move the buffer to the GPU if necessary. Parameters ---------- store_dict : MutableMapping, optional A mutable mapping with string keys and :class:`zarr.core.buffer.gpu.Buffer` values. read_only : bool Whether to open the store in read-only mode. """ _store_dict: MutableMapping[str, gpu.Buffer] # type: ignore[assignment] def __init__( self, store_dict: MutableMapping[str, gpu.Buffer] | None = None, *, read_only: bool = False, ) -> None: super().__init__(store_dict=store_dict, read_only=read_only) # type: ignore[arg-type] def __str__(self) -> str: return f"gpumemory://{id(self._store_dict)}" def __repr__(self) -> str: return f"GpuMemoryStore('{self}')" @classmethod def from_dict(cls, store_dict: MutableMapping[str, Buffer]) -> Self: """ Create a GpuMemoryStore from a dictionary of buffers at any location. The dictionary backing the newly created ``GpuMemoryStore`` will not be the same as ``store_dict``. Parameters ---------- store_dict : mapping A mapping of strings keys to arbitrary Buffers. The buffer data will be moved into a :class:`gpu.Buffer`. Returns ------- GpuMemoryStore """ gpu_store_dict = {k: gpu.Buffer.from_buffer(v) for k, v in store_dict.items()} return cls(gpu_store_dict) async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None: # docstring inherited self._check_writable() assert isinstance(key, str) if not isinstance(value, Buffer): raise TypeError( f"GpuMemoryStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." 
) # Convert to gpu.Buffer gpu_value = value if isinstance(value, gpu.Buffer) else gpu.Buffer.from_buffer(value) await super().set(key, gpu_value, byte_range=byte_range) zarr-python-3.0.6/src/zarr/storage/_utils.py000066400000000000000000000070361476711733500211230ustar00rootroot00000000000000from __future__ import annotations import re from pathlib import Path from typing import TYPE_CHECKING, TypeVar from zarr.abc.store import OffsetByteRequest, RangeByteRequest, SuffixByteRequest if TYPE_CHECKING: from collections.abc import Iterable, Mapping from zarr.abc.store import ByteRequest from zarr.core.buffer import Buffer def normalize_path(path: str | bytes | Path | None) -> str: if path is None: result = "" elif isinstance(path, bytes): result = str(path, "ascii") # handle pathlib.Path elif isinstance(path, Path): result = str(path) elif isinstance(path, str): result = path else: raise TypeError(f'Object {path} has an invalid type for "path": {type(path).__name__}') # convert backslash to forward slash result = result.replace("\\", "/") # remove leading and trailing slashes result = result.strip("/") # collapse any repeated slashes pat = re.compile(r"//+") result = pat.sub("/", result) # disallow path segments with just '.' or '..' segments = result.split("/") if any(s in {".", ".."} for s in segments): raise ValueError( f"The path {path!r} is invalid because its string representation contains '.' or '..' segments." ) return result def _normalize_byte_range_index(data: Buffer, byte_range: ByteRequest | None) -> tuple[int, int]: """ Convert an ByteRequest into an explicit start and stop """ if byte_range is None: start = 0 stop = len(data) + 1 elif isinstance(byte_range, RangeByteRequest): start = byte_range.start stop = byte_range.end elif isinstance(byte_range, OffsetByteRequest): start = byte_range.offset stop = len(data) + 1 elif isinstance(byte_range, SuffixByteRequest): start = len(data) - byte_range.suffix stop = len(data) + 1 else: raise ValueError(f"Unexpected byte_range, got {byte_range}.") return (start, stop) def _join_paths(paths: Iterable[str]) -> str: """ Filter out instances of '' and join the remaining strings with '/'. Because the root node of a zarr hierarchy is represented by an empty string, """ return "/".join(filter(lambda v: v != "", paths)) def _normalize_paths(paths: Iterable[str]) -> tuple[str, ...]: """ Normalize the input paths according to the normalization scheme used for zarr node paths. If any two paths normalize to the same value, raise a ValueError. """ path_map: dict[str, str] = {} for path in paths: parsed = normalize_path(path) if parsed in path_map: msg = ( f"After normalization, the value '{path}' collides with '{path_map[parsed]}'. " f"Both '{path}' and '{path_map[parsed]}' normalize to the same value: '{parsed}'. " f"You should use either '{path}' or '{path_map[parsed]}', but not both." ) raise ValueError(msg) path_map[parsed] = path return tuple(path_map.keys()) T = TypeVar("T") def _normalize_path_keys(data: Mapping[str, T]) -> dict[str, T]: """ Normalize the keys of the input dict according to the normalization scheme used for zarr node paths. If any two keys in the input normalize to the same value, raise a ValueError. Returns a dict where the keys are the elements of the input and the values are the normalized form of each key. 
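Note (clarification, not upstream wording): in the implementation below it is
the *normalized* paths that become the keys of the returned dict, e.g.
``_normalize_path_keys({"/a//b/": 1})`` returns ``{"a/b": 1}``.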
""" parsed_keys = _normalize_paths(data.keys()) return dict(zip(parsed_keys, data.values(), strict=True)) zarr-python-3.0.6/src/zarr/storage/_wrapper.py000066400000000000000000000113471476711733500214430ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Generic, TypeVar if TYPE_CHECKING: from collections.abc import AsyncGenerator, AsyncIterator, Iterable from types import TracebackType from typing import Any, Self from zarr.abc.store import ByteRequest from zarr.core.buffer import Buffer, BufferPrototype from zarr.core.common import BytesLike from zarr.abc.store import Store T_Store = TypeVar("T_Store", bound=Store) class WrapperStore(Store, Generic[T_Store]): """ A store class that wraps an existing ``Store`` instance. By default all of the store methods are delegated to the wrapped store instance, which is accessible via the ``._store`` attribute of this class. Use this class to modify or extend the behavior of the other store classes. """ _store: T_Store def __init__(self, store: T_Store) -> None: self._store = store @classmethod async def open(cls: type[Self], store_cls: type[T_Store], *args: Any, **kwargs: Any) -> Self: store = store_cls(*args, **kwargs) await store._open() return cls(store=store) def __enter__(self) -> Self: return type(self)(self._store.__enter__()) def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: return self._store.__exit__(exc_type, exc_value, traceback) async def _open(self) -> None: await self._store._open() async def _ensure_open(self) -> None: await self._store._ensure_open() async def is_empty(self, prefix: str) -> bool: return await self._store.is_empty(prefix) @property def _is_open(self) -> bool: return self._store._is_open @_is_open.setter def _is_open(self, value: bool) -> None: raise NotImplementedError("WrapperStore must be opened via the `_open` method") async def clear(self) -> None: return await self._store.clear() @property def read_only(self) -> bool: return self._store.read_only def _check_writable(self) -> None: return self._store._check_writable() def __eq__(self, value: object) -> bool: return type(self) is type(value) and self._store.__eq__(value._store) # type: ignore[attr-defined] def __str__(self) -> str: return f"wrapping-{self._store}" def __repr__(self) -> str: return f"WrapperStore({self._store.__class__.__name__}, '{self._store}')" async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: return await self._store.get(key, prototype, byte_range) async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: return await self._store.get_partial_values(prototype, key_ranges) async def exists(self, key: str) -> bool: return await self._store.exists(key) async def set(self, key: str, value: Buffer) -> None: await self._store.set(key, value) async def set_if_not_exists(self, key: str, value: Buffer) -> None: return await self._store.set_if_not_exists(key, value) async def _set_many(self, values: Iterable[tuple[str, Buffer]]) -> None: await self._store._set_many(values) @property def supports_writes(self) -> bool: return self._store.supports_writes @property def supports_deletes(self) -> bool: return self._store.supports_deletes async def delete(self, key: str) -> None: await self._store.delete(key) @property def supports_partial_writes(self) -> bool: return 
self._store.supports_partial_writes async def set_partial_values( self, key_start_values: Iterable[tuple[str, int, BytesLike]] ) -> None: return await self._store.set_partial_values(key_start_values) @property def supports_listing(self) -> bool: return self._store.supports_listing def list(self) -> AsyncIterator[str]: return self._store.list() def list_prefix(self, prefix: str) -> AsyncIterator[str]: return self._store.list_prefix(prefix) def list_dir(self, prefix: str) -> AsyncIterator[str]: return self._store.list_dir(prefix) async def delete_dir(self, prefix: str) -> None: return await self._store.delete_dir(prefix) def close(self) -> None: self._store.close() async def _get_many( self, requests: Iterable[tuple[str, BufferPrototype, ByteRequest | None]] ) -> AsyncGenerator[tuple[str, Buffer | None], None]: async for req in self._store._get_many(requests): yield req zarr-python-3.0.6/src/zarr/storage/_zip.py000066400000000000000000000216041476711733500205620ustar00rootroot00000000000000from __future__ import annotations import os import threading import time import zipfile from pathlib import Path from typing import TYPE_CHECKING, Any, Literal from zarr.abc.store import ( ByteRequest, OffsetByteRequest, RangeByteRequest, Store, SuffixByteRequest, ) from zarr.core.buffer import Buffer, BufferPrototype if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterable ZipStoreAccessModeLiteral = Literal["r", "w", "a"] class ZipStore(Store): """ Storage class using a ZIP file. Parameters ---------- path : str Location of file. mode : str, optional One of 'r' to read an existing file, 'w' to truncate and write a new file, 'a' to append to an existing file, or 'x' to exclusively create and write a new file. compression : int, optional Compression method to use when writing to the archive. allowZip64 : bool, optional If True (the default) will create ZIP files that use the ZIP64 extensions when the zipfile is larger than 2 GiB. If False will raise an exception when the ZIP file would require ZIP64 extensions. Attributes ---------- allowed_exceptions supports_writes supports_deletes supports_partial_writes supports_listing path compression allowZip64 """ supports_writes: bool = True supports_deletes: bool = False supports_partial_writes: bool = False supports_listing: bool = True path: Path compression: int allowZip64: bool _zf: zipfile.ZipFile _lock: threading.RLock def __init__( self, path: Path | str, *, mode: ZipStoreAccessModeLiteral = "r", read_only: bool | None = None, compression: int = zipfile.ZIP_STORED, allowZip64: bool = True, ) -> None: if read_only is None: read_only = mode == "r" super().__init__(read_only=read_only) if isinstance(path, str): path = Path(path) assert isinstance(path, Path) self.path = path # root? 
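# NOTE (comment added for clarity, not upstream): ``path`` locates the ZIP
# archive itself, and store keys map to member names inside the archive. The
# requested ``mode`` is stashed on ``_zmode`` because the underlying
# ``zipfile.ZipFile`` is only opened lazily in ``_sync_open``.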
self._zmode = mode self.compression = compression self.allowZip64 = allowZip64 def _sync_open(self) -> None: if self._is_open: raise ValueError("store is already open") self._lock = threading.RLock() self._zf = zipfile.ZipFile( self.path, mode=self._zmode, compression=self.compression, allowZip64=self.allowZip64, ) self._is_open = True async def _open(self) -> None: self._sync_open() def __getstate__(self) -> dict[str, Any]: # We need a copy to not modify the state of the original store state = self.__dict__.copy() for attr in ["_zf", "_lock"]: state.pop(attr, None) return state def __setstate__(self, state: dict[str, Any]) -> None: self.__dict__ = state self._is_open = False self._sync_open() def close(self) -> None: # docstring inherited super().close() with self._lock: self._zf.close() async def clear(self) -> None: # docstring inherited with self._lock: self._check_writable() self._zf.close() os.remove(self.path) self._zf = zipfile.ZipFile( self.path, mode="w", compression=self.compression, allowZip64=self.allowZip64 ) def __str__(self) -> str: return f"zip://{self.path}" def __repr__(self) -> str: return f"ZipStore('{self}')" def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) and self.path == other.path def _get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: if not self._is_open: self._sync_open() # docstring inherited try: with self._zf.open(key) as f: # will raise KeyError if byte_range is None: return prototype.buffer.from_bytes(f.read()) elif isinstance(byte_range, RangeByteRequest): f.seek(byte_range.start) return prototype.buffer.from_bytes(f.read(byte_range.end - f.tell())) size = f.seek(0, os.SEEK_END) if isinstance(byte_range, OffsetByteRequest): f.seek(byte_range.offset) elif isinstance(byte_range, SuffixByteRequest): f.seek(max(0, size - byte_range.suffix)) else: raise TypeError(f"Unexpected byte_range, got {byte_range}.") return prototype.buffer.from_bytes(f.read()) except KeyError: return None async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None, ) -> Buffer | None: # docstring inherited assert isinstance(key, str) with self._lock: return self._get(key, prototype=prototype, byte_range=byte_range) async def get_partial_values( self, prototype: BufferPrototype, key_ranges: Iterable[tuple[str, ByteRequest | None]], ) -> list[Buffer | None]: # docstring inherited out = [] with self._lock: for key, byte_range in key_ranges: out.append(self._get(key, prototype=prototype, byte_range=byte_range)) return out def _set(self, key: str, value: Buffer) -> None: if not self._is_open: self._sync_open() # generally, this should be called inside a lock keyinfo = zipfile.ZipInfo(filename=key, date_time=time.localtime(time.time())[:6]) keyinfo.compress_type = self.compression if keyinfo.filename[-1] == os.sep: keyinfo.external_attr = 0o40775 << 16 # drwxrwxr-x keyinfo.external_attr |= 0x10 # MS-DOS directory flag else: keyinfo.external_attr = 0o644 << 16 # ?rw-r--r-- self._zf.writestr(keyinfo, value.to_bytes()) async def set(self, key: str, value: Buffer) -> None: # docstring inherited self._check_writable() if not self._is_open: self._sync_open() assert isinstance(key, str) if not isinstance(value, Buffer): raise TypeError( f"ZipStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." 
) with self._lock: self._set(key, value) async def set_partial_values(self, key_start_values: Iterable[tuple[str, int, bytes]]) -> None: raise NotImplementedError async def set_if_not_exists(self, key: str, value: Buffer) -> None: self._check_writable() with self._lock: members = self._zf.namelist() if key not in members: self._set(key, value) async def delete_dir(self, prefix: str) -> None: # only raise NotImplementedError if any keys are found self._check_writable() if prefix != "" and not prefix.endswith("/"): prefix += "/" async for _ in self.list_prefix(prefix): raise NotImplementedError async def delete(self, key: str) -> None: # docstring inherited # we choose to only raise NotImplementedError here if the key exists # this allows the array/group APIs to avoid the overhead of existence checks self._check_writable() if await self.exists(key): raise NotImplementedError async def exists(self, key: str) -> bool: # docstring inherited with self._lock: try: self._zf.getinfo(key) except KeyError: return False else: return True async def list(self) -> AsyncIterator[str]: # docstring inherited with self._lock: for key in self._zf.namelist(): yield key async def list_prefix(self, prefix: str) -> AsyncIterator[str]: # docstring inherited async for key in self.list(): if key.startswith(prefix): yield key async def list_dir(self, prefix: str) -> AsyncIterator[str]: # docstring inherited prefix = prefix.rstrip("/") keys = self._zf.namelist() seen = set() if prefix == "": keys_unique = {k.split("/")[0] for k in keys} for key in keys_unique: if key not in seen: seen.add(key) yield key else: for key in keys: if key.startswith(prefix + "/") and key.strip("/") != prefix: k = key.removeprefix(prefix + "/").split("/")[0] if k not in seen: seen.add(k) yield k zarr-python-3.0.6/src/zarr/testing/000077500000000000000000000000001476711733500172555ustar00rootroot00000000000000zarr-python-3.0.6/src/zarr/testing/__init__.py000066400000000000000000000005431476711733500213700ustar00rootroot00000000000000import importlib.util import warnings if importlib.util.find_spec("pytest") is not None: from zarr.testing.store import StoreTests else: warnings.warn("pytest not installed, skipping test suite", stacklevel=2) from zarr.testing.utils import assert_bytes_equal # TODO: import public buffer tests? 
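# NOTE (comment added for clarity, not upstream): ``StoreTests`` is importable
# from this module only when pytest is available; otherwise the warning above
# fires and only the plain utilities (e.g. ``assert_bytes_equal``) remain
# usable.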
__all__ = ["StoreTests", "assert_bytes_equal"] zarr-python-3.0.6/src/zarr/testing/buffer.py000066400000000000000000000040421476711733500211000ustar00rootroot00000000000000# mypy: ignore-errors from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal import numpy as np import numpy.typing as npt from zarr.core.buffer import Buffer, BufferPrototype, cpu from zarr.storage import MemoryStore if TYPE_CHECKING: from collections.abc import Iterable from typing import Self __all__ = [ "NDBufferUsingTestNDArrayLike", "StoreExpectingTestBuffer", "TestBuffer", ] class TestNDArrayLike(np.ndarray): """An example of a ndarray-like class""" __test__ = False class TestBuffer(cpu.Buffer): """Example of a custom Buffer that handles ArrayLike""" __test__ = False class NDBufferUsingTestNDArrayLike(cpu.NDBuffer): """Example of a custom NDBuffer that handles MyNDArrayLike""" @classmethod def create( cls, *, shape: Iterable[int], dtype: npt.DTypeLike, order: Literal["C", "F"] = "C", fill_value: Any | None = None, ) -> Self: """Overwrite `NDBuffer.create` to create an TestNDArrayLike instance""" ret = cls(TestNDArrayLike(shape=shape, dtype=dtype, order=order)) if fill_value is not None: ret.fill(fill_value) return ret class StoreExpectingTestBuffer(MemoryStore): """Example of a custom Store that expect MyBuffer for all its non-metadata We assume that keys containing "json" is metadata """ async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None: if "json" not in key: assert isinstance(value, TestBuffer) await super().set(key, value, byte_range) async def get( self, key: str, prototype: BufferPrototype, byte_range: tuple[int, int | None] | None = None, ) -> Buffer | None: if "json" not in key: assert prototype.buffer is TestBuffer ret = await super().get(key=key, prototype=prototype, byte_range=byte_range) if ret is not None: assert isinstance(ret, prototype.buffer) return ret zarr-python-3.0.6/src/zarr/testing/stateful.py000066400000000000000000000415401476711733500214620ustar00rootroot00000000000000import builtins from typing import Any import hypothesis.extra.numpy as npst import hypothesis.strategies as st import numpy as np from hypothesis import assume, note from hypothesis.stateful import ( RuleBasedStateMachine, initialize, invariant, precondition, rule, ) from hypothesis.strategies import DataObject import zarr from zarr import Array from zarr.abc.store import Store from zarr.core.buffer import Buffer, BufferPrototype, cpu, default_buffer_prototype from zarr.core.sync import SyncMixin from zarr.storage import LocalStore, MemoryStore from zarr.testing.strategies import key_ranges, node_names, np_array_and_chunks, numpy_arrays from zarr.testing.strategies import keys as zarr_keys MAX_BINARY_SIZE = 100 def split_prefix_name(path: str) -> tuple[str, str]: split = path.rsplit("/", maxsplit=1) if len(split) > 1: prefix, name = split else: prefix = "" (name,) = split return prefix, name class ZarrHierarchyStateMachine(SyncMixin, RuleBasedStateMachine): """ This state machine models operations that modify a zarr store's hierarchy. That is, user actions that modify arrays/groups as well as list operations. It is intended to be used by external stores, and compares their results to a MemoryStore that is assumed to be perfect. 
""" def __init__(self, store: Store) -> None: super().__init__() self.store = store self.model = MemoryStore() zarr.group(store=self.model) # Track state of the hierarchy, these should contain fully qualified paths self.all_groups: set[str] = set() self.all_arrays: set[str] = set() @initialize() def init_store(self) -> None: # This lets us reuse the fixture provided store. self._sync(self.store.clear()) zarr.group(store=self.store) def can_add(self, path: str) -> bool: return path not in self.all_groups and path not in self.all_arrays # -------------------- store operations ----------------------- @rule(name=node_names, data=st.data()) def add_group(self, name: str, data: DataObject) -> None: # Handle possible case-insensitive file systems (e.g. MacOS) if isinstance(self.store, LocalStore): name = name.lower() if self.all_groups: parent = data.draw(st.sampled_from(sorted(self.all_groups)), label="Group parent") else: parent = "" path = f"{parent}/{name}".lstrip("/") assume(self.can_add(path)) note(f"Adding group: path='{path}'") self.all_groups.add(path) zarr.group(store=self.store, path=path) zarr.group(store=self.model, path=path) @rule( data=st.data(), name=node_names, array_and_chunks=np_array_and_chunks(arrays=numpy_arrays(zarr_formats=st.just(3))), ) def add_array( self, data: DataObject, name: str, array_and_chunks: tuple[np.ndarray[Any, Any], tuple[int, ...]], ) -> None: # Handle possible case-insensitive file systems (e.g. MacOS) if isinstance(self.store, LocalStore): name = name.lower() array, chunks = array_and_chunks fill_value = data.draw(npst.from_dtype(array.dtype)) if self.all_groups: parent = data.draw(st.sampled_from(sorted(self.all_groups)), label="Array parent") else: parent = "" # TODO: support creating deeper paths # TODO: support overwriting potentially by just skipping `self.can_add` path = f"{parent}/{name}".lstrip("/") assume(self.can_add(path)) note(f"Adding array: path='{path}' shape={array.shape} chunks={chunks}") for store in [self.store, self.model]: zarr.array(array, chunks=chunks, path=path, store=store, fill_value=fill_value) self.all_arrays.add(path) # @precondition(lambda self: bool(self.all_groups)) # @precondition(lambda self: bool(self.all_arrays)) # @rule(data=st.data()) # def move_array(self, data): # array_path = data.draw(st.sampled_from(self.all_arrays), label="Array move source") # to_group = data.draw(st.sampled_from(self.all_groups), label="Array move destination") # # fixme renaiming to self? # array_name = os.path.basename(array_path) # assume(self.model.can_add(to_group, array_name)) # new_path = f"{to_group}/{array_name}".lstrip("/") # note(f"moving array '{array_path}' -> '{new_path}'") # self.model.rename(array_path, new_path) # self.repo.store.rename(array_path, new_path) # @precondition(lambda self: len(self.all_groups) >= 2) # @rule(data=st.data()) # def move_group(self, data): # from_group = data.draw(st.sampled_from(self.all_groups), label="Group move source") # to_group = data.draw(st.sampled_from(self.all_groups), label="Group move destination") # assume(not to_group.startswith(from_group)) # from_group_name = os.path.basename(from_group) # assume(self.model.can_add(to_group, from_group_name)) # # fixme renaiming to self? 
# new_path = f"{to_group}/{from_group_name}".lstrip("/") # note(f"moving group '{from_group}' -> '{new_path}'") # self.model.rename(from_group, new_path) # self.repo.store.rename(from_group, new_path) @precondition(lambda self: self.store.supports_deletes) @precondition(lambda self: len(self.all_arrays) >= 1) @rule(data=st.data()) def delete_array_using_del(self, data: DataObject) -> None: array_path = data.draw( st.sampled_from(sorted(self.all_arrays)), label="Array deletion target" ) prefix, array_name = split_prefix_name(array_path) note(f"Deleting array '{array_path}' ({prefix=!r}, {array_name=!r}) using del") for store in [self.model, self.store]: group = zarr.open_group(path=prefix, store=store) group[array_name] # check that it exists del group[array_name] self.all_arrays.remove(array_path) @precondition(lambda self: self.store.supports_deletes) @precondition(lambda self: len(self.all_groups) >= 2) # fixme don't delete root @rule(data=st.data()) def delete_group_using_del(self, data: DataObject) -> None: group_path = data.draw( st.sampled_from(sorted(self.all_groups)), label="Group deletion target" ) prefix, group_name = split_prefix_name(group_path) note(f"Deleting group '{group_path=!r}', {prefix=!r}, {group_name=!r} using delete") members = zarr.open_group(store=self.model, path=group_path).members(max_depth=None) for _, obj in members: if isinstance(obj, Array): self.all_arrays.remove(obj.path) else: self.all_groups.remove(obj.path) for store in [self.store, self.model]: group = zarr.open_group(store=store, path=prefix) group[group_name] # check that it exists del group[group_name] if group_path != "/": # The root group is always present self.all_groups.remove(group_path) # # --------------- assertions ----------------- # def check_group_arrays(self, group): # # note(f"Checking arrays of '{group}'") # g1 = self.model.get_group(group) # g2 = zarr.open_group(path=group, mode="r", store=self.repo.store) # model_arrays = sorted(g1.arrays(), key=itemgetter(0)) # our_arrays = sorted(g2.arrays(), key=itemgetter(0)) # for (n1, a1), (n2, a2) in zip_longest(model_arrays, our_arrays): # assert n1 == n2 # assert_array_equal(a1, a2) # def check_subgroups(self, group_path): # g1 = self.model.get_group(group_path) # g2 = zarr.open_group(path=group_path, mode="r", store=self.repo.store) # g1_children = [name for (name, _) in g1.groups()] # g2_children = [name for (name, _) in g2.groups()] # # note(f"Checking {len(g1_children)} subgroups of group '{group_path}'") # assert g1_children == g2_children # def check_list_prefix_from_group(self, group): # prefix = f"meta/root/{group}" # model_list = sorted(self.model.list_prefix(prefix)) # al_list = sorted(self.repo.store.list_prefix(prefix)) # # note(f"Checking {len(model_list)} keys under '{prefix}'") # assert model_list == al_list # prefix = f"data/root/{group}" # model_list = sorted(self.model.list_prefix(prefix)) # al_list = sorted(self.repo.store.list_prefix(prefix)) # # note(f"Checking {len(model_list)} keys under '{prefix}'") # assert model_list == al_list # @precondition(lambda self: self.model.is_persistent_session()) # @rule(data=st.data()) # def check_group_path(self, data): # t0 = time.time() # group = data.draw(st.sampled_from(self.all_groups)) # self.check_list_prefix_from_group(group) # self.check_subgroups(group) # self.check_group_arrays(group) # t1 = time.time() # note(f"Checks took {t1 - t0} sec.") @invariant() def check_list_prefix_from_root(self) -> None: model_list = self._sync_iter(self.model.list_prefix("")) store_list = 
self._sync_iter(self.store.list_prefix("")) note(f"Checking {len(model_list)} keys") assert sorted(model_list) == sorted(store_list) class SyncStoreWrapper(zarr.core.sync.SyncMixin): def __init__(self, store: Store) -> None: """Synchronous Store wrapper This class holds synchronous methods that map to async methods of Store classes. The synchronous wrapper is needed because hypothesis' stateful testing infra does not support asyncio so we redefine sync versions of the Store API. https://github.com/HypothesisWorks/hypothesis/issues/3712#issuecomment-1668999041 """ self.store = store @property def read_only(self) -> bool: return self.store.read_only def set(self, key: str, data_buffer: Buffer) -> None: return self._sync(self.store.set(key, data_buffer)) def list(self) -> builtins.list[str]: return self._sync_iter(self.store.list()) def get(self, key: str, prototype: BufferPrototype) -> Buffer | None: return self._sync(self.store.get(key, prototype=prototype)) def get_partial_values( self, key_ranges: builtins.list[Any], prototype: BufferPrototype ) -> builtins.list[Buffer | None]: return self._sync(self.store.get_partial_values(prototype=prototype, key_ranges=key_ranges)) def delete(self, path: str) -> None: return self._sync(self.store.delete(path)) def is_empty(self, prefix: str) -> bool: return self._sync(self.store.is_empty(prefix=prefix)) def clear(self) -> None: return self._sync(self.store.clear()) def exists(self, key: str) -> bool: return self._sync(self.store.exists(key)) def list_dir(self, prefix: str) -> None: raise NotImplementedError def list_prefix(self, prefix: str) -> None: raise NotImplementedError def set_partial_values(self, key_start_values: Any) -> None: raise NotImplementedError @property def supports_listing(self) -> bool: return self.store.supports_listing @property def supports_partial_writes(self) -> bool: return self.supports_partial_writes @property def supports_writes(self) -> bool: return self.store.supports_writes @property def supports_deletes(self) -> bool: return self.store.supports_deletes class ZarrStoreStateMachine(RuleBasedStateMachine): """ " Zarr store state machine This is a subclass of a Hypothesis RuleBasedStateMachine. It is testing a framework to ensure that the state of a Zarr store matches an expected state after a set of random operations. It contains a store (currently, a Zarr MemoryStore) and a model, a simplified version of a zarr store (in this case, a dict). It also contains rules which represent actions that can be applied to a zarr store. Rules apply an action to both the store and the model, and invariants assert that the state of the model is equal to the state of the store. Hypothesis then generates sequences of rules, running invariants after each rule. It raises an error if a sequence produces discontinuity between state of the model and state of the store (ie. an invariant is violated). 
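    As with the hierarchy machine above, a concrete test is obtained by binding
    a store instance, e.g. (sketch):
    ``run_state_machine_as_test(lambda: ZarrStoreStateMachine(MemoryStore()))``.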
https://hypothesis.readthedocs.io/en/latest/stateful.html """ def __init__(self, store: Store) -> None: super().__init__() self.model: dict[str, Buffer] = {} self.store = SyncStoreWrapper(store) self.prototype = default_buffer_prototype() @initialize() def init_store(self) -> None: self.store.clear() @rule(key=zarr_keys(), data=st.binary(min_size=0, max_size=MAX_BINARY_SIZE)) def set(self, key: str, data: DataObject) -> None: note(f"(set) Setting {key!r} with {data}") assert not self.store.read_only data_buf = cpu.Buffer.from_bytes(data) self.store.set(key, data_buf) self.model[key] = data_buf @precondition(lambda self: len(self.model.keys()) > 0) @rule(key=zarr_keys(), data=st.data()) def get(self, key: str, data: DataObject) -> None: key = data.draw( st.sampled_from(sorted(self.model.keys())) ) # hypothesis wants to sample from sorted list note("(get)") store_value = self.store.get(key, self.prototype) # to bytes here necessary because data_buf set to model in set() assert self.model[key] == store_value @rule(key=zarr_keys(), data=st.data()) def get_invalid_zarr_keys(self, key: str, data: DataObject) -> None: note("(get_invalid)") assume(key not in self.model) assert self.store.get(key, self.prototype) is None @precondition(lambda self: len(self.model.keys()) > 0) @rule(data=st.data()) def get_partial_values(self, data: DataObject) -> None: key_range = data.draw( key_ranges(keys=st.sampled_from(sorted(self.model.keys())), max_size=MAX_BINARY_SIZE) ) note(f"(get partial) {key_range=}") obs_maybe = self.store.get_partial_values(key_range, self.prototype) observed = [] for obs in obs_maybe: assert obs is not None observed.append(obs.to_bytes()) model_vals_ls = [] for key, byte_range in key_range: start = byte_range.start stop = byte_range.end model_vals_ls.append(self.model[key][start:stop]) assert all( obs == exp.to_bytes() for obs, exp in zip(observed, model_vals_ls, strict=True) ), ( observed, model_vals_ls, ) @precondition(lambda self: self.store.supports_deletes) @precondition(lambda self: len(self.model.keys()) > 0) @rule(data=st.data()) def delete(self, data: DataObject) -> None: key = data.draw(st.sampled_from(sorted(self.model.keys()))) note(f"(delete) Deleting {key=}") self.store.delete(key) del self.model[key] @rule() def clear(self) -> None: assert not self.store.read_only note("(clear)") self.store.clear() self.model.clear() assert self.store.is_empty("") assert len(self.model.keys()) == len(list(self.store.list())) == 0 @rule() # Local store can be non-empty when there are subdirectories but no files @precondition(lambda self: not isinstance(self.store.store, LocalStore)) def is_empty(self) -> None: note("(is_empty)") # make sure they either both are or both aren't empty (same state) assert self.store.is_empty("") == (not self.model) @rule(key=zarr_keys()) def exists(self, key: str) -> None: note("(exists)") assert self.store.exists(key) == (key in self.model) @invariant() def check_paths_equal(self) -> None: note("Checking that paths are equal") paths = sorted(self.store.list()) assert sorted(self.model.keys()) == paths @invariant() def check_vals_equal(self) -> None: note("Checking values equal") for key, val in self.model.items(): store_item = self.store.get(key, self.prototype) assert val == store_item @invariant() def check_num_zarr_keys_equal(self) -> None: note("check num zarr_keys equal") assert len(self.model) == len(list(self.store.list())) @invariant() def check_zarr_keys(self) -> None: keys = list(self.store.list()) if not keys: assert self.store.is_empty("") is 
True else: assert self.store.is_empty("") is False for key in keys: assert self.store.exists(key) is True note("checking keys / exists / empty") zarr-python-3.0.6/src/zarr/testing/store.py000066400000000000000000000474051476711733500207750ustar00rootroot00000000000000from __future__ import annotations import asyncio import pickle from abc import abstractmethod from typing import TYPE_CHECKING, Generic, TypeVar from zarr.storage import WrapperStore if TYPE_CHECKING: from typing import Any from zarr.abc.store import ByteRequest from zarr.core.buffer.core import BufferPrototype import pytest from zarr.abc.store import ( ByteRequest, OffsetByteRequest, RangeByteRequest, Store, SuffixByteRequest, ) from zarr.core.buffer import Buffer, default_buffer_prototype from zarr.core.sync import _collect_aiterator from zarr.storage._utils import _normalize_byte_range_index from zarr.testing.utils import assert_bytes_equal __all__ = ["StoreTests"] S = TypeVar("S", bound=Store) B = TypeVar("B", bound=Buffer) class StoreTests(Generic[S, B]): store_cls: type[S] buffer_cls: type[B] @abstractmethod async def set(self, store: S, key: str, value: Buffer) -> None: """ Insert a value into a storage backend, with a specific key. This should not not use any store methods. Bypassing the store methods allows them to be tested. """ ... @abstractmethod async def get(self, store: S, key: str) -> Buffer: """ Retrieve a value from a storage backend, by key. This should not not use any store methods. Bypassing the store methods allows them to be tested. """ ... @abstractmethod @pytest.fixture def store_kwargs(self) -> dict[str, Any]: """Kwargs for instantiating a store""" ... @abstractmethod def test_store_repr(self, store: S) -> None: ... @abstractmethod def test_store_supports_writes(self, store: S) -> None: ... @abstractmethod def test_store_supports_partial_writes(self, store: S) -> None: ... @abstractmethod def test_store_supports_listing(self, store: S) -> None: ... 
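    # Illustrative sketch (not part of the upstream suite): a concrete store
    # inherits this whole test suite by subclassing and filling in the
    # abstract pieces, roughly:
    #
    #     class TestMemoryStore(StoreTests[MemoryStore, cpu.Buffer]):
    #         store_cls = MemoryStore
    #         buffer_cls = cpu.Buffer
    #
    #         async def set(self, store, key, value):
    #             store._store_dict[key] = value  # bypasses Store.set on purpose
    #
    #         async def get(self, store, key):
    #             return store._store_dict[key]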
@pytest.fixture def open_kwargs(self, store_kwargs: dict[str, Any]) -> dict[str, Any]: return store_kwargs @pytest.fixture async def store(self, open_kwargs: dict[str, Any]) -> Store: return await self.store_cls.open(**open_kwargs) @pytest.fixture async def store_not_open(self, store_kwargs: dict[str, Any]) -> Store: return self.store_cls(**store_kwargs) def test_store_type(self, store: S) -> None: assert isinstance(store, Store) assert isinstance(store, self.store_cls) def test_store_eq(self, store: S, store_kwargs: dict[str, Any]) -> None: # check self equality assert store == store # check store equality with same inputs # asserting this is important for being able to compare (de)serialized stores store2 = self.store_cls(**store_kwargs) assert store == store2 async def test_serializable_store(self, store: S) -> None: new_store: S = pickle.loads(pickle.dumps(store)) assert new_store == store assert new_store.read_only == store.read_only # quickly roundtrip data to a key to test that new store works data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") key = "foo" await store.set(key, data_buf) observed = await store.get(key, prototype=default_buffer_prototype()) assert_bytes_equal(observed, data_buf) def test_store_read_only(self, store: S) -> None: assert not store.read_only with pytest.raises(AttributeError): store.read_only = False # type: ignore[misc] @pytest.mark.parametrize("read_only", [True, False]) async def test_store_open_read_only(self, open_kwargs: dict[str, Any], read_only: bool) -> None: open_kwargs["read_only"] = read_only store = await self.store_cls.open(**open_kwargs) assert store._is_open assert store.read_only == read_only async def test_store_context_manager(self, open_kwargs: dict[str, Any]) -> None: # Test that the context manager closes the store with await self.store_cls.open(**open_kwargs) as store: assert store._is_open # Test trying to open an already open store with pytest.raises(ValueError, match="store is already open"): await store._open() assert not store._is_open async def test_read_only_store_raises(self, open_kwargs: dict[str, Any]) -> None: kwargs = {**open_kwargs, "read_only": True} store = await self.store_cls.open(**kwargs) assert store.read_only # set with pytest.raises( ValueError, match="store was opened in read-only mode and does not support writing" ): await store.set("foo", self.buffer_cls.from_bytes(b"bar")) # delete with pytest.raises( ValueError, match="store was opened in read-only mode and does not support writing" ): await store.delete("foo") @pytest.mark.parametrize("key", ["c/0", "foo/c/0.0", "foo/0/0"]) @pytest.mark.parametrize("data", [b"\x01\x02\x03\x04", b""]) @pytest.mark.parametrize( "byte_range", [None, RangeByteRequest(1, 4), OffsetByteRequest(1), SuffixByteRequest(1)] ) async def test_get(self, store: S, key: str, data: bytes, byte_range: ByteRequest) -> None: """ Ensure that data can be read from the store using the store.get method. """ data_buf = self.buffer_cls.from_bytes(data) await self.set(store, key, data_buf) observed = await store.get(key, prototype=default_buffer_prototype(), byte_range=byte_range) start, stop = _normalize_byte_range_index(data_buf, byte_range=byte_range) expected = data_buf[start:stop] assert_bytes_equal(observed, expected) async def test_get_not_open(self, store_not_open: S) -> None: """ Ensure that data can be read from the store that isn't yet open using the store.get method. 
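        (``get`` is expected to open the store lazily on first use.)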
""" assert not store_not_open._is_open data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") key = "c/0" await self.set(store_not_open, key, data_buf) observed = await store_not_open.get(key, prototype=default_buffer_prototype()) assert_bytes_equal(observed, data_buf) async def test_get_raises(self, store: S) -> None: """ Ensure that a ValueError is raise for invalid byte range syntax """ data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") await self.set(store, "c/0", data_buf) with pytest.raises((ValueError, TypeError), match=r"Unexpected byte_range, got.*"): await store.get("c/0", prototype=default_buffer_prototype(), byte_range=(0, 2)) # type: ignore[arg-type] async def test_get_many(self, store: S) -> None: """ Ensure that multiple keys can be retrieved at once with the _get_many method. """ keys = tuple(map(str, range(10))) values = tuple(f"{k}".encode() for k in keys) for k, v in zip(keys, values, strict=False): await self.set(store, k, self.buffer_cls.from_bytes(v)) observed_buffers = await _collect_aiterator( store._get_many( zip( keys, (default_buffer_prototype(),) * len(keys), (None,) * len(keys), strict=False, ) ) ) observed_kvs = sorted(((k, b.to_bytes()) for k, b in observed_buffers)) # type: ignore[union-attr] expected_kvs = sorted(((k, b) for k, b in zip(keys, values, strict=False))) assert observed_kvs == expected_kvs @pytest.mark.parametrize("key", ["c/0", "foo/c/0.0", "foo/0/0"]) @pytest.mark.parametrize("data", [b"\x01\x02\x03\x04", b""]) async def test_getsize(self, store: S, key: str, data: bytes) -> None: """ Test the result of store.getsize(). """ data_buf = self.buffer_cls.from_bytes(data) expected = len(data_buf) await self.set(store, key, data_buf) observed = await store.getsize(key) assert observed == expected async def test_getsize_prefix(self, store: S) -> None: """ Test the result of store.getsize_prefix(). """ data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") keys = ["c/0/0", "c/0/1", "c/1/0", "c/1/1"] keys_values = [(k, data_buf) for k in keys] await store._set_many(keys_values) expected = len(data_buf) * len(keys) observed = await store.getsize_prefix("c") assert observed == expected async def test_getsize_raises(self, store: S) -> None: """ Test that getsize() raise a FileNotFoundError if the key doesn't exist. """ with pytest.raises(FileNotFoundError): await store.getsize("c/1000") @pytest.mark.parametrize("key", ["zarr.json", "c/0", "foo/c/0.0", "foo/0/0"]) @pytest.mark.parametrize("data", [b"\x01\x02\x03\x04", b""]) async def test_set(self, store: S, key: str, data: bytes) -> None: """ Ensure that data can be written to the store using the store.set method. """ assert not store.read_only data_buf = self.buffer_cls.from_bytes(data) await store.set(key, data_buf) observed = await self.get(store, key) assert_bytes_equal(observed, data_buf) async def test_set_not_open(self, store_not_open: S) -> None: """ Ensure that data can be written to the store that's not yet open using the store.set method. """ assert not store_not_open._is_open data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") key = "c/0" await store_not_open.set(key, data_buf) observed = await self.get(store_not_open, key) assert_bytes_equal(observed, data_buf) async def test_set_many(self, store: S) -> None: """ Test that a dict of key : value pairs can be inserted into the store via the `_set_many` method. 
""" keys = ["zarr.json", "c/0", "foo/c/0.0", "foo/0/0"] data_buf = [self.buffer_cls.from_bytes(k.encode()) for k in keys] store_dict = dict(zip(keys, data_buf, strict=True)) await store._set_many(store_dict.items()) for k, v in store_dict.items(): assert (await self.get(store, k)).to_bytes() == v.to_bytes() async def test_set_invalid_buffer(self, store: S) -> None: """ Ensure that set raises a Type or Value Error for invalid buffer arguments. """ with pytest.raises( (ValueError, TypeError), match=r"\S+\.set\(\): `value` must be a Buffer instance. Got an instance of instead.", ): await store.set("c/0", 0) # type: ignore[arg-type] @pytest.mark.parametrize( "key_ranges", [ [], [("zarr.json", RangeByteRequest(0, 2))], [("c/0", RangeByteRequest(0, 2)), ("zarr.json", None)], [ ("c/0/0", RangeByteRequest(0, 2)), ("c/0/1", SuffixByteRequest(2)), ("c/0/2", OffsetByteRequest(2)), ], ], ) async def test_get_partial_values( self, store: S, key_ranges: list[tuple[str, ByteRequest]] ) -> None: # put all of the data for key, _ in key_ranges: await self.set(store, key, self.buffer_cls.from_bytes(bytes(key, encoding="utf-8"))) # read back just part of it observed_maybe = await store.get_partial_values( prototype=default_buffer_prototype(), key_ranges=key_ranges ) observed: list[Buffer] = [] expected: list[Buffer] = [] for obs in observed_maybe: assert obs is not None observed.append(obs) for idx in range(len(observed)): key, byte_range = key_ranges[idx] result = await store.get( key, prototype=default_buffer_prototype(), byte_range=byte_range ) assert result is not None expected.append(result) assert all( obs.to_bytes() == exp.to_bytes() for obs, exp in zip(observed, expected, strict=True) ) async def test_exists(self, store: S) -> None: assert not await store.exists("foo") await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar")) assert await store.exists("foo/zarr.json") async def test_delete(self, store: S) -> None: if not store.supports_deletes: pytest.skip("store does not support deletes") await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar")) assert await store.exists("foo/zarr.json") await store.delete("foo/zarr.json") assert not await store.exists("foo/zarr.json") async def test_delete_dir(self, store: S) -> None: if not store.supports_deletes: pytest.skip("store does not support deletes") await store.set("zarr.json", self.buffer_cls.from_bytes(b"root")) await store.set("foo-bar/zarr.json", self.buffer_cls.from_bytes(b"root")) await store.set("foo/zarr.json", self.buffer_cls.from_bytes(b"bar")) await store.set("foo/c/0", self.buffer_cls.from_bytes(b"chunk")) await store.delete_dir("foo") assert await store.exists("zarr.json") assert await store.exists("foo-bar/zarr.json") assert not await store.exists("foo/zarr.json") assert not await store.exists("foo/c/0") async def test_is_empty(self, store: S) -> None: assert await store.is_empty("") await self.set( store, "foo/bar", self.buffer_cls.from_bytes(bytes("something", encoding="utf-8")) ) assert not await store.is_empty("") assert await store.is_empty("fo") assert not await store.is_empty("foo/") assert not await store.is_empty("foo") assert await store.is_empty("spam/") async def test_clear(self, store: S) -> None: await self.set( store, "key", self.buffer_cls.from_bytes(bytes("something", encoding="utf-8")) ) await store.clear() assert await store.is_empty("") async def test_list(self, store: S) -> None: assert await _collect_aiterator(store.list()) == () prefix = "foo" data = self.buffer_cls.from_bytes(b"") store_dict = { 
prefix + "/zarr.json": data, **{prefix + f"/c/{idx}": data for idx in range(10)}, } await store._set_many(store_dict.items()) expected_sorted = sorted(store_dict.keys()) observed = await _collect_aiterator(store.list()) observed_sorted = sorted(observed) assert observed_sorted == expected_sorted async def test_list_prefix(self, store: S) -> None: """ Test that the `list_prefix` method works as intended. Given a prefix, it should return all the keys in storage that start with this prefix. """ prefixes = ("", "a/", "a/b/", "a/b/c/") data = self.buffer_cls.from_bytes(b"") fname = "zarr.json" store_dict = {p + fname: data for p in prefixes} await store._set_many(store_dict.items()) for prefix in prefixes: observed = tuple(sorted(await _collect_aiterator(store.list_prefix(prefix)))) expected: tuple[str, ...] = () for key in store_dict: if key.startswith(prefix): expected += (key,) expected = tuple(sorted(expected)) assert observed == expected async def test_list_empty_path(self, store: S) -> None: """ Verify that list and list_prefix work correctly when path is an empty string, i.e. no unwanted replacement occurs. """ data = self.buffer_cls.from_bytes(b"") store_dict = { "foo/bar/zarr.json": data, "foo/bar/c/1": data, "foo/baz/c/0": data, } await store._set_many(store_dict.items()) # Test list() observed_list = await _collect_aiterator(store.list()) observed_list_sorted = sorted(observed_list) expected_list_sorted = sorted(store_dict.keys()) assert observed_list_sorted == expected_list_sorted # Test list_prefix() with an empty prefix observed_prefix_empty = await _collect_aiterator(store.list_prefix("")) observed_prefix_empty_sorted = sorted(observed_prefix_empty) expected_prefix_empty_sorted = sorted(store_dict.keys()) assert observed_prefix_empty_sorted == expected_prefix_empty_sorted # Test list_prefix() with a non-empty prefix observed_prefix = await _collect_aiterator(store.list_prefix("foo/bar/")) observed_prefix_sorted = sorted(observed_prefix) expected_prefix_sorted = sorted(k for k in store_dict if k.startswith("foo/bar/")) assert observed_prefix_sorted == expected_prefix_sorted async def test_list_dir(self, store: S) -> None: root = "foo" store_dict = { root + "/zarr.json": self.buffer_cls.from_bytes(b"bar"), root + "/c/1": self.buffer_cls.from_bytes(b"\x01"), } assert await _collect_aiterator(store.list_dir("")) == () assert await _collect_aiterator(store.list_dir(root)) == () await store._set_many(store_dict.items()) keys_observed = await _collect_aiterator(store.list_dir(root)) keys_expected = {k.removeprefix(root + "/").split("/")[0] for k in store_dict} assert sorted(keys_observed) == sorted(keys_expected) keys_observed = await _collect_aiterator(store.list_dir(root + "/")) assert sorted(keys_expected) == sorted(keys_observed) async def test_set_if_not_exists(self, store: S) -> None: key = "k" data_buf = self.buffer_cls.from_bytes(b"0000") await self.set(store, key, data_buf) new = self.buffer_cls.from_bytes(b"1111") await store.set_if_not_exists("k", new) # no error result = await store.get(key, default_buffer_prototype()) assert result == data_buf await store.set_if_not_exists("k2", new) # no error result = await store.get("k2", default_buffer_prototype()) assert result == new class LatencyStore(WrapperStore[Store]): """ A wrapper class that takes any store class in its constructor and adds latency to the `set` and `get` methods. This can be used for performance testing. 
""" get_latency: float set_latency: float def __init__(self, cls: Store, *, get_latency: float = 0, set_latency: float = 0) -> None: self.get_latency = float(get_latency) self.set_latency = float(set_latency) self._store = cls async def set(self, key: str, value: Buffer) -> None: """ Add latency to the ``set`` method. Calls ``asyncio.sleep(self.set_latency)`` before invoking the wrapped ``set`` method. Parameters ---------- key : str The key to set value : Buffer The value to set Returns ------- None """ await asyncio.sleep(self.set_latency) await self._store.set(key, value) async def get( self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None ) -> Buffer | None: """ Add latency to the ``get`` method. Calls ``asyncio.sleep(self.get_latency)`` before invoking the wrapped ``get`` method. Parameters ---------- key : str The key to get prototype : BufferPrototype The BufferPrototype to use. byte_range : ByteRequest, optional An optional byte range. Returns ------- buffer : Buffer or None """ await asyncio.sleep(self.get_latency) return await self._store.get(key, prototype=prototype, byte_range=byte_range) zarr-python-3.0.6/src/zarr/testing/strategies.py000066400000000000000000000342631476711733500220110ustar00rootroot00000000000000import math import sys from typing import Any, Literal import hypothesis.extra.numpy as npst import hypothesis.strategies as st import numpy as np from hypothesis import event, given, settings # noqa: F401 from hypothesis.strategies import SearchStrategy import zarr from zarr.abc.store import RangeByteRequest, Store from zarr.codecs.bytes import BytesCodec from zarr.core.array import Array from zarr.core.chunk_grids import RegularChunkGrid from zarr.core.chunk_key_encodings import DefaultChunkKeyEncoding from zarr.core.common import ZarrFormat from zarr.core.metadata import ArrayV2Metadata, ArrayV3Metadata from zarr.core.sync import sync from zarr.storage import MemoryStore, StoreLike from zarr.storage._common import _dereference_path from zarr.storage._utils import normalize_path # Copied from Xarray _attr_keys = st.text(st.characters(), min_size=1) _attr_values = st.recursive( st.none() | st.booleans() | st.text(st.characters(), max_size=5), lambda children: st.lists(children) | st.dictionaries(_attr_keys, children), max_leaves=3, ) @st.composite # type: ignore[misc] def keys(draw: st.DrawFn, *, max_num_nodes: int | None = None) -> Any: return draw(st.lists(node_names, min_size=1, max_size=max_num_nodes).map("/".join)) @st.composite # type: ignore[misc] def paths(draw: st.DrawFn, *, max_num_nodes: int | None = None) -> Any: return draw(st.just("/") | keys(max_num_nodes=max_num_nodes)) def v3_dtypes() -> st.SearchStrategy[np.dtype]: return ( npst.boolean_dtypes() | npst.integer_dtypes(endianness="=") | npst.unsigned_integer_dtypes(endianness="=") | npst.floating_dtypes(endianness="=") | npst.complex_number_dtypes(endianness="=") # | npst.byte_string_dtypes(endianness="=") # | npst.unicode_string_dtypes() # | npst.datetime64_dtypes() # | npst.timedelta64_dtypes() ) def v2_dtypes() -> st.SearchStrategy[np.dtype]: return ( npst.boolean_dtypes() | npst.integer_dtypes(endianness="=") | npst.unsigned_integer_dtypes(endianness="=") | npst.floating_dtypes(endianness="=") | npst.complex_number_dtypes(endianness="=") | npst.byte_string_dtypes(endianness="=") | npst.unicode_string_dtypes(endianness="=") | npst.datetime64_dtypes(endianness="=") # | npst.timedelta64_dtypes() ) def safe_unicode_for_dtype(dtype: np.dtype[np.str_]) -> 
st.SearchStrategy[str]: """Generate UTF-8-safe text constrained to max_len of dtype.""" # account for utf-32 encoding (i.e. 4 bytes/character) max_len = max(1, dtype.itemsize // 4) return st.text( alphabet=st.characters( blacklist_categories=["Cs"], # Avoid *technically allowed* surrogates min_codepoint=32, ), min_size=1, max_size=max_len, ) def clear_store(x: Store) -> Store: sync(x.clear()) return x # From https://zarr-specs.readthedocs.io/en/latest/v3/core/v3.0.html#node-names # 1. must not be the empty string ("") # 2. must not include the character "/" # 3. must not be a string composed only of period characters, e.g. "." or ".." # 4. must not start with the reserved prefix "__" zarr_key_chars = st.sampled_from( ".-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz" ) node_names = st.text(zarr_key_chars, min_size=1).filter( lambda t: t not in (".", "..") and not t.startswith("__") ) short_node_names = st.text(zarr_key_chars, max_size=3, min_size=1).filter( lambda t: t not in (".", "..") and not t.startswith("__") ) array_names = node_names attrs = st.none() | st.dictionaries(_attr_keys, _attr_values) # st.builds will only call a new store constructor for different keyword arguments # i.e. stores.examples() will always return the same object per Store class. # So we map a clear to reset the store. stores = st.builds(MemoryStore, st.just({})).map(clear_store) compressors = st.sampled_from([None, "default"]) zarr_formats: st.SearchStrategy[ZarrFormat] = st.sampled_from([3, 2]) # We de-prioritize arrays having dim sizes 0, 1, 2 array_shapes = npst.array_shapes(max_dims=4, min_side=3) | npst.array_shapes(max_dims=4, min_side=0) @st.composite # type: ignore[misc] def dimension_names(draw: st.DrawFn, *, ndim: int | None = None) -> list[None | str] | None: simple_text = st.text(zarr_key_chars, min_size=0) return draw(st.none() | st.lists(st.none() | simple_text, min_size=ndim, max_size=ndim)) # type: ignore[no-any-return] @st.composite # type: ignore[misc] def array_metadata( draw: st.DrawFn, *, array_shapes: st.SearchStrategy[tuple[int, ...]] = npst.array_shapes, zarr_formats: st.SearchStrategy[Literal[2, 3]] = zarr_formats, attributes: st.SearchStrategy[dict[str, Any]] = attrs, ) -> ArrayV2Metadata | ArrayV3Metadata: zarr_format = draw(zarr_formats) # separator = draw(st.sampled_from(['/', '\\'])) shape = draw(array_shapes()) ndim = len(shape) chunk_shape = draw(array_shapes(min_dims=ndim, max_dims=ndim)) dtype = draw(v3_dtypes()) fill_value = draw(npst.from_dtype(dtype)) if zarr_format == 2: return ArrayV2Metadata( shape=shape, chunks=chunk_shape, dtype=dtype, fill_value=fill_value, order=draw(st.sampled_from(["C", "F"])), attributes=draw(attributes), dimension_separator=draw(st.sampled_from([".", "/"])), filters=None, compressor=None, ) else: return ArrayV3Metadata( shape=shape, data_type=dtype, chunk_grid=RegularChunkGrid(chunk_shape=chunk_shape), fill_value=fill_value, attributes=draw(attributes), dimension_names=draw(dimension_names(ndim=ndim)), chunk_key_encoding=DefaultChunkKeyEncoding(separator="/"), # FIXME codecs=[BytesCodec()], storage_transformers=(), ) @st.composite # type: ignore[misc] def numpy_arrays( draw: st.DrawFn, *, shapes: st.SearchStrategy[tuple[int, ...]] = array_shapes, dtype: np.dtype[Any] | None = None, zarr_formats: st.SearchStrategy[ZarrFormat] | None = zarr_formats, ) -> Any: """ Generate numpy arrays that can be saved in the provided Zarr format. 
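A minimal usage sketch (the test name and body here are hypothetical)::

    @given(arr=numpy_arrays(zarr_formats=st.just(2)))
    def test_roundtrip(arr):
        ...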
""" zarr_format = draw(zarr_formats) if dtype is None: dtype = draw(v3_dtypes() if zarr_format == 3 else v2_dtypes()) if np.issubdtype(dtype, np.str_): safe_unicode_strings = safe_unicode_for_dtype(dtype) return draw(npst.arrays(dtype=dtype, shape=shapes, elements=safe_unicode_strings)) return draw(npst.arrays(dtype=dtype, shape=shapes)) @st.composite # type: ignore[misc] def chunk_shapes(draw: st.DrawFn, *, shape: tuple[int, ...]) -> tuple[int, ...]: # We want this strategy to shrink towards arrays with smaller number of chunks # 1. st.integers() shrinks towards smaller values. So we use that to generate number of chunks numchunks = draw( st.tuples(*[st.integers(min_value=0 if size == 0 else 1, max_value=size) for size in shape]) ) # 2. and now generate the chunks tuple chunks = tuple( size // nchunks if nchunks > 0 else 0 for size, nchunks in zip(shape, numchunks, strict=True) ) for c in chunks: event("chunk size", c) if any((c != 0 and s % c != 0) for s, c in zip(shape, chunks, strict=True)): event("smaller last chunk") return chunks @st.composite # type: ignore[misc] def shard_shapes( draw: st.DrawFn, *, shape: tuple[int, ...], chunk_shape: tuple[int, ...] ) -> tuple[int, ...]: # We want this strategy to shrink towards arrays with smaller number of shards # shards must be an integral number of chunks assert all(c != 0 for c in chunk_shape) numchunks = tuple(s // c for s, c in zip(shape, chunk_shape, strict=True)) multiples = tuple(draw(st.integers(min_value=1, max_value=nc)) for nc in numchunks) return tuple(m * c for m, c in zip(multiples, chunk_shape, strict=True)) @st.composite # type: ignore[misc] def np_array_and_chunks( draw: st.DrawFn, *, arrays: st.SearchStrategy[np.ndarray] = numpy_arrays ) -> tuple[np.ndarray, tuple[int, ...]]: # type: ignore[type-arg] """A hypothesis strategy to generate small sized random arrays. Returns: a tuple of the array and a suitable random chunking for it. """ array = draw(arrays) return (array, draw(chunk_shapes(shape=array.shape))) @st.composite # type: ignore[misc] def arrays( draw: st.DrawFn, *, shapes: st.SearchStrategy[tuple[int, ...]] = array_shapes, compressors: st.SearchStrategy = compressors, stores: st.SearchStrategy[StoreLike] = stores, paths: st.SearchStrategy[str | None] = paths(), # noqa: B008 array_names: st.SearchStrategy = array_names, arrays: st.SearchStrategy | None = None, attrs: st.SearchStrategy = attrs, zarr_formats: st.SearchStrategy = zarr_formats, ) -> Array: store = draw(stores) path = draw(paths) name = draw(array_names) attributes = draw(attrs) zarr_format = draw(zarr_formats) if arrays is None: arrays = numpy_arrays(shapes=shapes, zarr_formats=st.just(zarr_format)) nparray = draw(arrays) chunk_shape = draw(chunk_shapes(shape=nparray.shape)) if zarr_format == 3 and all(c > 0 for c in chunk_shape): shard_shape = draw(st.none() | shard_shapes(shape=nparray.shape, chunk_shape=chunk_shape)) else: shard_shape = None # test that None works too. 
fill_value = draw(st.one_of([st.none(), npst.from_dtype(nparray.dtype)])) # compressor = draw(compressors) expected_attrs = {} if attributes is None else attributes array_path = _dereference_path(path, name) root = zarr.open_group(store, mode="w", zarr_format=zarr_format) a = root.create_array( array_path, shape=nparray.shape, chunks=chunk_shape, shards=shard_shape, dtype=nparray.dtype, attributes=attributes, # compressor=compressor, # FIXME fill_value=fill_value, ) assert isinstance(a, Array) if a.metadata.zarr_format == 3: assert a.fill_value is not None assert a.name is not None assert a.path == normalize_path(array_path) assert a.name == "/" + a.path assert isinstance(root[array_path], Array) assert nparray.shape == a.shape assert chunk_shape == a.chunks assert shard_shape == a.shards assert a.basename == name, (a.basename, name) assert dict(a.attrs) == expected_attrs a[:] = nparray return a @st.composite # type: ignore[misc] def simple_arrays( draw: st.DrawFn, *, shapes: st.SearchStrategy[tuple[int, ...]] = array_shapes, ) -> Any: return draw( arrays( shapes=shapes, paths=paths(max_num_nodes=2), array_names=short_node_names, attrs=st.none(), compressors=st.sampled_from([None, "default"]), ) ) def is_negative_slice(idx: Any) -> bool: return isinstance(idx, slice) and idx.step is not None and idx.step < 0 @st.composite # type: ignore[misc] def end_slices(draw: st.DrawFn, *, shape: tuple[int]) -> Any: """ A strategy that slices ranges that include the last chunk. This is intended to stress-test handling of a possibly smaller last chunk. """ slicers = [] for size in shape: start = draw(st.integers(min_value=size // 2, max_value=size - 1)) length = draw(st.integers(min_value=0, max_value=size - start)) slicers.append(slice(start, start + length)) event("drawing end slice") return tuple(slicers) @st.composite # type: ignore[misc] def basic_indices(draw: st.DrawFn, *, shape: tuple[int], **kwargs: Any) -> Any: """Basic indices without unsupported negative slices.""" strategy = npst.basic_indices(shape=shape, **kwargs).filter( lambda idxr: ( not ( is_negative_slice(idxr) or (isinstance(idxr, tuple) and any(is_negative_slice(idx) for idx in idxr)) ) ) ) if math.prod(shape) >= 3: strategy = end_slices(shape=shape) | strategy return draw(strategy) @st.composite # type: ignore[misc] def orthogonal_indices( draw: st.DrawFn, *, shape: tuple[int] ) -> tuple[tuple[np.ndarray[Any, Any], ...], tuple[np.ndarray[Any, Any], ...]]: """ Strategy that returns (1) a tuple of integer arrays used for orthogonal indexing of Zarr arrays. 
(2) a tuple of integer arrays that can be used for equivalent indexing of numpy arrays """ zindexer = [] npindexer = [] ndim = len(shape) for axis, size in enumerate(shape): val = draw( npst.integer_array_indices( shape=(size,), result_shape=npst.array_shapes(min_side=1, max_side=size, max_dims=1) ) | basic_indices(min_dims=1, shape=(size,), allow_ellipsis=False) .map(lambda x: (x,) if not isinstance(x, tuple) else x) # bare ints, slices .filter(bool) # skip empty tuple ) (idxr,) = val if isinstance(idxr, int): idxr = np.array([idxr]) zindexer.append(idxr) if isinstance(idxr, slice): idxr = np.arange(*idxr.indices(size)) elif isinstance(idxr, (tuple, int)): idxr = np.array(idxr) newshape = [1] * ndim newshape[axis] = idxr.size npindexer.append(idxr.reshape(newshape)) # casting the output of broadcast_arrays is needed for numpy 1.25 return tuple(zindexer), tuple(np.broadcast_arrays(*npindexer)) def key_ranges( keys: SearchStrategy = node_names, max_size: int = sys.maxsize ) -> SearchStrategy[list[tuple[str, RangeByteRequest]]]: """ Function to generate a key_ranges strategy for get_partial_values() returns list strategy w/ form:: [(key, (range_start, range_end)), (key, (range_start, range_end)),...] """ def make_request(start: int, length: int) -> RangeByteRequest: return RangeByteRequest(start, end=min(start + length, max_size)) byte_ranges = st.builds( make_request, start=st.integers(min_value=0, max_value=max_size), length=st.integers(min_value=0, max_value=max_size), ) key_tuple = st.tuples(keys, byte_ranges) return st.lists(key_tuple, min_size=1, max_size=10) zarr-python-3.0.6/src/zarr/testing/utils.py000066400000000000000000000024151476711733500207710ustar00rootroot00000000000000from __future__ import annotations from collections.abc import Callable, Coroutine from typing import TYPE_CHECKING, Any, TypeVar, cast import pytest from zarr.core.buffer import Buffer if TYPE_CHECKING: from zarr.core.common import BytesLike __all__ = ["assert_bytes_equal"] def assert_bytes_equal(b1: Buffer | BytesLike | None, b2: Buffer | BytesLike | None) -> None: """Helper function to assert that two bytes-like objects or Buffers are equal Warnings -------- Always copies data, only use for testing and debugging """ if isinstance(b1, Buffer): b1 = b1.to_bytes() if isinstance(b2, Buffer): b2 = b2.to_bytes() assert b1 == b2 def has_cupy() -> bool: try: import cupy return cast(bool, cupy.cuda.runtime.getDeviceCount() > 0) except ImportError: return False except cupy.cuda.runtime.CUDARuntimeError: return False T_Callable = TypeVar("T_Callable", bound=Callable[..., Coroutine[Any, Any, None] | None]) # Decorator for GPU tests def gpu_test(func: T_Callable) -> T_Callable: return cast( T_Callable, pytest.mark.gpu( pytest.mark.skipif(not has_cupy(), reason="CuPy not installed or no GPU available")( func ) ), ) zarr-python-3.0.6/tests/000077500000000000000000000000001476711733500151755ustar00rootroot00000000000000zarr-python-3.0.6/tests/__init__.py000066400000000000000000000000001476711733500172740ustar00rootroot00000000000000zarr-python-3.0.6/tests/conftest.py000066400000000000000000000301271476711733500173770ustar00rootroot00000000000000from __future__ import annotations import pathlib from dataclasses import dataclass, field from typing import TYPE_CHECKING import numpy as np import numpy.typing as npt import pytest from hypothesis import HealthCheck, Verbosity, settings from zarr import AsyncGroup, config from zarr.abc.store import Store from zarr.codecs.sharding import ShardingCodec, ShardingCodecIndexLocation from zarr.core.array import (
_parse_chunk_encoding_v2, _parse_chunk_encoding_v3, _parse_chunk_key_encoding, ) from zarr.core.chunk_grids import RegularChunkGrid, _auto_partition from zarr.core.common import JSON, parse_dtype, parse_shapelike from zarr.core.config import config as zarr_config from zarr.core.metadata.v2 import ArrayV2Metadata from zarr.core.metadata.v3 import ArrayV3Metadata from zarr.core.sync import sync from zarr.storage import FsspecStore, LocalStore, MemoryStore, StorePath, ZipStore if TYPE_CHECKING: from collections.abc import Generator, Iterable from typing import Any, Literal from _pytest.compat import LEGACY_PATH from zarr.abc.codec import Codec from zarr.core.array import CompressorsLike, FiltersLike, SerializerLike, ShardsLike from zarr.core.chunk_key_encodings import ChunkKeyEncoding, ChunkKeyEncodingLike from zarr.core.common import ChunkCoords, MemoryOrder, ShapeLike, ZarrFormat async def parse_store( store: Literal["local", "memory", "fsspec", "zip"], path: str ) -> LocalStore | MemoryStore | FsspecStore | ZipStore: if store == "local": return await LocalStore.open(path) if store == "memory": return await MemoryStore.open() if store == "fsspec": return await FsspecStore.open(url=path) if store == "zip": return await ZipStore.open(path + "/zarr.zip", mode="w") raise AssertionError @pytest.fixture(params=[str, pathlib.Path]) def path_type(request: pytest.FixtureRequest) -> Any: return request.param # todo: harmonize this with local_store fixture @pytest.fixture async def store_path(tmpdir: LEGACY_PATH) -> StorePath: store = await LocalStore.open(str(tmpdir)) return StorePath(store) @pytest.fixture async def local_store(tmpdir: LEGACY_PATH) -> LocalStore: return await LocalStore.open(str(tmpdir)) @pytest.fixture async def remote_store(url: str) -> FsspecStore: return await FsspecStore.open(url) @pytest.fixture async def memory_store() -> MemoryStore: return await MemoryStore.open() @pytest.fixture async def zip_store(tmpdir: LEGACY_PATH) -> ZipStore: return await ZipStore.open(str(tmpdir / "zarr.zip"), mode="w") @pytest.fixture async def store(request: pytest.FixtureRequest, tmpdir: LEGACY_PATH) -> Store: param = request.param return await parse_store(param, str(tmpdir)) @pytest.fixture(params=["local", "memory", "zip"]) def sync_store(request: pytest.FixtureRequest, tmp_path: LEGACY_PATH) -> Store: result = sync(parse_store(request.param, str(tmp_path))) if not isinstance(result, Store): raise TypeError("Wrong store class returned by test fixture! 
got " + result + " instead") return result @dataclass class AsyncGroupRequest: zarr_format: ZarrFormat store: Literal["local", "fsspec", "memory", "zip"] attributes: dict[str, Any] = field(default_factory=dict) @pytest.fixture async def async_group(request: pytest.FixtureRequest, tmpdir: LEGACY_PATH) -> AsyncGroup: param: AsyncGroupRequest = request.param store = await parse_store(param.store, str(tmpdir)) return await AsyncGroup.from_store( store, attributes=param.attributes, zarr_format=param.zarr_format, overwrite=False, ) @pytest.fixture(params=["numpy", "cupy"]) def xp(request: pytest.FixtureRequest) -> Any: """Fixture to parametrize over numpy-like libraries""" if request.param == "cupy": request.node.add_marker(pytest.mark.gpu) return pytest.importorskip(request.param) @pytest.fixture(autouse=True) def reset_config() -> Generator[None, None, None]: config.reset() yield config.reset() @dataclass class ArrayRequest: shape: ChunkCoords dtype: str order: MemoryOrder @pytest.fixture def array_fixture(request: pytest.FixtureRequest) -> npt.NDArray[Any]: array_request: ArrayRequest = request.param return ( np.arange(np.prod(array_request.shape)) .reshape(array_request.shape, order=array_request.order) .astype(array_request.dtype) ) @pytest.fixture(params=(2, 3), ids=["zarr2", "zarr3"]) def zarr_format(request: pytest.FixtureRequest) -> ZarrFormat: if request.param == 2: return 2 elif request.param == 3: return 3 msg = f"Invalid zarr format requested. Got {request.param}, expected on of (2,3)." raise ValueError(msg) def pytest_addoption(parser: Any) -> None: parser.addoption( "--run-slow-hypothesis", action="store_true", default=False, help="run slow hypothesis tests", ) def pytest_collection_modifyitems(config: Any, items: Any) -> None: if config.getoption("--run-slow-hypothesis"): return skip_slow_hyp = pytest.mark.skip(reason="need --run-slow-hypothesis option to run") for item in items: if "slow_hypothesis" in item.keywords: item.add_marker(skip_slow_hyp) settings.register_profile( "ci", max_examples=1000, deadline=None, suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow], ) settings.register_profile( "local", max_examples=300, suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow], verbosity=Verbosity.verbose, ) # TODO: uncomment these overrides when we can get mypy to accept them """ @overload def create_array_metadata( *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"], shards: None, filters: FiltersLike, compressors: CompressorsLike, serializer: SerializerLike, fill_value: Any | None, order: MemoryOrder | None, zarr_format: Literal[2], attributes: dict[str, JSON] | None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None, dimension_names: None, ) -> ArrayV2Metadata: ... @overload def create_array_metadata( *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"], shards: ShardsLike | None, filters: FiltersLike, compressors: CompressorsLike, serializer: SerializerLike, fill_value: Any | None, order: None, zarr_format: Literal[3], attributes: dict[str, JSON] | None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None, dimension_names: Iterable[str] | None, ) -> ArrayV3Metadata: ... 
""" def create_array_metadata( *, shape: ShapeLike, dtype: npt.DTypeLike, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = None, order: MemoryOrder | None = None, zarr_format: ZarrFormat, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, ) -> ArrayV2Metadata | ArrayV3Metadata: """ Create array metadata """ dtype_parsed = parse_dtype(dtype, zarr_format=zarr_format) shape_parsed = parse_shapelike(shape) chunk_key_encoding_parsed = _parse_chunk_key_encoding( chunk_key_encoding, zarr_format=zarr_format ) shard_shape_parsed, chunk_shape_parsed = _auto_partition( array_shape=shape_parsed, shard_shape=shards, chunk_shape=chunks, dtype=dtype_parsed ) if order is None: order_parsed = zarr_config.get("array.order") else: order_parsed = order chunks_out: tuple[int, ...] if zarr_format == 2: filters_parsed, compressor_parsed = _parse_chunk_encoding_v2( compressor=compressors, filters=filters, dtype=np.dtype(dtype) ) return ArrayV2Metadata( shape=shape_parsed, dtype=np.dtype(dtype), chunks=chunk_shape_parsed, order=order_parsed, dimension_separator=chunk_key_encoding_parsed.separator, fill_value=fill_value, compressor=compressor_parsed, filters=filters_parsed, attributes=attributes, ) elif zarr_format == 3: array_array, array_bytes, bytes_bytes = _parse_chunk_encoding_v3( compressors=compressors, filters=filters, serializer=serializer, dtype=dtype_parsed, ) sub_codecs: tuple[Codec, ...] = (*array_array, array_bytes, *bytes_bytes) codecs_out: tuple[Codec, ...] if shard_shape_parsed is not None: index_location = None if isinstance(shards, dict): index_location = ShardingCodecIndexLocation(shards.get("index_location", None)) if index_location is None: index_location = ShardingCodecIndexLocation.end sharding_codec = ShardingCodec( chunk_shape=chunk_shape_parsed, codecs=sub_codecs, index_location=index_location, ) sharding_codec.validate( shape=chunk_shape_parsed, dtype=dtype_parsed, chunk_grid=RegularChunkGrid(chunk_shape=shard_shape_parsed), ) codecs_out = (sharding_codec,) chunks_out = shard_shape_parsed else: chunks_out = chunk_shape_parsed codecs_out = sub_codecs return ArrayV3Metadata( shape=shape_parsed, data_type=dtype_parsed, chunk_grid=RegularChunkGrid(chunk_shape=chunks_out), chunk_key_encoding=chunk_key_encoding_parsed, fill_value=fill_value, codecs=codecs_out, attributes=attributes, dimension_names=dimension_names, ) raise ValueError(f"Invalid Zarr format: {zarr_format}") # TODO: uncomment these overrides when we can get mypy to accept them """ @overload def meta_from_array( array: np.ndarray[Any, Any], chunks: ChunkCoords | Literal["auto"], shards: None, filters: FiltersLike, compressors: CompressorsLike, serializer: SerializerLike, fill_value: Any | None, order: MemoryOrder | None, zarr_format: Literal[2], attributes: dict[str, JSON] | None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None, dimension_names: Iterable[str] | None, ) -> ArrayV2Metadata: ... 
@overload def meta_from_array( array: np.ndarray[Any, Any], chunks: ChunkCoords | Literal["auto"], shards: ShardsLike | None, filters: FiltersLike, compressors: CompressorsLike, serializer: SerializerLike, fill_value: Any | None, order: None, zarr_format: Literal[3], attributes: dict[str, JSON] | None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None, dimension_names: Iterable[str] | None, ) -> ArrayV3Metadata: ... """ def meta_from_array( array: np.ndarray[Any, Any], *, chunks: ChunkCoords | Literal["auto"] = "auto", shards: ShardsLike | None = None, filters: FiltersLike = "auto", compressors: CompressorsLike = "auto", serializer: SerializerLike = "auto", fill_value: Any | None = None, order: MemoryOrder | None = None, zarr_format: ZarrFormat = 3, attributes: dict[str, JSON] | None = None, chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None, dimension_names: Iterable[str] | None = None, ) -> ArrayV3Metadata | ArrayV2Metadata: """ Create array metadata from an array """ return create_array_metadata( shape=array.shape, dtype=array.dtype, chunks=chunks, shards=shards, filters=filters, compressors=compressors, serializer=serializer, fill_value=fill_value, order=order, zarr_format=zarr_format, attributes=attributes, chunk_key_encoding=chunk_key_encoding, dimension_names=dimension_names, ) zarr-python-3.0.6/tests/package_with_entrypoint-0.1.dist-info/000077500000000000000000000000001476711733500243055ustar00rootroot00000000000000zarr-python-3.0.6/tests/package_with_entrypoint-0.1.dist-info/entry_points.txt000066400000000000000000000011321476711733500276000ustar00rootroot00000000000000[zarr.codecs] test = package_with_entrypoint:TestEntrypointCodec [zarr.codecs.test] another_codec = package_with_entrypoint:TestEntrypointGroup.Codec [zarr] codec_pipeline = package_with_entrypoint:TestEntrypointCodecPipeline ndbuffer = package_with_entrypoint:TestEntrypointNDBuffer buffer = package_with_entrypoint:TestEntrypointBuffer [zarr.buffer] another_buffer = package_with_entrypoint:TestEntrypointGroup.Buffer [zarr.ndbuffer] another_ndbuffer = package_with_entrypoint:TestEntrypointGroup.NDBuffer [zarr.codec_pipeline] another_pipeline = package_with_entrypoint:TestEntrypointGroup.Pipeline zarr-python-3.0.6/tests/package_with_entrypoint/000077500000000000000000000000001476711733500221165ustar00rootroot00000000000000zarr-python-3.0.6/tests/package_with_entrypoint/__init__.py000066400000000000000000000030211476711733500242230ustar00rootroot00000000000000from collections.abc import Iterable from numpy import ndarray import zarr.core.buffer from zarr.abc.codec import ArrayBytesCodec, CodecInput, CodecOutput, CodecPipeline from zarr.codecs import BytesCodec from zarr.core.array_spec import ArraySpec from zarr.core.buffer import Buffer, NDBuffer from zarr.core.common import BytesLike class TestEntrypointCodec(ArrayBytesCodec): is_fixed_size = True async def encode( self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]], ) -> Iterable[CodecOutput | None]: pass async def decode( self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]], ) -> ndarray: pass def compute_encoded_size(self, input_byte_length: int, chunk_spec: ArraySpec) -> int: return input_byte_length class TestEntrypointCodecPipeline(CodecPipeline): def __init__(self, batch_size: int = 1) -> None: pass async def encode( self, chunks_and_specs: Iterable[tuple[CodecInput | None, ArraySpec]] ) -> BytesLike: pass async def decode( self, chunks_and_specs: Iterable[tuple[CodecInput | None, 
ArraySpec]] ) -> ndarray: pass class TestEntrypointBuffer(Buffer): pass class TestEntrypointNDBuffer(NDBuffer): pass class TestEntrypointGroup: class Codec(BytesCodec): pass class Buffer(zarr.core.buffer.Buffer): pass class NDBuffer(zarr.core.buffer.NDBuffer): pass class Pipeline(CodecPipeline): pass zarr-python-3.0.6/tests/test_api.py000066400000000000000000001170211476711733500173610ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: import pathlib from zarr.abc.store import Store from zarr.core.common import JSON, MemoryOrder, ZarrFormat import warnings from typing import Literal import numpy as np import pytest from numpy.testing import assert_array_equal import zarr import zarr.api.asynchronous import zarr.api.synchronous import zarr.core.group from zarr import Array, Group from zarr.api.synchronous import ( create, create_array, create_group, group, load, open, open_group, save, save_array, save_group, ) from zarr.errors import MetadataValidationError from zarr.storage import MemoryStore from zarr.storage._utils import normalize_path from zarr.testing.utils import gpu_test def test_create(memory_store: Store) -> None: store = memory_store # create array z = create(shape=100, store=store) assert isinstance(z, Array) assert z.shape == (100,) # create array, overwrite, specify chunk shape z = create(shape=200, chunk_shape=20, store=store, overwrite=True) assert isinstance(z, Array) assert z.shape == (200,) assert z.chunks == (20,) # create array, overwrite, specify chunk shape via chunks param z = create(shape=400, chunks=40, store=store, overwrite=True) assert isinstance(z, Array) assert z.shape == (400,) assert z.chunks == (40,) # create array with float shape with pytest.raises(TypeError): z = create(shape=(400.5, 100), store=store, overwrite=True) # type: ignore [arg-type] # create array with float chunk shape with pytest.raises(TypeError): z = create(shape=(400, 100), chunks=(16, 16.5), store=store, overwrite=True) # type: ignore [arg-type] # TODO: parametrize over everything this function takes @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_create_array(store: Store) -> None: attrs: dict[str, JSON] = {"foo": 100} # explicit type annotation to avoid mypy error shape = (10, 10) path = "foo" data_val = 1 array_w = create_array( store, name=path, shape=shape, attributes=attrs, chunks=shape, dtype="uint8" ) array_w[:] = data_val assert array_w.shape == shape assert array_w.attrs == attrs assert np.array_equal(array_w[:], np.zeros(shape, dtype=array_w.dtype) + data_val) @pytest.mark.parametrize("write_empty_chunks", [True, False]) def test_write_empty_chunks_warns(write_empty_chunks: bool) -> None: """ Test that using the `write_empty_chunks` kwarg on array access will raise a warning. 
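In zarr-python 3 the supported route for this setting is the array-level
configuration (e.g. ``config={"write_empty_chunks": ...}``) rather than a
keyword argument.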
""" match = "The `write_empty_chunks` keyword argument .*" with pytest.warns(RuntimeWarning, match=match): _ = zarr.array( data=np.arange(10), shape=(10,), dtype="uint8", write_empty_chunks=write_empty_chunks ) with pytest.warns(RuntimeWarning, match=match): _ = zarr.create(shape=(10,), dtype="uint8", write_empty_chunks=write_empty_chunks) @pytest.mark.parametrize("path", ["foo", "/", "/foo", "///foo/bar"]) @pytest.mark.parametrize("node_type", ["array", "group"]) def test_open_normalized_path( memory_store: MemoryStore, path: str, node_type: Literal["array", "group"] ) -> None: node: Group | Array if node_type == "group": node = group(store=memory_store, path=path) elif node_type == "array": node = create(store=memory_store, path=path, shape=(2,)) assert node.path == normalize_path(path) async def test_open_array(memory_store: MemoryStore) -> None: store = memory_store # open array, create if doesn't exist z = open(store=store, shape=100) assert isinstance(z, Array) assert z.shape == (100,) # open array, overwrite # store._store_dict = {} store = MemoryStore() z = open(store=store, shape=200) assert isinstance(z, Array) assert z.shape == (200,) # open array, read-only store_cls = type(store) ro_store = await store_cls.open(store_dict=store._store_dict, read_only=True) z = open(store=ro_store, mode="r") assert isinstance(z, Array) assert z.shape == (200,) assert z.read_only # path not found with pytest.raises(FileNotFoundError): open(store="doesnotexist", mode="r") @pytest.mark.parametrize("store", ["memory"], indirect=True) async def test_create_group(store: Store, zarr_format: ZarrFormat) -> None: attrs = {"foo": 100} path = "node" node = create_group(store, path=path, attributes=attrs, zarr_format=zarr_format) assert isinstance(node, Group) assert node.attrs == attrs assert node.metadata.zarr_format == zarr_format async def test_open_group(memory_store: MemoryStore) -> None: store = memory_store # open group, create if doesn't exist g = open_group(store=store) g.create_group("foo") assert isinstance(g, Group) assert "foo" in g # open group, overwrite # g = open_group(store=store) # assert isinstance(g, Group) # assert "foo" not in g # open group, read-only store_cls = type(store) ro_store = await store_cls.open(store_dict=store._store_dict, read_only=True) g = open_group(store=ro_store, mode="r") assert isinstance(g, Group) assert g.read_only @pytest.mark.parametrize("zarr_format", [None, 2, 3]) async def test_open_group_unspecified_version( tmpdir: pathlib.Path, zarr_format: ZarrFormat ) -> None: """Regression test for https://github.com/zarr-developers/zarr-python/issues/2175""" # create a group with specified zarr format (could be 2, 3, or None) _ = await zarr.api.asynchronous.open_group( store=str(tmpdir), mode="w", zarr_format=zarr_format, attributes={"foo": "bar"} ) # now open that group without specifying the format g2 = await zarr.api.asynchronous.open_group(store=str(tmpdir), mode="r") assert g2.attrs == {"foo": "bar"} if zarr_format is not None: assert g2.metadata.zarr_format == zarr_format @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) @pytest.mark.parametrize("n_args", [10, 1, 0]) @pytest.mark.parametrize("n_kwargs", [10, 1, 0]) def test_save(store: Store, n_args: int, n_kwargs: int) -> None: data = np.arange(10) args = [np.arange(10) for _ in range(n_args)] kwargs = {f"arg_{i}": data for i in range(n_kwargs)} if n_kwargs == 0 and n_args == 0: with pytest.raises(ValueError): save(store) elif n_args == 1 and n_kwargs == 0: save(store, *args) 
array = open(store) assert isinstance(array, Array) assert_array_equal(array[:], data) else: save(store, *args, **kwargs) # type: ignore [arg-type] group = open(store) assert isinstance(group, Group) for array in group.array_values(): assert_array_equal(array[:], data) for k in kwargs: assert k in group assert group.nmembers() == n_args + n_kwargs def test_save_errors() -> None: with pytest.raises(ValueError): # no arrays provided save_group("data/group.zarr") with pytest.raises(TypeError): # no array provided save_array("data/group.zarr") with pytest.raises(ValueError): # no arrays provided save("data/group.zarr") with pytest.raises(TypeError): # mode is no valid argument and would get handled as an array a = np.arange(10) zarr.save("data/example.zarr", a, mode="w") def test_open_with_mode_r(tmp_path: pathlib.Path) -> None: # 'r' means read only (must exist) with pytest.raises(FileNotFoundError): zarr.open(store=tmp_path, mode="r") z1 = zarr.ones(store=tmp_path, shape=(3, 3)) assert z1.fill_value == 1 z2 = zarr.open(store=tmp_path, mode="r") assert isinstance(z2, Array) assert z2.fill_value == 1 assert (z2[:] == 1).all() with pytest.raises(ValueError): z2[:] = 3 def test_open_with_mode_r_plus(tmp_path: pathlib.Path) -> None: # 'r+' means read/write (must exist) with pytest.raises(FileNotFoundError): zarr.open(store=tmp_path, mode="r+") zarr.ones(store=tmp_path, shape=(3, 3)) z2 = zarr.open(store=tmp_path, mode="r+") assert isinstance(z2, Array) assert (z2[:] == 1).all() z2[:] = 3 async def test_open_with_mode_a(tmp_path: pathlib.Path) -> None: # Open without shape argument should default to group g = zarr.open(store=tmp_path, mode="a") assert isinstance(g, Group) await g.store_path.delete() # 'a' means read/write (create if doesn't exist) arr = zarr.open(store=tmp_path, mode="a", shape=(3, 3)) assert isinstance(arr, Array) arr[...] = 1 z2 = zarr.open(store=tmp_path, mode="a") assert isinstance(z2, Array) assert (z2[:] == 1).all() z2[:] = 3 def test_open_with_mode_w(tmp_path: pathlib.Path) -> None: # 'w' means create (overwrite if exists); arr = zarr.open(store=tmp_path, mode="w", shape=(3, 3)) assert isinstance(arr, Array) arr[...] = 3 z2 = zarr.open(store=tmp_path, mode="w", shape=(3, 3)) assert isinstance(z2, Array) assert not (z2[:] == 3).all() z2[:] = 3 def test_open_with_mode_w_minus(tmp_path: pathlib.Path) -> None: # 'w-' means create (fail if exists) arr = zarr.open(store=tmp_path, mode="w-", shape=(3, 3)) assert isinstance(arr, Array) arr[...] 
= 1 with pytest.raises(FileExistsError): zarr.open(store=tmp_path, mode="w-") @pytest.mark.parametrize("zarr_format", [2, 3]) def test_array_order(zarr_format: ZarrFormat) -> None: arr = zarr.ones(shape=(2, 2), order=None, zarr_format=zarr_format) expected = zarr.config.get("array.order") assert arr.order == expected vals = np.asarray(arr) if expected == "C": assert vals.flags.c_contiguous elif expected == "F": assert vals.flags.f_contiguous else: raise AssertionError @pytest.mark.parametrize("order", ["C", "F"]) @pytest.mark.parametrize("zarr_format", [2, 3]) def test_array_order_warns(order: MemoryOrder | None, zarr_format: ZarrFormat) -> None: with pytest.warns(RuntimeWarning, match="The `order` keyword argument .*"): arr = zarr.ones(shape=(2, 2), order=order, zarr_format=zarr_format) expected = order or zarr.config.get("array.order") assert arr.order == expected vals = np.asarray(arr) if expected == "C": assert vals.flags.c_contiguous elif expected == "F": assert vals.flags.f_contiguous else: raise AssertionError # def test_lazy_loader(): # foo = np.arange(100) # bar = np.arange(100, 0, -1) # store = "data/group.zarr" # save(store, foo=foo, bar=bar) # loader = load(store) # assert "foo" in loader # assert "bar" in loader # assert "baz" not in loader # assert len(loader) == 2 # assert sorted(loader) == ["bar", "foo"] # assert_array_equal(foo, loader["foo"]) # assert_array_equal(bar, loader["bar"]) # assert "LazyLoader: " in repr(loader) def test_load_array(memory_store: Store) -> None: store = memory_store foo = np.arange(100) bar = np.arange(100, 0, -1) save(store, foo=foo, bar=bar) # can also load arrays directly into a numpy array for array_name in ["foo", "bar"]: array = load(store, path=array_name) assert isinstance(array, np.ndarray) if array_name == "foo": assert_array_equal(foo, array) else: assert_array_equal(bar, array) def test_tree() -> None: pytest.importorskip("rich") g1 = zarr.group() g1.create_group("foo") g3 = g1.create_group("bar") g3.create_group("baz") g5 = g3.create_group("qux") g5.create_array("baz", shape=(100,), chunks=(10,), dtype="float64") with pytest.warns(DeprecationWarning): assert repr(zarr.tree(g1)) == repr(g1.tree()) assert str(zarr.tree(g1)) == str(g1.tree()) # @pytest.mark.parametrize("stores_from_path", [False, True]) # @pytest.mark.parametrize( # "with_chunk_store,listable", # [(False, True), (True, True), (False, False)], # ids=["default-listable", "with_chunk_store-listable", "default-unlistable"], # ) # def test_consolidate_metadata(with_chunk_store, listable, monkeypatch, stores_from_path): # # setup initial data # if stores_from_path: # store = tempfile.mkdtemp() # atexit.register(atexit_rmtree, store) # if with_chunk_store: # chunk_store = tempfile.mkdtemp() # atexit.register(atexit_rmtree, chunk_store) # else: # chunk_store = None # else: # store = MemoryStore() # chunk_store = MemoryStore() if with_chunk_store else None # path = None # z = group(store, chunk_store=chunk_store, path=path) # # Reload the actual store implementation in case str # store_to_copy = z.store # z.create_group("g1") # g2 = z.create_group("g2") # g2.attrs["hello"] = "world" # arr = g2.create_array("arr", shape=(20, 20), chunks=(5, 5), dtype="f8") # assert 16 == arr.nchunks # assert 0 == arr.nchunks_initialized # arr.attrs["data"] = 1 # arr[:] = 1.0 # assert 16 == arr.nchunks_initialized # if stores_from_path: # # get the actual store class for use with consolidate_metadata # store_class = z._store # else: # store_class = store # # perform consolidation # out = 
consolidate_metadata(store_class, path=path) # assert isinstance(out, Group) # assert ["g1", "g2"] == list(out) # if not stores_from_path: # assert isinstance(out._store, ConsolidatedMetadataStore) # assert ".zmetadata" in store # meta_keys = [ # ".zgroup", # "g1/.zgroup", # "g2/.zgroup", # "g2/.zattrs", # "g2/arr/.zarray", # "g2/arr/.zattrs", # ] # for key in meta_keys: # del store[key] # # https://github.com/zarr-developers/zarr-python/issues/993 # # Make sure we can still open consolidated on an unlistable store: # if not listable: # fs_memory = pytest.importorskip("fsspec.implementations.memory") # monkeypatch.setattr(fs_memory.MemoryFileSystem, "isdir", lambda x, y: False) # monkeypatch.delattr(fs_memory.MemoryFileSystem, "ls") # fs = fs_memory.MemoryFileSystem() # store_to_open = FSStore("", fs=fs) # # copy original store to new unlistable store # store_to_open.update(store_to_copy) # else: # store_to_open = store # # open consolidated # z2 = open_consolidated(store_to_open, chunk_store=chunk_store, path=path) # assert ["g1", "g2"] == list(z2) # assert "world" == z2.g2.attrs["hello"] # assert 1 == z2.g2.arr.attrs["data"] # assert (z2.g2.arr[:] == 1.0).all() # assert 16 == z2.g2.arr.nchunks # if listable: # assert 16 == z2.g2.arr.nchunks_initialized # else: # with pytest.raises(NotImplementedError): # _ = z2.g2.arr.nchunks_initialized # if stores_from_path: # # path string is note a BaseStore subclass so cannot be used to # # initialize a ConsolidatedMetadataStore. # with pytest.raises(ValueError): # cmd = ConsolidatedMetadataStore(store) # else: # # tests del/write on the store # cmd = ConsolidatedMetadataStore(store) # with pytest.raises(PermissionError): # del cmd[".zgroup"] # with pytest.raises(PermissionError): # cmd[".zgroup"] = None # # test getsize on the store # assert isinstance(getsize(cmd), Integral) # # test new metadata are not writeable # with pytest.raises(PermissionError): # z2.create_group("g3") # with pytest.raises(PermissionError): # z2.create_dataset("spam", shape=42, chunks=7, dtype="i4") # with pytest.raises(PermissionError): # del z2["g2"] # # test consolidated metadata are not writeable # with pytest.raises(PermissionError): # z2.g2.attrs["hello"] = "universe" # with pytest.raises(PermissionError): # z2.g2.arr.attrs["foo"] = "bar" # # test the data are writeable # z2.g2.arr[:] = 2 # assert (z2.g2.arr[:] == 2).all() # # test invalid modes # with pytest.raises(ValueError): # open_consolidated(store, chunk_store=chunk_store, mode="a", path=path) # with pytest.raises(ValueError): # open_consolidated(store, chunk_store=chunk_store, mode="w", path=path) # with pytest.raises(ValueError): # open_consolidated(store, chunk_store=chunk_store, mode="w-", path=path) # # make sure keyword arguments are passed through without error # open_consolidated( # store, # chunk_store=chunk_store, # path=path, # cache_attrs=True, # synchronizer=None, # ) # @pytest.mark.parametrize( # "options", # ( # {"dimension_separator": "/"}, # {"dimension_separator": "."}, # {"dimension_separator": None}, # ), # ) # def test_save_array_separator(tmpdir, options): # data = np.arange(6).reshape((3, 2)) # url = tmpdir.join("test.zarr") # save_array(url, data, **options) # class TestCopyStore(unittest.TestCase): # _version = 2 # def setUp(self): # source = dict() # source["foo"] = b"xxx" # source["bar/baz"] = b"yyy" # source["bar/qux"] = b"zzz" # self.source = source # def _get_dest_store(self): # return dict() # def test_no_paths(self): # source = self.source # dest = self._get_dest_store() # 
copy_store(source, dest) # assert len(source) == len(dest) # for key in source: # assert source[key] == dest[key] # def test_source_path(self): # source = self.source # # paths should be normalized # for source_path in "bar", "bar/", "/bar", "/bar/": # dest = self._get_dest_store() # copy_store(source, dest, source_path=source_path) # assert 2 == len(dest) # for key in source: # if key.startswith("bar/"): # dest_key = key.split("bar/")[1] # assert source[key] == dest[dest_key] # else: # assert key not in dest # def test_dest_path(self): # source = self.source # # paths should be normalized # for dest_path in "new", "new/", "/new", "/new/": # dest = self._get_dest_store() # copy_store(source, dest, dest_path=dest_path) # assert len(source) == len(dest) # for key in source: # if self._version == 3: # dest_key = key[:10] + "new/" + key[10:] # else: # dest_key = "new/" + key # assert source[key] == dest[dest_key] # def test_source_dest_path(self): # source = self.source # # paths should be normalized # for source_path in "bar", "bar/", "/bar", "/bar/": # for dest_path in "new", "new/", "/new", "/new/": # dest = self._get_dest_store() # copy_store(source, dest, source_path=source_path, dest_path=dest_path) # assert 2 == len(dest) # for key in source: # if key.startswith("bar/"): # dest_key = "new/" + key.split("bar/")[1] # assert source[key] == dest[dest_key] # else: # assert key not in dest # assert ("new/" + key) not in dest # def test_excludes_includes(self): # source = self.source # # single excludes # dest = self._get_dest_store() # excludes = "f.*" # copy_store(source, dest, excludes=excludes) # assert len(dest) == 2 # root = "" # assert root + "foo" not in dest # # multiple excludes # dest = self._get_dest_store() # excludes = "b.z", ".*x" # copy_store(source, dest, excludes=excludes) # assert len(dest) == 1 # assert root + "foo" in dest # assert root + "bar/baz" not in dest # assert root + "bar/qux" not in dest # # excludes and includes # dest = self._get_dest_store() # excludes = "b.*" # includes = ".*x" # copy_store(source, dest, excludes=excludes, includes=includes) # assert len(dest) == 2 # assert root + "foo" in dest # assert root + "bar/baz" not in dest # assert root + "bar/qux" in dest # def test_dry_run(self): # source = self.source # dest = self._get_dest_store() # copy_store(source, dest, dry_run=True) # assert 0 == len(dest) # def test_if_exists(self): # source = self.source # dest = self._get_dest_store() # root = "" # dest[root + "bar/baz"] = b"mmm" # # default ('raise') # with pytest.raises(CopyError): # copy_store(source, dest) # # explicit 'raise' # with pytest.raises(CopyError): # copy_store(source, dest, if_exists="raise") # # skip # copy_store(source, dest, if_exists="skip") # assert 3 == len(dest) # assert dest[root + "foo"] == b"xxx" # assert dest[root + "bar/baz"] == b"mmm" # assert dest[root + "bar/qux"] == b"zzz" # # replace # copy_store(source, dest, if_exists="replace") # assert 3 == len(dest) # assert dest[root + "foo"] == b"xxx" # assert dest[root + "bar/baz"] == b"yyy" # assert dest[root + "bar/qux"] == b"zzz" # # invalid option # with pytest.raises(ValueError): # copy_store(source, dest, if_exists="foobar") # def check_copied_array(original, copied, without_attrs=False, expect_props=None): # # setup # source_h5py = original.__module__.startswith("h5py.") # dest_h5py = copied.__module__.startswith("h5py.") # zarr_to_zarr = not (source_h5py or dest_h5py) # h5py_to_h5py = source_h5py and dest_h5py # zarr_to_h5py = not source_h5py and dest_h5py # h5py_to_zarr = 
source_h5py and not dest_h5py # if expect_props is None: # expect_props = dict() # else: # expect_props = expect_props.copy() # # common properties in zarr and h5py # for p in "dtype", "shape", "chunks": # expect_props.setdefault(p, getattr(original, p)) # # zarr-specific properties # if zarr_to_zarr: # for p in "compressor", "filters", "order", "fill_value": # expect_props.setdefault(p, getattr(original, p)) # # h5py-specific properties # if h5py_to_h5py: # for p in ( # "maxshape", # "compression", # "compression_opts", # "shuffle", # "scaleoffset", # "fletcher32", # "fillvalue", # ): # expect_props.setdefault(p, getattr(original, p)) # # common properties with some name differences # if h5py_to_zarr: # expect_props.setdefault("fill_value", original.fillvalue) # if zarr_to_h5py: # expect_props.setdefault("fillvalue", original.fill_value) # # compare properties # for k, v in expect_props.items(): # assert v == getattr(copied, k) # # compare data # assert_array_equal(original[:], copied[:]) # # compare attrs # if without_attrs: # for k in original.attrs.keys(): # assert k not in copied.attrs # else: # if dest_h5py and "filters" in original.attrs: # # special case in v3 (storing filters metadata under attributes) # # we explicitly do not copy this info over to HDF5 # original_attrs = original.attrs.asdict().copy() # original_attrs.pop("filters") # else: # original_attrs = original.attrs # assert sorted(original_attrs.items()) == sorted(copied.attrs.items()) # def check_copied_group(original, copied, without_attrs=False, expect_props=None, shallow=False): # # setup # if expect_props is None: # expect_props = dict() # else: # expect_props = expect_props.copy() # # compare children # for k, v in original.items(): # if hasattr(v, "shape"): # assert k in copied # check_copied_array(v, copied[k], without_attrs=without_attrs, expect_props=expect_props) # elif shallow: # assert k not in copied # else: # assert k in copied # check_copied_group( # v, # copied[k], # without_attrs=without_attrs, # shallow=shallow, # expect_props=expect_props, # ) # # compare attrs # if without_attrs: # for k in original.attrs.keys(): # assert k not in copied.attrs # else: # assert sorted(original.attrs.items()) == sorted(copied.attrs.items()) # def test_copy_all(): # """ # https://github.com/zarr-developers/zarr-python/issues/269 # copy_all used to not copy attributes as `.keys()` does not return hidden `.zattrs`. 
# """ # original_group = zarr.group(store=MemoryStore(), overwrite=True) # original_group.attrs["info"] = "group attrs" # original_subgroup = original_group.create_group("subgroup") # original_subgroup.attrs["info"] = "sub attrs" # destination_group = zarr.group(store=MemoryStore(), overwrite=True) # # copy from memory to directory store # copy_all( # original_group, # destination_group, # dry_run=False, # ) # assert "subgroup" in destination_group # assert destination_group.attrs["info"] == "group attrs" # assert destination_group.subgroup.attrs["info"] == "sub attrs" # class TestCopy: # @pytest.fixture(params=[False, True], ids=["zarr", "hdf5"]) # def source(self, request, tmpdir): # def prep_source(source): # foo = source.create_group("foo") # foo.attrs["experiment"] = "weird science" # baz = foo.create_dataset("bar/baz", data=np.arange(100), chunks=(50,)) # baz.attrs["units"] = "metres" # if request.param: # extra_kws = dict( # compression="gzip", # compression_opts=3, # fillvalue=84, # shuffle=True, # fletcher32=True, # ) # else: # extra_kws = dict(compressor=Zlib(3), order="F", fill_value=42, filters=[Adler32()]) # source.create_dataset( # "spam", # data=np.arange(100, 200).reshape(20, 5), # chunks=(10, 2), # dtype="i2", # **extra_kws, # ) # return source # if request.param: # h5py = pytest.importorskip("h5py") # fn = tmpdir.join("source.h5") # with h5py.File(str(fn), mode="w") as h5f: # yield prep_source(h5f) # else: # yield prep_source(group()) # @pytest.fixture(params=[False, True], ids=["zarr", "hdf5"]) # def dest(self, request, tmpdir): # if request.param: # h5py = pytest.importorskip("h5py") # fn = tmpdir.join("dest.h5") # with h5py.File(str(fn), mode="w") as h5f: # yield h5f # else: # yield group() # def test_copy_array(self, source, dest): # # copy array with default options # copy(source["foo/bar/baz"], dest) # check_copied_array(source["foo/bar/baz"], dest["baz"]) # copy(source["spam"], dest) # check_copied_array(source["spam"], dest["spam"]) # def test_copy_bad_dest(self, source, dest): # # try to copy to an array, dest must be a group # dest = dest.create_dataset("eggs", shape=(100,)) # with pytest.raises(ValueError): # copy(source["foo/bar/baz"], dest) # def test_copy_array_name(self, source, dest): # # copy array with name # copy(source["foo/bar/baz"], dest, name="qux") # assert "baz" not in dest # check_copied_array(source["foo/bar/baz"], dest["qux"]) # def test_copy_array_create_options(self, source, dest): # dest_h5py = dest.__module__.startswith("h5py.") # # copy array, provide creation options # compressor = Zlib(9) # create_kws = dict(chunks=(10,)) # if dest_h5py: # create_kws.update( # compression="gzip", compression_opts=9, shuffle=True, fletcher32=True, fillvalue=42 # ) # else: # create_kws.update(compressor=compressor, fill_value=42, order="F", filters=[Adler32()]) # copy(source["foo/bar/baz"], dest, without_attrs=True, **create_kws) # check_copied_array( # source["foo/bar/baz"], dest["baz"], without_attrs=True, expect_props=create_kws # ) # def test_copy_array_exists_array(self, source, dest): # # copy array, dest array in the way # dest.create_dataset("baz", shape=(10,)) # # raise # with pytest.raises(CopyError): # # should raise by default # copy(source["foo/bar/baz"], dest) # assert (10,) == dest["baz"].shape # with pytest.raises(CopyError): # copy(source["foo/bar/baz"], dest, if_exists="raise") # assert (10,) == dest["baz"].shape # # skip # copy(source["foo/bar/baz"], dest, if_exists="skip") # assert (10,) == dest["baz"].shape # # replace # 
copy(source["foo/bar/baz"], dest, if_exists="replace") # check_copied_array(source["foo/bar/baz"], dest["baz"]) # # invalid option # with pytest.raises(ValueError): # copy(source["foo/bar/baz"], dest, if_exists="foobar") # def test_copy_array_exists_group(self, source, dest): # # copy array, dest group in the way # dest.create_group("baz") # # raise # with pytest.raises(CopyError): # copy(source["foo/bar/baz"], dest) # assert not hasattr(dest["baz"], "shape") # with pytest.raises(CopyError): # copy(source["foo/bar/baz"], dest, if_exists="raise") # assert not hasattr(dest["baz"], "shape") # # skip # copy(source["foo/bar/baz"], dest, if_exists="skip") # assert not hasattr(dest["baz"], "shape") # # replace # copy(source["foo/bar/baz"], dest, if_exists="replace") # check_copied_array(source["foo/bar/baz"], dest["baz"]) # def test_copy_array_skip_initialized(self, source, dest): # dest_h5py = dest.__module__.startswith("h5py.") # dest.create_dataset("baz", shape=(100,), chunks=(10,), dtype="i8") # assert not np.all(source["foo/bar/baz"][:] == dest["baz"][:]) # if dest_h5py: # with pytest.raises(ValueError): # # not available with copy to h5py # copy(source["foo/bar/baz"], dest, if_exists="skip_initialized") # else: # # copy array, dest array exists but not yet initialized # copy(source["foo/bar/baz"], dest, if_exists="skip_initialized") # check_copied_array(source["foo/bar/baz"], dest["baz"]) # # copy array, dest array exists and initialized, will be skipped # dest["baz"][:] = np.arange(100, 200) # copy(source["foo/bar/baz"], dest, if_exists="skip_initialized") # assert_array_equal(np.arange(100, 200), dest["baz"][:]) # assert not np.all(source["foo/bar/baz"][:] == dest["baz"][:]) # def test_copy_group(self, source, dest): # # copy group, default options # copy(source["foo"], dest) # check_copied_group(source["foo"], dest["foo"]) # def test_copy_group_no_name(self, source, dest): # with pytest.raises(TypeError): # # need a name if copy root # copy(source, dest) # copy(source, dest, name="root") # check_copied_group(source, dest["root"]) # def test_copy_group_options(self, source, dest): # # copy group, non-default options # copy(source["foo"], dest, name="qux", without_attrs=True) # assert "foo" not in dest # check_copied_group(source["foo"], dest["qux"], without_attrs=True) # def test_copy_group_shallow(self, source, dest): # # copy group, shallow # copy(source, dest, name="eggs", shallow=True) # check_copied_group(source, dest["eggs"], shallow=True) # def test_copy_group_exists_group(self, source, dest): # # copy group, dest groups exist # dest.create_group("foo/bar") # copy(source["foo"], dest) # check_copied_group(source["foo"], dest["foo"]) # def test_copy_group_exists_array(self, source, dest): # # copy group, dest array in the way # dest.create_dataset("foo/bar", shape=(10,)) # # raise # with pytest.raises(CopyError): # copy(source["foo"], dest) # assert dest["foo/bar"].shape == (10,) # with pytest.raises(CopyError): # copy(source["foo"], dest, if_exists="raise") # assert dest["foo/bar"].shape == (10,) # # skip # copy(source["foo"], dest, if_exists="skip") # assert dest["foo/bar"].shape == (10,) # # replace # copy(source["foo"], dest, if_exists="replace") # check_copied_group(source["foo"], dest["foo"]) # def test_copy_group_dry_run(self, source, dest): # # dry run, empty destination # n_copied, n_skipped, n_bytes_copied = copy( # source["foo"], dest, dry_run=True, return_stats=True # ) # assert 0 == len(dest) # assert 3 == n_copied # assert 0 == n_skipped # assert 0 == n_bytes_copied # 
# dry run, array exists in destination # baz = np.arange(100, 200) # dest.create_dataset("foo/bar/baz", data=baz) # assert not np.all(source["foo/bar/baz"][:] == dest["foo/bar/baz"][:]) # assert 1 == len(dest) # # raise # with pytest.raises(CopyError): # copy(source["foo"], dest, dry_run=True) # assert 1 == len(dest) # # skip # n_copied, n_skipped, n_bytes_copied = copy( # source["foo"], dest, dry_run=True, if_exists="skip", return_stats=True # ) # assert 1 == len(dest) # assert 2 == n_copied # assert 1 == n_skipped # assert 0 == n_bytes_copied # assert_array_equal(baz, dest["foo/bar/baz"]) # # replace # n_copied, n_skipped, n_bytes_copied = copy( # source["foo"], dest, dry_run=True, if_exists="replace", return_stats=True # ) # assert 1 == len(dest) # assert 3 == n_copied # assert 0 == n_skipped # assert 0 == n_bytes_copied # assert_array_equal(baz, dest["foo/bar/baz"]) # def test_logging(self, source, dest, tmpdir): # # callable log # copy(source["foo"], dest, dry_run=True, log=print) # # file name # fn = str(tmpdir.join("log_name")) # copy(source["foo"], dest, dry_run=True, log=fn) # # file # with tmpdir.join("log_file").open(mode="w") as f: # copy(source["foo"], dest, dry_run=True, log=f) # # bad option # with pytest.raises(TypeError): # copy(source["foo"], dest, dry_run=True, log=True) def test_open_positional_args_deprecated() -> None: store = MemoryStore() with pytest.warns(FutureWarning, match="pass"): open(store, "w", shape=(1,)) def test_save_array_positional_args_deprecated() -> None: store = MemoryStore() with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="zarr_version is deprecated", category=DeprecationWarning ) with pytest.warns(FutureWarning, match="pass"): save_array( store, np.ones( 1, ), 3, ) def test_group_positional_args_deprecated() -> None: store = MemoryStore() with pytest.warns(FutureWarning, match="pass"): group(store, True) def test_open_group_positional_args_deprecated() -> None: store = MemoryStore() with pytest.warns(FutureWarning, match="pass"): open_group(store, "w") def test_open_falls_back_to_open_group() -> None: # https://github.com/zarr-developers/zarr-python/issues/2309 store = MemoryStore() zarr.open_group(store, attributes={"key": "value"}) group = zarr.open(store) assert isinstance(group, Group) assert group.attrs == {"key": "value"} async def test_open_falls_back_to_open_group_async() -> None: # https://github.com/zarr-developers/zarr-python/issues/2309 store = MemoryStore() await zarr.api.asynchronous.open_group(store, attributes={"key": "value"}) group = await zarr.api.asynchronous.open(store=store) assert isinstance(group, zarr.core.group.AsyncGroup) assert group.attrs == {"key": "value"} @pytest.mark.parametrize("mode", ["r", "r+", "w", "a"]) def test_open_modes_creates_group(tmp_path: pathlib.Path, mode: str) -> None: # https://github.com/zarr-developers/zarr-python/issues/2490 zarr_dir = tmp_path / f"mode-{mode}-test.zarr" if mode in ["r", "r+"]: # Expect FileNotFoundError to be raised if 'r' or 'r+' mode with pytest.raises(FileNotFoundError): zarr.open(store=zarr_dir, mode=mode) else: group = zarr.open(store=zarr_dir, mode=mode) assert isinstance(group, Group) async def test_metadata_validation_error() -> None: with pytest.raises( MetadataValidationError, match="Invalid value for 'zarr_format'. Expected '2, 3, or None'. Got '3.0'.", ): await zarr.api.asynchronous.open_group(zarr_format="3.0") # type: ignore [arg-type] with pytest.raises( MetadataValidationError, match="Invalid value for 'zarr_format'. 
Expected '2, 3, or None'. Got '3.0'.", ): await zarr.api.asynchronous.open_array(shape=(1,), zarr_format="3.0") # type: ignore [arg-type] @pytest.mark.parametrize( "store", ["local", "memory", "zip"], indirect=True, ) def test_open_array_with_mode_r_plus(store: Store) -> None: # 'r+' means read/write (must exist) with pytest.raises(FileNotFoundError): zarr.open_array(store=store, mode="r+") zarr.ones(store=store, shape=(3, 3)) z2 = zarr.open_array(store=store, mode="r+") assert isinstance(z2, Array) assert (z2[:] == 1).all() z2[:] = 3 def test_api_exports() -> None: """ Test that the sync API and the async API export the same objects """ assert zarr.api.asynchronous.__all__ == zarr.api.synchronous.__all__ @gpu_test @pytest.mark.parametrize( "store", ["local", "memory", "zip"], indirect=True, ) @pytest.mark.parametrize("zarr_format", [None, 2, 3]) def test_gpu_basic(store: Store, zarr_format: ZarrFormat | None) -> None: import cupy as cp if zarr_format == 2: # Without this, the zstd codec attempts to convert the cupy # array to bytes. compressors = None else: compressors = "auto" with zarr.config.enable_gpu(): src = cp.random.uniform(size=(100, 100)) # allocate on the device z = zarr.create_array( store, name="a", shape=src.shape, chunks=(10, 10), dtype=src.dtype, overwrite=True, zarr_format=zarr_format, compressors=compressors, ) z[:10, :10] = src[:10, :10] result = z[:10, :10] # assert_array_equal doesn't check the type assert isinstance(result, type(src)) cp.testing.assert_array_equal(result, src[:10, :10]) zarr-python-3.0.6/tests/test_array.py000066400000000000000000001415461476711733500177370ustar00rootroot00000000000000import dataclasses import json import math import multiprocessing as mp import pickle import re import sys from itertools import accumulate from typing import TYPE_CHECKING, Any, Literal from unittest import mock import numcodecs import numpy as np import pytest from packaging.version import Version import zarr.api.asynchronous import zarr.api.synchronous as sync_api from zarr import Array, AsyncArray, Group from zarr.abc.store import Store from zarr.codecs import ( BytesCodec, GzipCodec, TransposeCodec, VLenBytesCodec, VLenUTF8Codec, ZstdCodec, ) from zarr.core._info import ArrayInfo from zarr.core.array import ( CompressorsLike, FiltersLike, _get_default_chunk_encoding_v2, _get_default_chunk_encoding_v3, _parse_chunk_encoding_v2, _parse_chunk_encoding_v3, chunks_initialized, create_array, ) from zarr.core.buffer import default_buffer_prototype from zarr.core.buffer.cpu import NDBuffer from zarr.core.chunk_grids import _auto_partition from zarr.core.common import JSON, MemoryOrder, ZarrFormat from zarr.core.group import AsyncGroup from zarr.core.indexing import BasicIndexer, ceildiv from zarr.core.metadata.v3 import ArrayV3Metadata, DataType from zarr.core.sync import sync from zarr.errors import ContainsArrayError, ContainsGroupError from zarr.storage import LocalStore, MemoryStore, StorePath if TYPE_CHECKING: from zarr.core.array_spec import ArrayConfigLike from zarr.core.metadata.v2 import ArrayV2Metadata @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) @pytest.mark.parametrize("zarr_format", [2, 3]) @pytest.mark.parametrize("overwrite", [True, False]) @pytest.mark.parametrize("extant_node", ["array", "group"]) def test_array_creation_existing_node( store: LocalStore | MemoryStore, zarr_format: ZarrFormat, overwrite: bool, extant_node: Literal["array", "group"], ) -> None: """ Check that an existing array or group is handled as 
expected during array creation.
    """
    spath = StorePath(store)
    group = Group.from_store(spath, zarr_format=zarr_format)
    expected_exception: type[ContainsArrayError | ContainsGroupError]

    if extant_node == "array":
        expected_exception = ContainsArrayError
        _ = group.create_array("extant", shape=(10,), dtype="uint8")
    elif extant_node == "group":
        expected_exception = ContainsGroupError
        _ = group.create_group("extant")
    else:
        raise AssertionError

    new_shape = (2, 2)
    new_dtype = "float32"

    if overwrite:
        if not store.supports_deletes:
            pytest.skip("store does not support deletes")
        arr_new = zarr.create_array(
            spath / "extant",
            shape=new_shape,
            dtype=new_dtype,
            overwrite=overwrite,
            zarr_format=zarr_format,
        )
        assert arr_new.shape == new_shape
        assert arr_new.dtype == new_dtype
    else:
        with pytest.raises(expected_exception):
            arr_new = zarr.create_array(
                spath / "extant",
                shape=new_shape,
                dtype=new_dtype,
                overwrite=overwrite,
                zarr_format=zarr_format,
            )


@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"])
@pytest.mark.parametrize("zarr_format", [2, 3])
async def test_create_creates_parents(
    store: LocalStore | MemoryStore, zarr_format: ZarrFormat
) -> None:
    # prepare a root node, with some data set
    await zarr.api.asynchronous.open_group(
        store=store, path="a", zarr_format=zarr_format, attributes={"key": "value"}
    )

    # create a child node with a couple intermediates
    await zarr.api.asynchronous.create(
        shape=(2, 2), store=store, path="a/b/c/d", zarr_format=zarr_format
    )
    parts = ["a", "a/b", "a/b/c"]

    if zarr_format == 2:
        files = [".zattrs", ".zgroup"]
    else:
        files = ["zarr.json"]
    expected = [f"{part}/{file}" for file in files for part in parts]

    if zarr_format == 2:
        expected.extend([".zattrs", ".zgroup", "a/b/c/d/.zarray", "a/b/c/d/.zattrs"])
    else:
        expected.extend(["zarr.json", "a/b/c/d/zarr.json"])

    expected = sorted(expected)
    result = sorted([x async for x in store.list_prefix("")])
    assert result == expected

    paths = ["a", "a/b", "a/b/c"]
    for path in paths:
        g = await zarr.api.asynchronous.open_group(store=store, path=path)
        assert isinstance(g, AsyncGroup)


@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"])
@pytest.mark.parametrize("zarr_format", [2, 3])
def test_array_name_properties_no_group(
    store: LocalStore | MemoryStore, zarr_format: ZarrFormat
) -> None:
    arr = zarr.create_array(
        store=store, shape=(100,), chunks=(10,), zarr_format=zarr_format, dtype="i4"
    )
    assert arr.path == ""
    assert arr.name == "/"
    assert arr.basename == ""


@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"])
@pytest.mark.parametrize("zarr_format", [2, 3])
def test_array_name_properties_with_group(
    store: LocalStore | MemoryStore, zarr_format: ZarrFormat
) -> None:
    root = Group.from_store(store=store, zarr_format=zarr_format)
    foo = root.create_array("foo", shape=(100,), chunks=(10,), dtype="i4")
    assert foo.path == "foo"
    assert foo.name == "/foo"
    assert foo.basename == "foo"

    bar = root.create_group("bar")
    spam = bar.create_array("spam", shape=(100,), chunks=(10,), dtype="i4")
    assert spam.path == "bar/spam"
    assert spam.name == "/bar/spam"
    assert spam.basename == "spam"


@pytest.mark.parametrize("store", ["memory"], indirect=True)
@pytest.mark.parametrize("specify_fill_value", [True, False])
@pytest.mark.parametrize("dtype_str", ["bool", "uint8", "complex64"])
def test_array_v3_fill_value_default(
    store: MemoryStore, specify_fill_value: bool, dtype_str: str
) -> None:
    """
    Test that creating an array with the fill_value parameter set to None, or unspecified,
    results in the expected fill_value attribute of the array, i.e. 0 cast to the array's dtype.
    """
    shape = (10,)
    default_fill_value = 0
    if specify_fill_value:
        arr = zarr.create_array(
            store=store,
            shape=shape,
            dtype=dtype_str,
            zarr_format=3,
            chunks=shape,
            fill_value=None,
        )
    else:
        arr = zarr.create_array(
            store=store, shape=shape, dtype=dtype_str, zarr_format=3, chunks=shape
        )

    assert arr.fill_value == np.dtype(dtype_str).type(default_fill_value)
    assert arr.fill_value.dtype == arr.dtype


@pytest.mark.parametrize("store", ["memory"], indirect=True)
@pytest.mark.parametrize(
    ("dtype_str", "fill_value"),
    [("bool", True), ("uint8", 99), ("float32", -99.9), ("complex64", 3 + 4j)],
)
def test_array_v3_fill_value(store: MemoryStore, fill_value: int, dtype_str: str) -> None:
    shape = (10,)
    arr = zarr.create_array(
        store=store,
        shape=shape,
        dtype=dtype_str,
        zarr_format=3,
        chunks=shape,
        fill_value=fill_value,
    )

    assert arr.fill_value == np.dtype(dtype_str).type(fill_value)
    assert arr.fill_value.dtype == arr.dtype


def test_create_positional_args_deprecated() -> None:
    store = MemoryStore()
    with pytest.warns(FutureWarning, match="Pass"):
        zarr.Array.create(store, (2, 2), dtype="f8")


def test_selection_positional_args_deprecated() -> None:
    store = MemoryStore()
    arr = zarr.create_array(store, shape=(2, 2), dtype="f8")

    with pytest.warns(FutureWarning, match="Pass out"):
        arr.get_basic_selection(..., NDBuffer(array=np.empty((2, 2))))

    with pytest.warns(FutureWarning, match="Pass fields"):
        arr.set_basic_selection(..., 1, None)

    with pytest.warns(FutureWarning, match="Pass out"):
        arr.get_orthogonal_selection(..., NDBuffer(array=np.empty((2, 2))))

    with pytest.warns(FutureWarning, match="Pass"):
        arr.set_orthogonal_selection(..., 1, None)

    with pytest.warns(FutureWarning, match="Pass"):
        arr.get_mask_selection(np.zeros((2, 2), dtype=bool), NDBuffer(array=np.empty((0,))))

    with pytest.warns(FutureWarning, match="Pass"):
        arr.set_mask_selection(np.zeros((2, 2), dtype=bool), 1, None)

    with pytest.warns(FutureWarning, match="Pass"):
        arr.get_coordinate_selection(([0, 1], [0, 1]), NDBuffer(array=np.empty((2,))))

    with pytest.warns(FutureWarning, match="Pass"):
        arr.set_coordinate_selection(([0, 1], [0, 1]), 1, None)

    with pytest.warns(FutureWarning, match="Pass"):
        arr.get_block_selection((0, slice(None)), NDBuffer(array=np.empty((2, 2))))

    with pytest.warns(FutureWarning, match="Pass"):
        arr.set_block_selection((0, slice(None)), 1, None)


@pytest.mark.parametrize("store", ["memory"], indirect=True)
async def test_array_v3_nan_fill_value(store: MemoryStore) -> None:
    shape = (10,)
    arr = zarr.create_array(
        store=store,
        shape=shape,
        dtype=np.float64,
        zarr_format=3,
        chunks=shape,
        fill_value=np.nan,
    )
    arr[:] = np.nan

    assert np.isnan(arr.fill_value)
    assert arr.fill_value.dtype == arr.dtype
    # all fill value chunk is an empty chunk, and should not be written
    assert len([a async for a in store.list_prefix("/")]) == 0


@pytest.mark.parametrize("store", ["local"], indirect=["store"])
@pytest.mark.parametrize("zarr_format", [2, 3])
async def test_serializable_async_array(
    store: LocalStore | MemoryStore, zarr_format: ZarrFormat
) -> None:
    expected = await zarr.api.asynchronous.create_array(
        store=store, shape=(100,), chunks=(10,), zarr_format=zarr_format, dtype="i4"
    )
    # await expected.setitems(list(range(100)))

    p = pickle.dumps(expected)
    actual = pickle.loads(p)

    assert actual == expected
    # np.testing.assert_array_equal(await actual.getitem(slice(None)), await expected.getitem(slice(None)))
    # TODO: uncomment the parts of this test that will be impacted by the config/prototype changes in flight


@pytest.mark.parametrize("store", ["local"], indirect=["store"])
@pytest.mark.parametrize("zarr_format", [2, 3])
def test_serializable_sync_array(store: LocalStore, zarr_format: ZarrFormat) -> None:
    expected = zarr.create_array(
        store=store, shape=(100,), chunks=(10,), zarr_format=zarr_format, dtype="i4"
    )
    expected[:] = list(range(100))

    p = pickle.dumps(expected)
    actual = pickle.loads(p)

    assert actual == expected
    np.testing.assert_array_equal(actual[:], expected[:])


@pytest.mark.parametrize("store", ["memory"], indirect=True)
def test_storage_transformers(store: MemoryStore) -> None:
    """
    Test that providing an actual storage transformer raises an error, since storage
    transformers are not supported in zarr-python at this time.
    """
    metadata_dict: dict[str, JSON] = {
        "zarr_format": 3,
        "node_type": "array",
        "shape": (10,),
        "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": (1,)}},
        "data_type": "uint8",
        "chunk_key_encoding": {"name": "v2", "configuration": {"separator": "/"}},
        "codecs": (BytesCodec().to_dict(),),
        "fill_value": 0,
        "storage_transformers": ({"test": "should_raise"}),
    }
    match = "Arrays with storage transformers are not supported in zarr-python at this time."
    with pytest.raises(ValueError, match=match):
        Array.from_dict(StorePath(store), data=metadata_dict)


@pytest.mark.parametrize("test_cls", [Array, AsyncArray[Any]])
@pytest.mark.parametrize("nchunks", [2, 5, 10])
def test_nchunks(test_cls: type[Array] | type[AsyncArray[Any]], nchunks: int) -> None:
    """
    Test that nchunks returns the number of chunks defined for the array.
    """
    store = MemoryStore()
    shape = 100
    arr = zarr.create_array(store, shape=(shape,), chunks=(ceildiv(shape, nchunks),), dtype="i4")
    expected = nchunks
    if test_cls == Array:
        observed = arr.nchunks
    else:
        observed = arr._async_array.nchunks
    assert observed == expected


@pytest.mark.parametrize("test_cls", [Array, AsyncArray[Any]])
async def test_nchunks_initialized(test_cls: type[Array] | type[AsyncArray[Any]]) -> None:
    """
    Test that nchunks_initialized accurately returns the number of stored chunks.
    """
    store = MemoryStore()
    arr = zarr.create_array(store, shape=(100,), chunks=(10,), dtype="i4")

    # write chunks one at a time
    for idx, region in enumerate(arr._iter_chunk_regions()):
        arr[region] = 1
        expected = idx + 1
        if test_cls == Array:
            observed = arr.nchunks_initialized
        else:
            observed = await arr._async_array.nchunks_initialized()
        assert observed == expected

    # delete chunks
    for idx, key in enumerate(arr._iter_chunk_keys()):
        sync(arr.store_path.store.delete(key))
        if test_cls == Array:
            observed = arr.nchunks_initialized
        else:
            observed = await arr._async_array.nchunks_initialized()
        expected = arr.nchunks - idx - 1
        assert observed == expected


async def test_chunks_initialized() -> None:
    """
    Test that chunks_initialized accurately returns the keys of stored chunks.
    """
    store = MemoryStore()
    arr = zarr.create_array(store, shape=(100,), chunks=(10,), dtype="i4")

    chunks_accumulated = tuple(
        accumulate(tuple(tuple(v.split(" ")) for v in arr._iter_chunk_keys()))
    )
    for keys, region in zip(chunks_accumulated, arr._iter_chunk_regions(), strict=False):
        arr[region] = 1
        observed = sorted(await chunks_initialized(arr._async_array))
        expected = sorted(keys)
        assert observed == expected


def test_nbytes_stored() -> None:
    arr = zarr.create(shape=(100,), chunks=(10,), dtype="i4", codecs=[BytesCodec()])
    result = arr.nbytes_stored()
    assert result == 502  # the size of the metadata document. This is a fragile test.
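    # A hedged arithmetic note on the sizes asserted in this test: with
    # codecs=[BytesCodec()] there is no compression, so each chunk holds
    # 10 int32 values = 40 bytes. Writing the first 5 chunks should add
    # 5 * 40 = 200 bytes (502 -> 702), and filling the remaining 5 chunks
    # adds another 200 bytes (702 -> 902), assuming the metadata document
    # itself stays the same size.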
    arr[:50] = 1
    result = arr.nbytes_stored()
    assert result == 702  # the size with 5 chunks filled.

    arr[50:] = 2
    result = arr.nbytes_stored()
    assert result == 902  # the size with all chunks filled.


async def test_nbytes_stored_async() -> None:
    arr = await zarr.api.asynchronous.create(
        shape=(100,), chunks=(10,), dtype="i4", codecs=[BytesCodec()]
    )
    result = await arr.nbytes_stored()
    assert result == 502  # the size of the metadata document. This is a fragile test.

    await arr.setitem(slice(50), 1)
    result = await arr.nbytes_stored()
    assert result == 702  # the size with 5 chunks filled.

    await arr.setitem(slice(50, 100), 2)
    result = await arr.nbytes_stored()
    assert result == 902  # the size with all chunks filled.


def test_default_fill_values() -> None:
    a = zarr.Array.create(MemoryStore(), shape=5, chunk_shape=5, dtype="<U4")
    assert a.fill_value == ""

    b = zarr.Array.create(MemoryStore(), shape=5, chunk_shape=5, dtype="<S4")
    assert b.fill_value == b""

    c = zarr.Array.create(MemoryStore(), shape=5, chunk_shape=5, dtype="i")
    assert c.fill_value == 0

    d = zarr.Array.create(MemoryStore(), shape=5, chunk_shape=5, dtype="f")
    assert d.fill_value == 0.0


def test_vlen_errors() -> None:
    with pytest.raises(ValueError, match="At least one ArrayBytesCodec is required."):
        Array.create(MemoryStore(), shape=5, chunks=5, dtype="<U4", codecs=[])

    with pytest.raises(
        ValueError,
        match="For string dtype, ArrayBytesCodec must be `VLenUTF8Codec`, got `BytesCodec`.",
    ):
        Array.create(MemoryStore(), shape=5, chunks=5, dtype="<U4", codecs=[BytesCodec()])

    with pytest.raises(ValueError, match="Only one ArrayBytesCodec is allowed."):
        Array.create(
            MemoryStore(), shape=5, chunks=5, dtype="<U4", codecs=[BytesCodec(), VLenBytesCodec()]
        )


def test_update_attrs(zarr_format: ZarrFormat) -> None:
    # regression test for https://github.com/zarr-developers/zarr-python/issues/2328
    store = MemoryStore()
    arr = zarr.create_array(
        store=store, shape=(5,), chunks=(5,), dtype="f8", zarr_format=zarr_format
    )
    arr.attrs["foo"] = "bar"
    assert arr.attrs["foo"] == "bar"

    arr2 = zarr.open_array(store=store, zarr_format=zarr_format)
    assert arr2.attrs["foo"] == "bar"


@pytest.mark.parametrize(("chunks", "shards"), [((2, 2), None), ((2, 2), (4, 4))])
class TestInfo:
    def test_info_v2(self, chunks: tuple[int, int], shards: tuple[int, int] | None) -> None:
        arr = zarr.create_array(store={}, shape=(8, 8), dtype="f8", chunks=chunks, zarr_format=2)
        result = arr.info
        expected = ArrayInfo(
            _zarr_format=2,
            _data_type=np.dtype("float64"),
            _shape=(8, 8),
            _chunk_shape=chunks,
            _shard_shape=None,
            _order="C",
            _read_only=False,
            _store_type="MemoryStore",
            _count_bytes=512,
            _compressors=(numcodecs.Zstd(),),
        )
        assert result == expected

    def test_info_v3(self, chunks: tuple[int, int], shards: tuple[int, int] | None) -> None:
        arr = zarr.create_array(store={}, shape=(8, 8), dtype="f8", chunks=chunks, shards=shards)
        result = arr.info
        expected = ArrayInfo(
            _zarr_format=3,
            _data_type=DataType.parse("float64"),
            _shape=(8, 8),
            _chunk_shape=chunks,
            _shard_shape=shards,
            _order="C",
            _read_only=False,
            _store_type="MemoryStore",
            _compressors=(ZstdCodec(),),
            _serializer=BytesCodec(),
            _count_bytes=512,
        )
        assert result == expected

    def test_info_complete(self, chunks: tuple[int, int], shards: tuple[int, int] | None) -> None:
        arr = zarr.create_array(
            store={},
            shape=(8, 8),
            dtype="f8",
            chunks=chunks,
            shards=shards,
            compressors=(),
        )
        result = arr.info_complete()
        expected = ArrayInfo(
            _zarr_format=3,
            _data_type=DataType.parse("float64"),
            _shape=(8, 8),
            _chunk_shape=chunks,
            _shard_shape=shards,
            _order="C",
            _read_only=False,
            _store_type="MemoryStore",
            _serializer=BytesCodec(),
            _count_bytes=512,
            _count_chunks_initialized=0,
            _count_bytes_stored=521 if shards is None else 982,  # the metadata?
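            # Hedged note: _count_bytes is the in-memory capacity of the array
            # (8 * 8 float64 elements * 8 bytes = 512), while _count_bytes_stored
            # for the still-empty array presumably measures only the stored
            # metadata document, which is larger when the sharding codec
            # configuration is present.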
) assert result == expected arr[:4, :4] = 10 result = arr.info_complete() if shards is None: expected = dataclasses.replace( expected, _count_chunks_initialized=4, _count_bytes_stored=649 ) else: expected = dataclasses.replace( expected, _count_chunks_initialized=1, _count_bytes_stored=1178 ) assert result == expected async def test_info_v2_async( self, chunks: tuple[int, int], shards: tuple[int, int] | None ) -> None: arr = await zarr.api.asynchronous.create_array( store={}, shape=(8, 8), dtype="f8", chunks=chunks, zarr_format=2 ) result = arr.info expected = ArrayInfo( _zarr_format=2, _data_type=np.dtype("float64"), _shape=(8, 8), _chunk_shape=(2, 2), _shard_shape=None, _order="C", _read_only=False, _store_type="MemoryStore", _count_bytes=512, _compressors=(numcodecs.Zstd(),), ) assert result == expected async def test_info_v3_async( self, chunks: tuple[int, int], shards: tuple[int, int] | None ) -> None: arr = await zarr.api.asynchronous.create_array( store={}, shape=(8, 8), dtype="f8", chunks=chunks, shards=shards, ) result = arr.info expected = ArrayInfo( _zarr_format=3, _data_type=DataType.parse("float64"), _shape=(8, 8), _chunk_shape=chunks, _shard_shape=shards, _order="C", _read_only=False, _store_type="MemoryStore", _compressors=(ZstdCodec(),), _serializer=BytesCodec(), _count_bytes=512, ) assert result == expected async def test_info_complete_async( self, chunks: tuple[int, int], shards: tuple[int, int] | None ) -> None: arr = await zarr.api.asynchronous.create_array( store={}, dtype="f8", shape=(8, 8), chunks=chunks, shards=shards, compressors=None, ) result = await arr.info_complete() expected = ArrayInfo( _zarr_format=3, _data_type=DataType.parse("float64"), _shape=(8, 8), _chunk_shape=chunks, _shard_shape=shards, _order="C", _read_only=False, _store_type="MemoryStore", _serializer=BytesCodec(), _count_bytes=512, _count_chunks_initialized=0, _count_bytes_stored=521 if shards is None else 982, # the metadata? 
        )
        assert result == expected

        await arr.setitem((slice(4), slice(4)), 10)
        result = await arr.info_complete()
        if shards is None:
            expected = dataclasses.replace(
                expected, _count_chunks_initialized=4, _count_bytes_stored=553
            )
        else:
            expected = dataclasses.replace(
                expected, _count_chunks_initialized=1, _count_bytes_stored=1178
            )
        assert result == expected


@pytest.mark.parametrize("store", ["memory"], indirect=True)
def test_resize_1d(store: MemoryStore, zarr_format: ZarrFormat) -> None:
    z = zarr.create(
        shape=105, chunks=10, dtype="i4", fill_value=0, store=store, zarr_format=zarr_format
    )
    a = np.arange(105, dtype="i4")
    z[:] = a
    assert (105,) == z.shape
    assert (105,) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10,) == z.chunks
    np.testing.assert_array_equal(a, z[:])

    z.resize(205)
    assert (205,) == z.shape
    assert (205,) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10,) == z.chunks
    np.testing.assert_array_equal(a, z[:105])
    np.testing.assert_array_equal(np.zeros(100, dtype="i4"), z[105:])

    z.resize(55)
    assert (55,) == z.shape
    assert (55,) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10,) == z.chunks
    np.testing.assert_array_equal(a[:55], z[:])

    # via shape setter
    new_shape = (105,)
    z.shape = new_shape
    assert new_shape == z.shape
    assert new_shape == z[:].shape


@pytest.mark.parametrize("store", ["memory"], indirect=True)
def test_resize_2d(store: MemoryStore, zarr_format: ZarrFormat) -> None:
    z = zarr.create(
        shape=(105, 105),
        chunks=(10, 10),
        dtype="i4",
        fill_value=0,
        store=store,
        zarr_format=zarr_format,
    )
    a = np.arange(105 * 105, dtype="i4").reshape((105, 105))
    z[:] = a
    assert (105, 105) == z.shape
    assert (105, 105) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10, 10) == z.chunks
    np.testing.assert_array_equal(a, z[:])

    z.resize((205, 205))
    assert (205, 205) == z.shape
    assert (205, 205) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10, 10) == z.chunks
    np.testing.assert_array_equal(a, z[:105, :105])
    np.testing.assert_array_equal(np.zeros((100, 205), dtype="i4"), z[105:, :])
    np.testing.assert_array_equal(np.zeros((205, 100), dtype="i4"), z[:, 105:])

    z.resize((55, 55))
    assert (55, 55) == z.shape
    assert (55, 55) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10, 10) == z.chunks
    np.testing.assert_array_equal(a[:55, :55], z[:])

    z.resize((55, 1))
    assert (55, 1) == z.shape
    assert (55, 1) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10, 10) == z.chunks
    np.testing.assert_array_equal(a[:55, :1], z[:])

    z.resize((1, 55))
    assert (1, 55) == z.shape
    assert (1, 55) == z[:].shape
    assert np.dtype("i4") == z.dtype
    assert np.dtype("i4") == z[:].dtype
    assert (10, 10) == z.chunks
    np.testing.assert_array_equal(a[:1, :10], z[:, :10])
    np.testing.assert_array_equal(np.zeros((1, 55 - 10), dtype="i4"), z[:, 10:55])

    # via shape setter
    new_shape = (105, 105)
    z.shape = new_shape
    assert new_shape == z.shape
    assert new_shape == z[:].shape


@pytest.mark.parametrize("store", ["memory"], indirect=True)
def test_append_1d(store: MemoryStore, zarr_format: ZarrFormat) -> None:
    a = np.arange(105)
    z = zarr.create(shape=a.shape, chunks=10, dtype=a.dtype, store=store, zarr_format=zarr_format)
    z[:] = a
    assert a.shape == z.shape
    assert a.dtype == z.dtype
    assert (10,) == z.chunks
    np.testing.assert_array_equal(a, z[:])

    b = np.arange(105, 205)
    e = np.append(a, b)
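    # append() is expected to resize the array in place along axis 0; the shape
    # assertion below confirms nothing has changed yet, since e was built with
    # np.append purely as the reference result.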
assert z.shape == (105,) z.append(b) assert e.shape == z.shape assert e.dtype == z.dtype assert (10,) == z.chunks np.testing.assert_array_equal(e, z[:]) # check append handles array-like c = [1, 2, 3] f = np.append(e, c) z.append(c) assert f.shape == z.shape assert f.dtype == z.dtype assert (10,) == z.chunks np.testing.assert_array_equal(f, z[:]) @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_append_2d(store: MemoryStore, zarr_format: ZarrFormat) -> None: a = np.arange(105 * 105, dtype="i4").reshape((105, 105)) z = zarr.create( shape=a.shape, chunks=(10, 10), dtype=a.dtype, store=store, zarr_format=zarr_format ) z[:] = a assert a.shape == z.shape assert a.dtype == z.dtype assert (10, 10) == z.chunks actual = z[:] np.testing.assert_array_equal(a, actual) b = np.arange(105 * 105, 2 * 105 * 105, dtype="i4").reshape((105, 105)) e = np.append(a, b, axis=0) z.append(b) assert e.shape == z.shape assert e.dtype == z.dtype assert (10, 10) == z.chunks actual = z[:] np.testing.assert_array_equal(e, actual) @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_append_2d_axis(store: MemoryStore, zarr_format: ZarrFormat) -> None: a = np.arange(105 * 105, dtype="i4").reshape((105, 105)) z = zarr.create( shape=a.shape, chunks=(10, 10), dtype=a.dtype, store=store, zarr_format=zarr_format ) z[:] = a assert a.shape == z.shape assert a.dtype == z.dtype assert (10, 10) == z.chunks np.testing.assert_array_equal(a, z[:]) b = np.arange(105 * 105, 2 * 105 * 105, dtype="i4").reshape((105, 105)) e = np.append(a, b, axis=1) z.append(b, axis=1) assert e.shape == z.shape assert e.dtype == z.dtype assert (10, 10) == z.chunks np.testing.assert_array_equal(e, z[:]) @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_append_bad_shape(store: MemoryStore, zarr_format: ZarrFormat) -> None: a = np.arange(100) z = zarr.create(shape=a.shape, chunks=10, dtype=a.dtype, store=store, zarr_format=zarr_format) z[:] = a b = a.reshape(10, 10) with pytest.raises(ValueError): z.append(b) @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("write_empty_chunks", [True, False]) @pytest.mark.parametrize("fill_value", [0, 5]) def test_write_empty_chunks_behavior( zarr_format: ZarrFormat, store: MemoryStore, write_empty_chunks: bool, fill_value: int ) -> None: """ Check that the write_empty_chunks value of the config is applied correctly. We expect that when write_empty_chunks is True, writing chunks equal to the fill value will result in those chunks appearing in the store. When write_empty_chunks is False, writing chunks that are equal to the fill value will result in those chunks not being present in the store. In particular, they should be deleted if they were already present. 
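    Note that the flag is passed here through the per-array ``config`` parameter;
    the global ``array.write_empty_chunks`` configuration value is exercised
    separately in ``TestCreateArray.test_write_empty_chunks_config``.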
""" arr = zarr.create_array( store=store, shape=(2,), zarr_format=zarr_format, dtype="i4", fill_value=fill_value, chunks=(1,), config={"write_empty_chunks": write_empty_chunks}, ) assert arr._async_array._config.write_empty_chunks == write_empty_chunks # initialize the store with some non-fill value chunks arr[:] = fill_value + 1 assert arr.nchunks_initialized == arr.nchunks arr[:] = fill_value if not write_empty_chunks: assert arr.nchunks_initialized == 0 else: assert arr.nchunks_initialized == arr.nchunks @pytest.mark.parametrize( ("fill_value", "expected"), [ (np.nan * 1j, ["NaN", "NaN"]), (np.nan, ["NaN", 0.0]), (np.inf, ["Infinity", 0.0]), (np.inf * 1j, ["NaN", "Infinity"]), (-np.inf, ["-Infinity", 0.0]), (math.inf, ["Infinity", 0.0]), ], ) async def test_special_complex_fill_values_roundtrip(fill_value: Any, expected: list[Any]) -> None: store = MemoryStore() zarr.create_array(store=store, shape=(1,), dtype=np.complex64, fill_value=fill_value) content = await store.get("zarr.json", prototype=default_buffer_prototype()) assert content is not None actual = json.loads(content.to_bytes()) assert actual["fill_value"] == expected @pytest.mark.parametrize("shape", [(1,), (2, 3), (4, 5, 6)]) @pytest.mark.parametrize("dtype", ["uint8", "float32"]) @pytest.mark.parametrize("array_type", ["async", "sync"]) async def test_nbytes( shape: tuple[int, ...], dtype: str, array_type: Literal["async", "sync"] ) -> None: """ Test that the ``nbytes`` attribute of an Array or AsyncArray correctly reports the capacity of the chunks of that array. """ store = MemoryStore() arr = zarr.create_array(store=store, shape=shape, dtype=dtype, fill_value=0) if array_type == "async": assert arr._async_array.nbytes == np.prod(arr.shape) * arr.dtype.itemsize else: assert arr.nbytes == np.prod(arr.shape) * arr.dtype.itemsize @pytest.mark.parametrize( ("array_shape", "chunk_shape"), [((256,), (2,))], ) def test_auto_partition_auto_shards( array_shape: tuple[int, ...], chunk_shape: tuple[int, ...] ) -> None: """ Test that automatically picking a shard size returns a tuple of 2 * the chunk shape for any axis where there are 8 or more chunks. """ dtype = np.dtype("uint8") expected_shards: tuple[int, ...] 
= ()
    for cs, a_len in zip(chunk_shape, array_shape, strict=False):
        if a_len // cs >= 8:
            expected_shards += (2 * cs,)
        else:
            expected_shards += (cs,)

    auto_shards, _ = _auto_partition(
        array_shape=array_shape, chunk_shape=chunk_shape, shard_shape="auto", dtype=dtype
    )
    assert auto_shards == expected_shards


@pytest.mark.parametrize("store", ["memory"], indirect=True)
class TestCreateArray:
    @staticmethod
    def test_chunks_and_shards(store: Store) -> None:
        spath = StorePath(store)
        shape = (100, 100)
        chunks = (5, 5)
        shards = (10, 10)

        arr_v3 = zarr.create_array(store=spath / "v3", shape=shape, chunks=chunks, dtype="i4")
        assert arr_v3.chunks == chunks
        assert arr_v3.shards is None

        arr_v3_sharding = zarr.create_array(
            store=spath / "v3_sharding",
            shape=shape,
            chunks=chunks,
            shards=shards,
            dtype="i4",
        )
        assert arr_v3_sharding.chunks == chunks
        assert arr_v3_sharding.shards == shards

        arr_v2 = zarr.create_array(
            store=spath / "v2", shape=shape, chunks=chunks, zarr_format=2, dtype="i4"
        )
        assert arr_v2.chunks == chunks
        assert arr_v2.shards is None

    @staticmethod
    @pytest.mark.parametrize(
        ("dtype", "fill_value_expected"), [("<U4", ""), ("<S4", b""), ("i", 0), ("f", 0.0)]
    )
    def test_default_fill_value(dtype: str, fill_value_expected: object, store: Store) -> None:
        a = zarr.create_array(store, shape=(5,), chunks=(5,), dtype=dtype)
        assert a.fill_value == fill_value_expected

    @staticmethod
    @pytest.mark.parametrize("dtype", ["uint8", "float32", "str"])
    @pytest.mark.parametrize("empty_value", [None, ()])
    async def test_no_filters_compressors(
        store: MemoryStore, dtype: str, empty_value: object, zarr_format: ZarrFormat
    ) -> None:
        """
        Test that the default ``filters`` and ``compressors`` are removed when ``create_array`` is invoked.
        """
        arr = await create_array(
            store=store,
            dtype=dtype,
            shape=(10,),
            zarr_format=zarr_format,
            compressors=empty_value,
            filters=empty_value,
        )
        # Test metadata explicitly
        if zarr_format == 2:
            assert arr.metadata.zarr_format == 2  # guard for mypy
            # v2 spec requires that filters be either a collection with at least one filter, or None
            assert arr.metadata.filters is None
            # Compressor is a single element in v2 metadata; the absence of a compressor is encoded
            # as None
            assert arr.metadata.compressor is None

            assert arr.filters == ()
            assert arr.compressors == ()
        else:
            assert arr.metadata.zarr_format == 3  # guard for mypy
            if dtype == "str":
                assert arr.metadata.codecs == (VLenUTF8Codec(),)
                assert arr.serializer == VLenUTF8Codec()
            else:
                assert arr.metadata.codecs == (BytesCodec(),)
                assert arr.serializer == BytesCodec()

    @staticmethod
    @pytest.mark.parametrize("dtype", ["uint8", "float32", "str"])
    @pytest.mark.parametrize(
        "compressors",
        [
            "auto",
            None,
            (),
            (ZstdCodec(level=3),),
            (ZstdCodec(level=3), GzipCodec(level=0)),
            ZstdCodec(level=3),
            {"name": "zstd", "configuration": {"level": 3}},
            ({"name": "zstd", "configuration": {"level": 3}},),
        ],
    )
    @pytest.mark.parametrize(
        "filters",
        [
            "auto",
            None,
            (),
            (
                TransposeCodec(
                    order=[
                        0,
                    ]
                ),
            ),
            (
                TransposeCodec(
                    order=[
                        0,
                    ]
                ),
                TransposeCodec(
                    order=[
                        0,
                    ]
                ),
            ),
            TransposeCodec(
                order=[
                    0,
                ]
            ),
            {"name": "transpose", "configuration": {"order": [0]}},
            ({"name": "transpose", "configuration": {"order": [0]}},),
        ],
    )
    @pytest.mark.parametrize(("chunks", "shards"), [((6,), None), ((3,), (6,))])
    async def test_v3_chunk_encoding(
        store: MemoryStore,
        compressors: CompressorsLike,
        filters: FiltersLike,
        dtype: str,
        chunks: tuple[int, ...],
        shards: tuple[int, ...]
| None, ) -> None: """ Test various possibilities for the compressors and filters parameter to create_array """ arr = await create_array( store=store, dtype=dtype, shape=(12,), chunks=chunks, shards=shards, zarr_format=3, filters=filters, compressors=compressors, ) filters_expected, _, compressors_expected = _parse_chunk_encoding_v3( filters=filters, compressors=compressors, serializer="auto", dtype=np.dtype(dtype) ) assert arr.filters == filters_expected assert arr.compressors == compressors_expected @staticmethod @pytest.mark.parametrize("dtype", ["uint8", "float32", "str"]) @pytest.mark.parametrize( "compressors", [ "auto", None, numcodecs.Zstd(level=3), (), (numcodecs.Zstd(level=3),), ], ) @pytest.mark.parametrize( "filters", ["auto", None, numcodecs.GZip(level=1), (numcodecs.GZip(level=1),)] ) async def test_v2_chunk_encoding( store: MemoryStore, compressors: CompressorsLike, filters: FiltersLike, dtype: str ) -> None: arr = await create_array( store=store, dtype=dtype, shape=(10,), zarr_format=2, compressors=compressors, filters=filters, ) filters_expected, compressor_expected = _parse_chunk_encoding_v2( filters=filters, compressor=compressors, dtype=np.dtype(dtype) ) assert arr.metadata.zarr_format == 2 # guard for mypy assert arr.metadata.compressor == compressor_expected assert arr.metadata.filters == filters_expected # Normalize for property getters compressor_expected = () if compressor_expected is None else (compressor_expected,) filters_expected = () if filters_expected is None else filters_expected assert arr.compressors == compressor_expected assert arr.filters == filters_expected @staticmethod @pytest.mark.parametrize("dtype", ["uint8", "float32", "str"]) async def test_default_filters_compressors( store: MemoryStore, dtype: str, zarr_format: ZarrFormat ) -> None: """ Test that the default ``filters`` and ``compressors`` are used when ``create_array`` is invoked with ``filters`` and ``compressors`` unspecified. """ arr = await create_array( store=store, dtype=dtype, shape=(10,), zarr_format=zarr_format, ) if zarr_format == 3: expected_filters, expected_serializer, expected_compressors = ( _get_default_chunk_encoding_v3(np_dtype=np.dtype(dtype)) ) elif zarr_format == 2: default_filters, default_compressors = _get_default_chunk_encoding_v2( np_dtype=np.dtype(dtype) ) if default_filters is None: expected_filters = () else: expected_filters = default_filters if default_compressors is None: expected_compressors = () else: expected_compressors = (default_compressors,) expected_serializer = None else: raise ValueError(f"Invalid zarr_format: {zarr_format}") assert arr.filters == expected_filters assert arr.serializer == expected_serializer assert arr.compressors == expected_compressors @staticmethod async def test_v2_no_shards(store: Store) -> None: """ Test that creating a Zarr v2 array with ``shard_shape`` set to a non-None value raises an error. """ msg = re.escape( "Zarr format 2 arrays can only be created with `shard_shape` set to `None`. Got `shard_shape=(5,)` instead." ) with pytest.raises(ValueError, match=msg): _ = await create_array( store=store, dtype="uint8", shape=(10,), shards=(5,), zarr_format=2, ) @staticmethod @pytest.mark.parametrize("impl", ["sync", "async"]) async def test_with_data(impl: Literal["sync", "async"], store: Store) -> None: """ Test that we can invoke ``create_array`` with a ``data`` parameter. 
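        Both the sync and async code paths should infer the shape and dtype of the
        resulting array from ``data``, which is why neither is passed explicitly here.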
""" data = np.arange(10) name = "foo" arr: AsyncArray[ArrayV2Metadata] | AsyncArray[ArrayV3Metadata] | Array if impl == "sync": arr = sync_api.create_array(store, name=name, data=data) stored = arr[:] elif impl == "async": arr = await create_array(store, name=name, data=data, zarr_format=3) stored = await arr._get_selection( BasicIndexer(..., shape=arr.shape, chunk_grid=arr.metadata.chunk_grid), prototype=default_buffer_prototype(), ) else: raise ValueError(f"Invalid impl: {impl}") assert np.array_equal(stored, data) @staticmethod async def test_with_data_invalid_params(store: Store) -> None: """ Test that failing to specify data AND shape / dtype results in a ValueError """ with pytest.raises(ValueError, match="shape was not specified"): await create_array(store, data=None, shape=None, dtype=None) # we catch shape=None first, so specifying a dtype should raise the same exception as before with pytest.raises(ValueError, match="shape was not specified"): await create_array(store, data=None, shape=None, dtype="uint8") with pytest.raises(ValueError, match="dtype was not specified"): await create_array(store, data=None, shape=(10, 10)) @staticmethod async def test_data_ignored_params(store: Store) -> None: """ Test that specifying data AND shape AND dtype results in a ValueError """ data = np.arange(10) with pytest.raises( ValueError, match="The data parameter was used, but the shape parameter was also used." ): await create_array(store, data=data, shape=data.shape, dtype=None, overwrite=True) # we catch shape first, so specifying a dtype should raise the same warning as before with pytest.raises( ValueError, match="The data parameter was used, but the shape parameter was also used." ): await create_array(store, data=data, shape=data.shape, dtype=data.dtype, overwrite=True) with pytest.raises( ValueError, match="The data parameter was used, but the dtype parameter was also used." ): await create_array(store, data=data, shape=None, dtype=data.dtype, overwrite=True) @staticmethod @pytest.mark.parametrize("order_config", ["C", "F", None]) def test_order( order_config: MemoryOrder | None, zarr_format: ZarrFormat, store: MemoryStore, ) -> None: """ Test that the arrays generated by array indexing have a memory order defined by the config order value, and that for zarr v2 arrays, the ``order`` field in the array metadata is set correctly. 
""" config: ArrayConfigLike = {} if order_config is None: config = {} expected = zarr.config.get("array.order") else: config = {"order": order_config} expected = order_config if zarr_format == 2: arr = zarr.create_array( store=store, shape=(2, 2), zarr_format=zarr_format, dtype="i4", order=expected, config=config, ) # guard for type checking assert arr.metadata.zarr_format == 2 assert arr.metadata.order == expected else: arr = zarr.create_array( store=store, shape=(2, 2), zarr_format=zarr_format, dtype="i4", config=config ) vals = np.asarray(arr) if expected == "C": assert vals.flags.c_contiguous elif expected == "F": assert vals.flags.f_contiguous else: raise AssertionError @staticmethod @pytest.mark.parametrize("write_empty_chunks", [True, False]) async def test_write_empty_chunks_config(write_empty_chunks: bool, store: Store) -> None: """ Test that the value of write_empty_chunks is sensitive to the global config when not set explicitly """ with zarr.config.set({"array.write_empty_chunks": write_empty_chunks}): arr = await create_array(store, shape=(2, 2), dtype="i4") assert arr._config.write_empty_chunks == write_empty_chunks @staticmethod @pytest.mark.parametrize("path", [None, "", "/", "/foo", "foo", "foo/bar"]) async def test_name(store: Store, zarr_format: ZarrFormat, path: str | None) -> None: arr = await create_array( store, shape=(2, 2), dtype="i4", name=path, zarr_format=zarr_format ) if path is None: expected_path = "" elif path.startswith("/"): expected_path = path.lstrip("/") else: expected_path = path assert arr.path == expected_path assert arr.name == "/" + expected_path # test that implicit groups were created path_parts = expected_path.split("/") if len(path_parts) > 1: *parents, _ = ["", *accumulate(path_parts, lambda x, y: "/".join([x, y]))] # noqa: FLY002 for parent_path in parents: # this will raise if these groups were not created _ = await zarr.api.asynchronous.open_group( store=store, path=parent_path, mode="r", zarr_format=zarr_format ) async def test_scalar_array() -> None: arr = zarr.array(1.5) assert arr[...] == 1.5 assert arr[()] == 1.5 assert arr.shape == () async def test_orthogonal_set_total_slice() -> None: """Ensure that a whole chunk overwrite does not read chunks""" store = MemoryStore() array = zarr.create_array(store, shape=(20, 20), chunks=(1, 2), dtype=int, fill_value=-1) with mock.patch("zarr.storage.MemoryStore.get", side_effect=RuntimeError): array[0, slice(4, 10)] = np.arange(6) array = zarr.create_array( store, shape=(20, 21), chunks=(1, 2), dtype=int, fill_value=-1, overwrite=True ) with mock.patch("zarr.storage.MemoryStore.get", side_effect=RuntimeError): array[0, :] = np.arange(21) with mock.patch("zarr.storage.MemoryStore.get", side_effect=RuntimeError): array[:] = 1 @pytest.mark.skipif( Version(numcodecs.__version__) < Version("0.15.1"), reason="codec configuration is overwritten on older versions. 
GH2800",
)
def test_roundtrip_numcodecs() -> None:
    store = MemoryStore()

    compressors = [
        {"name": "numcodecs.shuffle", "configuration": {"elementsize": 2}},
        {"name": "numcodecs.zlib", "configuration": {"level": 4}},
    ]
    filters = [
        {
            "name": "numcodecs.fixedscaleoffset",
            "configuration": {
                "scale": 100.0,
                "offset": 0.0,
                "dtype": "<f4",
                "astype": "<i2",
            },
        },
    ]
    fill_value = 2

    # Create the array with the correct codecs
    root = zarr.group(store)
    root.create_array(
        "test",
        shape=(720, 1440),
        chunks=(720, 1440),
        dtype="float64",
        compressors=compressors,
        filters=filters,
        fill_value=fill_value,
    )

    BYTES_CODEC = {"name": "bytes", "configuration": {"endian": "little"}}
    # Read in the array again and check compressor config
    root = zarr.open_group(store, mode="r")
    metadata = root["test"].metadata.to_dict()
    expected = (*filters, BYTES_CODEC, *compressors)
    assert metadata["codecs"] == expected


def _index_array(arr: Array, index: Any) -> Any:
    return arr[index]


@pytest.mark.parametrize(
    "method",
    [
        pytest.param(
            "fork",
            marks=pytest.mark.skipif(
                sys.platform in ("win32", "darwin"), reason="fork not supported on Windows or OSX"
            ),
        ),
        "spawn",
        pytest.param(
            "forkserver",
            marks=pytest.mark.skipif(
                sys.platform == "win32", reason="forkserver not supported on Windows"
            ),
        ),
    ],
)
@pytest.mark.parametrize("store", ["local"], indirect=True)
def test_multiprocessing(store: Store, method: Literal["fork", "spawn", "forkserver"]) -> None:
    """
    Test that arrays can be pickled and indexed in child processes
    """
    data = np.arange(100)
    arr = zarr.create_array(store=store, data=data)
    ctx = mp.get_context(method)
    pool = ctx.Pool()

    results = pool.starmap(_index_array, [(arr, slice(len(data)))])
    assert all(np.array_equal(r, data) for r in results)


async def test_sharding_coordinate_selection() -> None:
    store = MemoryStore()
    g = zarr.open_group(store, mode="w")
    arr = g.create_array(
        name="a",
        shape=(2, 3, 4),
        chunks=(1, 2, 2),
        overwrite=True,
        dtype=np.float32,
        shards=(2, 4, 4),
    )
    arr[:] = np.arange(2 * 3 * 4).reshape((2, 3, 4))
    assert (arr[1, [0, 1]] == np.array([[12, 13, 14, 15], [16, 17, 18, 19]])).all()  # type: ignore[index]
zarr-python-3.0.6/tests/test_attributes.py000066400000000000000000000044731476711733500210030ustar00rootroot00000000000000import pytest

import zarr.core
import zarr.core.attributes
import zarr.storage


def test_put() -> None:
    store = zarr.storage.MemoryStore()
    attrs = zarr.core.attributes.Attributes(
        zarr.Group.from_store(store, attributes={"a": 1, "b": 2})
    )
    attrs.put({"a": 3, "c": 4})
    expected = {"a": 3, "c": 4}
    assert dict(attrs) == expected


def test_asdict() -> None:
    store = zarr.storage.MemoryStore()
    attrs = zarr.core.attributes.Attributes(
        zarr.Group.from_store(store, attributes={"a": 1, "b": 2})
    )
    result = attrs.asdict()
    assert result == {"a": 1, "b": 2}


def test_update_attributes_preserves_existing() -> None:
    """
    Test that `update_attributes` only updates the specified attributes
    and preserves existing ones.
    """
    store = zarr.storage.MemoryStore()
    z = zarr.create(10, store=store, overwrite=True)
    z.attrs["a"] = []
    z.attrs["b"] = 3
    assert dict(z.attrs) == {"a": [], "b": 3}

    z.update_attributes({"a": [3, 4], "c": 4})
    assert dict(z.attrs) == {"a": [3, 4], "b": 3, "c": 4}


def test_update_empty_attributes() -> None:
    """
    Ensure updating when initial attributes are empty works.
    """
    store = zarr.storage.MemoryStore()
    z = zarr.create(10, store=store, overwrite=True)
    assert dict(z.attrs) == {}
    z.update_attributes({"a": [3, 4], "c": 4})
    assert dict(z.attrs) == {"a": [3, 4], "c": 4}


def test_update_no_changes() -> None:
    """
    Ensure updating when no new or modified attributes does not alter existing ones.
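    Passing an empty dict to ``update_attributes`` should be a no-op.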
""" store = zarr.storage.MemoryStore() z = zarr.create(10, store=store, overwrite=True) z.attrs["a"] = [] z.attrs["b"] = 3 z.update_attributes({}) assert dict(z.attrs) == {"a": [], "b": 3} @pytest.mark.parametrize("group", [True, False]) def test_del_works(group: bool) -> None: store = zarr.storage.MemoryStore() z: zarr.Group | zarr.Array if group: z = zarr.create_group(store) else: z = zarr.create_array(store=store, shape=10, dtype=int) assert dict(z.attrs) == {} z.update_attributes({"a": [3, 4], "c": 4}) del z.attrs["a"] assert dict(z.attrs) == {"c": 4} z2: zarr.Group | zarr.Array if group: z2 = zarr.open_group(store) else: z2 = zarr.open_array(store) assert dict(z2.attrs) == {"c": 4} zarr-python-3.0.6/tests/test_buffer.py000066400000000000000000000117171476711733500200660ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING import numpy as np import pytest import zarr from zarr.codecs.blosc import BloscCodec from zarr.codecs.crc32c_ import Crc32cCodec from zarr.codecs.gzip import GzipCodec from zarr.codecs.transpose import TransposeCodec from zarr.codecs.zstd import ZstdCodec from zarr.core.buffer import ArrayLike, BufferPrototype, NDArrayLike, cpu, gpu from zarr.storage import MemoryStore, StorePath from zarr.testing.buffer import ( NDBufferUsingTestNDArrayLike, StoreExpectingTestBuffer, TestBuffer, TestNDArrayLike, ) from zarr.testing.utils import gpu_test if TYPE_CHECKING: import types try: import cupy as cp except ImportError: cp = None if TYPE_CHECKING: import types def test_nd_array_like(xp: types.ModuleType) -> None: ary = xp.arange(10) assert isinstance(ary, ArrayLike) assert isinstance(ary, NDArrayLike) @pytest.mark.asyncio async def test_async_array_prototype() -> None: """Test the use of a custom buffer prototype""" expect = np.zeros((9, 9), dtype="uint16", order="F") a = await zarr.api.asynchronous.create_array( StorePath(StoreExpectingTestBuffer()) / "test_async_array_prototype", shape=expect.shape, chunks=(5, 5), dtype=expect.dtype, fill_value=0, ) expect[1:4, 3:6] = np.ones((3, 3)) my_prototype = BufferPrototype(buffer=TestBuffer, nd_buffer=NDBufferUsingTestNDArrayLike) await a.setitem( selection=(slice(1, 4), slice(3, 6)), value=np.ones((3, 3)), prototype=my_prototype, ) got = await a.getitem(selection=(slice(0, 9), slice(0, 9)), prototype=my_prototype) # ignoring a mypy error here that TestNDArrayLike doesn't meet the NDArrayLike protocol # The test passes, so it clearly does. 
assert isinstance(got, TestNDArrayLike) # type: ignore[unreachable] assert np.array_equal(expect, got) # type: ignore[unreachable] @gpu_test @pytest.mark.asyncio async def test_async_array_gpu_prototype() -> None: """Test the use of the GPU buffer prototype""" expect = cp.zeros((9, 9), dtype="uint16", order="F") a = await zarr.api.asynchronous.create_array( StorePath(MemoryStore()) / "test_async_array_gpu_prototype", shape=expect.shape, chunks=(5, 5), dtype=expect.dtype, fill_value=0, ) expect[1:4, 3:6] = cp.ones((3, 3)) await a.setitem( selection=(slice(1, 4), slice(3, 6)), value=cp.ones((3, 3)), prototype=gpu.buffer_prototype, ) got = await a.getitem(selection=(slice(0, 9), slice(0, 9)), prototype=gpu.buffer_prototype) assert isinstance(got, cp.ndarray) assert cp.array_equal(expect, got) @pytest.mark.asyncio async def test_codecs_use_of_prototype() -> None: expect = np.zeros((10, 10), dtype="uint16", order="F") a = await zarr.api.asynchronous.create_array( StorePath(StoreExpectingTestBuffer()) / "test_codecs_use_of_prototype", shape=expect.shape, chunks=(5, 5), dtype=expect.dtype, fill_value=0, compressors=[BloscCodec(), Crc32cCodec(), GzipCodec(), ZstdCodec()], filters=[TransposeCodec(order=(1, 0))], ) expect[:] = np.arange(100).reshape(10, 10) my_prototype = BufferPrototype(buffer=TestBuffer, nd_buffer=NDBufferUsingTestNDArrayLike) await a.setitem( selection=(slice(0, 10), slice(0, 10)), value=expect[:], prototype=my_prototype, ) got = await a.getitem(selection=(slice(0, 10), slice(0, 10)), prototype=my_prototype) # ignoring a mypy error here that TestNDArrayLike doesn't meet the NDArrayLike protocol # The test passes, so it clearly does. assert isinstance(got, TestNDArrayLike) # type: ignore[unreachable] assert np.array_equal(expect, got) # type: ignore[unreachable] @gpu_test @pytest.mark.asyncio async def test_codecs_use_of_gpu_prototype() -> None: expect = cp.zeros((10, 10), dtype="uint16", order="F") a = await zarr.api.asynchronous.create_array( StorePath(MemoryStore()) / "test_codecs_use_of_gpu_prototype", shape=expect.shape, chunks=(5, 5), dtype=expect.dtype, fill_value=0, compressors=[BloscCodec(), Crc32cCodec(), GzipCodec(), ZstdCodec()], filters=[TransposeCodec(order=(1, 0))], ) expect[:] = cp.arange(100).reshape(10, 10) await a.setitem( selection=(slice(0, 10), slice(0, 10)), value=expect[:], prototype=gpu.buffer_prototype, ) got = await a.getitem(selection=(slice(0, 10), slice(0, 10)), prototype=gpu.buffer_prototype) assert isinstance(got, cp.ndarray) assert cp.array_equal(expect, got) def test_numpy_buffer_prototype() -> None: buffer = cpu.buffer_prototype.buffer.create_zero_length() ndbuffer = cpu.buffer_prototype.nd_buffer.create(shape=(1, 2), dtype=np.dtype("int64")) assert isinstance(buffer.as_array_like(), np.ndarray) assert isinstance(ndbuffer.as_ndarray_like(), np.ndarray) zarr-python-3.0.6/tests/test_chunk_grids.py000066400000000000000000000036001476711733500211050ustar00rootroot00000000000000from typing import Any import numpy as np import pytest from zarr.core.chunk_grids import _guess_chunks, normalize_chunks @pytest.mark.parametrize( "shape", [(0,), (0,) * 2, (1, 2, 0, 4, 5), (10, 0), (10,), (100,) * 3, (1000000,), (10000,) * 2] ) @pytest.mark.parametrize("itemsize", [1, 2, 4]) def test_guess_chunks(shape: tuple[int, ...], itemsize: int) -> None: chunks = _guess_chunks(shape, itemsize) chunk_size = np.prod(chunks) * itemsize assert isinstance(chunks, tuple) assert len(chunks) == len(shape) assert chunk_size < (64 * 1024 * 1024) # doesn't make any sense to allow 
chunks to have zero length dimension assert all(0 < c <= max(s, 1) for c, s in zip(chunks, shape, strict=False)) @pytest.mark.parametrize( ("chunks", "shape", "typesize", "expected"), [ ((10,), (100,), 1, (10,)), ([10], (100,), 1, (10,)), (10, (100,), 1, (10,)), ((10, 10), (100, 10), 1, (10, 10)), (10, (100, 10), 1, (10, 10)), ((10, None), (100, 10), 1, (10, 10)), (30, (100, 20, 10), 1, (30, 30, 30)), ((30,), (100, 20, 10), 1, (30, 20, 10)), ((30, None), (100, 20, 10), 1, (30, 20, 10)), ((30, None, None), (100, 20, 10), 1, (30, 20, 10)), ((30, 20, None), (100, 20, 10), 1, (30, 20, 10)), ((30, 20, 10), (100, 20, 10), 1, (30, 20, 10)), # auto chunking (None, (100,), 1, (100,)), (-1, (100,), 1, (100,)), ((30, -1, None), (100, 20, 10), 1, (30, 20, 10)), ], ) def test_normalize_chunks( chunks: Any, shape: tuple[int, ...], typesize: int, expected: tuple[int, ...] ) -> None: assert expected == normalize_chunks(chunks, shape, typesize) def test_normalize_chunks_errors() -> None: with pytest.raises(ValueError): normalize_chunks("foo", (100,), 1) with pytest.raises(ValueError): normalize_chunks((100, 10), (100,), 1) zarr-python-3.0.6/tests/test_codec_entrypoints.py000066400000000000000000000031561476711733500223460ustar00rootroot00000000000000import os.path import sys from collections.abc import Generator import pytest import zarr.registry from zarr import config here = os.path.abspath(os.path.dirname(__file__)) @pytest.fixture def set_path() -> Generator[None, None, None]: sys.path.append(here) zarr.registry._collect_entrypoints() yield sys.path.remove(here) registries = zarr.registry._collect_entrypoints() for registry in registries: registry.lazy_load_list.clear() config.reset() @pytest.mark.usefixtures("set_path") @pytest.mark.parametrize("codec_name", ["TestEntrypointCodec", "TestEntrypointGroup.Codec"]) def test_entrypoint_codec(codec_name: str) -> None: config.set({"codecs.test": "package_with_entrypoint." + codec_name}) cls_test = zarr.registry.get_codec_class("test") assert cls_test.__qualname__ == codec_name @pytest.mark.usefixtures("set_path") def test_entrypoint_pipeline() -> None: config.set({"codec_pipeline.path": "package_with_entrypoint.TestEntrypointCodecPipeline"}) cls = zarr.registry.get_pipeline_class() assert cls.__name__ == "TestEntrypointCodecPipeline" @pytest.mark.usefixtures("set_path") @pytest.mark.parametrize("buffer_name", ["TestEntrypointBuffer", "TestEntrypointGroup.Buffer"]) def test_entrypoint_buffer(buffer_name: str) -> None: config.set( { "buffer": "package_with_entrypoint." 
@pytest.mark.usefixtures("set_path")
@pytest.mark.parametrize("buffer_name", ["TestEntrypointBuffer", "TestEntrypointGroup.Buffer"])
def test_entrypoint_buffer(buffer_name: str) -> None:
    config.set(
        {
            "buffer": "package_with_entrypoint." + buffer_name,
            "ndbuffer": "package_with_entrypoint.TestEntrypointNDBuffer",
        }
    )
    assert zarr.registry.get_buffer_class().__qualname__ == buffer_name
    assert zarr.registry.get_ndbuffer_class().__name__ == "TestEntrypointNDBuffer"


zarr-python-3.0.6/tests/test_codecs/
zarr-python-3.0.6/tests/test_codecs/__init__.py
zarr-python-3.0.6/tests/test_codecs/test_blosc.py

import json

import numpy as np
import pytest

import zarr
from zarr.abc.store import Store
from zarr.codecs import BloscCodec
from zarr.core.buffer import default_buffer_prototype
from zarr.storage import StorePath


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("dtype", ["uint8", "uint16"])
async def test_blosc_evolve(store: Store, dtype: str) -> None:
    typesize = np.dtype(dtype).itemsize
    path = "blosc_evolve"
    spath = StorePath(store, path)
    await zarr.api.asynchronous.create_array(
        spath,
        shape=(16, 16),
        chunks=(16, 16),
        dtype=dtype,
        fill_value=0,
        compressors=BloscCodec(),
    )
    buf = await store.get(f"{path}/zarr.json", prototype=default_buffer_prototype())
    assert buf is not None
    zarr_json = json.loads(buf.to_bytes())
    blosc_configuration_json = zarr_json["codecs"][1]["configuration"]

    assert blosc_configuration_json["typesize"] == typesize
    if typesize == 1:
        assert blosc_configuration_json["shuffle"] == "bitshuffle"
    else:
        assert blosc_configuration_json["shuffle"] == "shuffle"

    path2 = "blosc_evolve_sharding"
    spath2 = StorePath(store, path2)
    await zarr.api.asynchronous.create_array(
        spath2,
        shape=(16, 16),
        chunks=(16, 16),
        shards=(16, 16),
        dtype=dtype,
        fill_value=0,
        compressors=BloscCodec(),
    )
    buf = await store.get(f"{path2}/zarr.json", prototype=default_buffer_prototype())
    assert buf is not None
    zarr_json = json.loads(buf.to_bytes())
    blosc_configuration_json = zarr_json["codecs"][0]["configuration"]["codecs"][1]["configuration"]

    assert blosc_configuration_json["typesize"] == typesize
    if typesize == 1:
        assert blosc_configuration_json["shuffle"] == "bitshuffle"
    else:
        assert blosc_configuration_json["shuffle"] == "shuffle"


zarr-python-3.0.6/tests/test_codecs/test_codecs.py

from __future__ import annotations

import json
from dataclasses import dataclass
from typing import TYPE_CHECKING

import numpy as np
import pytest

import zarr
import zarr.api
import zarr.api.asynchronous
from zarr import Array, AsyncArray, config
from zarr.codecs import (
    BytesCodec,
    GzipCodec,
    ShardingCodec,
    TransposeCodec,
)
from zarr.core.buffer import default_buffer_prototype
from zarr.core.indexing import Selection, morton_order_iter
from zarr.storage import StorePath

if TYPE_CHECKING:
    from zarr.abc.store import Store
    from zarr.core.buffer.core import NDArrayLike
    from zarr.core.common import MemoryOrder


@dataclass(frozen=True)
class _AsyncArrayProxy:
    array: AsyncArray

    def __getitem__(self, selection: Selection) -> _AsyncArraySelectionProxy:
        return _AsyncArraySelectionProxy(self.array, selection)


@dataclass(frozen=True)
class _AsyncArraySelectionProxy:
    array: AsyncArray
    selection: Selection

    async def get(self) -> NDArrayLike:
        return await self.array.getitem(self.selection)

    async def set(self, value: np.ndarray) -> None:
        return await self.array.setitem(self.selection, value)


def order_from_dim(order: MemoryOrder, ndim: int) -> tuple[int, ...]:
    if order == "F":
        return tuple(ndim - x - 1 for x in range(ndim))
    else:
        return tuple(range(ndim))


def test_sharding_pickle() -> None:
    """
    Test that sharding codecs can be pickled
    """


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("input_order", ["F", "C"])
@pytest.mark.parametrize("store_order", ["F", "C"])
@pytest.mark.parametrize("runtime_write_order", ["F", "C"])
@pytest.mark.parametrize("runtime_read_order", ["F", "C"])
@pytest.mark.parametrize("with_sharding", [True, False])
async def test_order(
    store: Store,
    input_order: MemoryOrder,
    store_order: MemoryOrder,
    runtime_write_order: MemoryOrder,
    runtime_read_order: MemoryOrder,
    with_sharding: bool,
) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((32, 8), order=input_order)
    path = "order"
    spath = StorePath(store, path=path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(16, 8) if with_sharding else (32, 8),
        shards=(32, 8) if with_sharding else None,
        dtype=data.dtype,
        fill_value=0,
        chunk_key_encoding={"name": "v2", "separator": "."},
        filters=[TransposeCodec(order=order_from_dim(store_order, data.ndim))],
        config={"order": runtime_write_order},
    )

    await _AsyncArrayProxy(a)[:, :].set(data)
    read_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, read_data)

    with config.set({"array.order": runtime_read_order}):
        a = await AsyncArray.open(
            spath,
        )
    read_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, read_data)

    if runtime_read_order == "F":
        assert read_data.flags["F_CONTIGUOUS"]
        assert not read_data.flags["C_CONTIGUOUS"]
    else:
        assert not read_data.flags["F_CONTIGUOUS"]
        assert read_data.flags["C_CONTIGUOUS"]


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("input_order", ["F", "C"])
@pytest.mark.parametrize("runtime_write_order", ["F", "C"])
@pytest.mark.parametrize("runtime_read_order", ["F", "C"])
@pytest.mark.parametrize("with_sharding", [True, False])
def test_order_implicit(
    store: Store,
    input_order: MemoryOrder,
    runtime_write_order: MemoryOrder,
    runtime_read_order: MemoryOrder,
    with_sharding: bool,
) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16), order=input_order)
    path = "order_implicit"
    spath = StorePath(store, path)
    with config.set({"array.order": runtime_write_order}):
        a = zarr.create_array(
            spath,
            shape=data.shape,
            chunks=(8, 8) if with_sharding else (16, 16),
            shards=(16, 16) if with_sharding else None,
            dtype=data.dtype,
            fill_value=0,
        )
        a[:, :] = data

    with config.set({"array.order": runtime_read_order}):
        a = Array.open(spath)
    read_data = a[:, :]
    assert np.array_equal(data, read_data)

    if runtime_read_order == "F":
        assert read_data.flags["F_CONTIGUOUS"]
        assert not read_data.flags["C_CONTIGUOUS"]
    else:
        assert not read_data.flags["F_CONTIGUOUS"]
        assert read_data.flags["C_CONTIGUOUS"]


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_open(store: Store) -> None:
    spath = StorePath(store)
    a = zarr.create_array(
        spath,
        shape=(16, 16),
        chunks=(16, 16),
        dtype="int32",
        fill_value=0,
    )
    b = Array.open(spath)
    assert a.metadata == b.metadata
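# --- illustrative sketch (added for exposition; not part of the original suite) ---
# morton_order_iter visits chunk indices in Z-order (Morton order). A minimal
# reference implementation for the 2-D power-of-two case, assuming little-endian
# bit interleaving of the coordinates, is sketched below; for n_bits=1 it yields
# (0, 0), (1, 0), (0, 1), (1, 1), matching the expectations in test_morton below.
def _morton_2d_sketch(n_bits: int) -> list[tuple[int, int]]:
    out = []
    for code in range(1 << (2 * n_bits)):
        x = y = 0
        for bit in range(n_bits):
            # even bits of the Morton code encode x, odd bits encode y
            x |= ((code >> (2 * bit)) & 1) << bit
            y |= ((code >> (2 * bit + 1)) & 1) << bit
        out.append((x, y))
    return out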
def test_morton() -> None:
    assert list(morton_order_iter((2, 2))) == [(0, 0), (1, 0), (0, 1), (1, 1)]
    assert list(morton_order_iter((2, 2, 2))) == [
        (0, 0, 0),
        (1, 0, 0),
        (0, 1, 0),
        (1, 1, 0),
        (0, 0, 1),
        (1, 0, 1),
        (0, 1, 1),
        (1, 1, 1),
    ]
    assert list(morton_order_iter((2, 2, 2, 2))) == [
        (0, 0, 0, 0),
        (1, 0, 0, 0),
        (0, 1, 0, 0),
        (1, 1, 0, 0),
        (0, 0, 1, 0),
        (1, 0, 1, 0),
        (0, 1, 1, 0),
        (1, 1, 1, 0),
        (0, 0, 0, 1),
        (1, 0, 0, 1),
        (0, 1, 0, 1),
        (1, 1, 0, 1),
        (0, 0, 1, 1),
        (1, 0, 1, 1),
        (0, 1, 1, 1),
        (1, 1, 1, 1),
    ]


@pytest.mark.parametrize(
    "shape",
    [
        [2, 2, 2],
        [5, 2],
        [2, 5],
        [2, 9, 2],
        [3, 2, 12],
        [2, 5, 1],
        [4, 3, 6, 2, 7],
        [3, 2, 1, 6, 4, 5, 2],
    ],
)
def test_morton2(shape) -> None:
    order = list(morton_order_iter(shape))
    for i, x in enumerate(order):
        assert x not in order[:i]  # no duplicates
        assert all(x[j] < shape[j] for j in range(len(shape)))  # all indices are within bounds


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_write_partial_chunks(store: Store) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))
    spath = StorePath(store)
    a = zarr.create_array(
        spath,
        shape=data.shape,
        chunks=(20, 20),
        dtype=data.dtype,
        fill_value=1,
    )
    a[0:16, 0:16] = data
    assert np.array_equal(a[0:16, 0:16], data)


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_delete_empty_chunks(store: Store) -> None:
    data = np.ones((16, 16))
    path = "delete_empty_chunks"
    spath = StorePath(store, path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(32, 32),
        dtype=data.dtype,
        fill_value=1,
    )
    await _AsyncArrayProxy(a)[:16, :16].set(np.zeros((16, 16)))
    await _AsyncArrayProxy(a)[:16, :16].set(data)
    assert np.array_equal(await _AsyncArrayProxy(a)[:16, :16].get(), data)
    assert await store.get(f"{path}/c0/0", prototype=default_buffer_prototype()) is None


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_dimension_names(store: Store) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))
    path = "dimension_names"
    spath = StorePath(store, path)
    await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
        dimension_names=("x", "y"),
    )

    assert (await zarr.api.asynchronous.open_array(store=spath)).metadata.dimension_names == (
        "x",
        "y",
    )

    path2 = "dimension_names2"
    spath2 = StorePath(store, path2)
    await zarr.api.asynchronous.create_array(
        spath2,
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
    )

    assert (await AsyncArray.open(spath2)).metadata.dimension_names is None
    zarr_json_buffer = await store.get(f"{path2}/zarr.json", prototype=default_buffer_prototype())
    assert zarr_json_buffer is not None
    assert "dimension_names" not in json.loads(zarr_json_buffer.to_bytes())
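# --- illustrative note (added for exposition; not part of the original file) ---
# Zarr v3 codec pipelines are ordered: array->array codecs (e.g. transpose)
# come first, then exactly one array->bytes codec (e.g. bytes), then any
# bytes->bytes codecs (e.g. gzip). The tests below assert that out-of-order or
# incomplete pipelines are rejected. A well-formed pipeline under those rules
# would look like:
#
#     codecs = [TransposeCodec(order=(1, 0)), BytesCodec(), GzipCodec()]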
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_invalid_metadata(store: Store) -> None:
    spath2 = StorePath(store, "invalid_codec_order")
    with pytest.raises(TypeError):
        Array.create(
            spath2,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                BytesCodec(),
                TransposeCodec(order=order_from_dim("F", 2)),
            ],
        )
    spath3 = StorePath(store, "invalid_order")
    with pytest.raises(TypeError):
        Array.create(
            spath3,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                TransposeCodec(order="F"),  # type: ignore[arg-type]
                BytesCodec(),
            ],
        )
    spath4 = StorePath(store, "invalid_missing_bytes_codec")
    with pytest.raises(ValueError):
        Array.create(
            spath4,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                TransposeCodec(order=order_from_dim("F", 2)),
            ],
        )
    spath5 = StorePath(store, "invalid_inner_chunk_shape")
    with pytest.raises(ValueError):
        Array.create(
            spath5,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                ShardingCodec(chunk_shape=(8,)),
            ],
        )
    spath6 = StorePath(store, "invalid_inner_chunk_shape")
    with pytest.raises(ValueError):
        Array.create(
            spath6,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                ShardingCodec(chunk_shape=(8, 7)),
            ],
        )
    spath7 = StorePath(store, "warning_inefficient_codecs")
    with pytest.warns(UserWarning):
        Array.create(
            spath7,
            shape=(16, 16),
            chunk_shape=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            codecs=[
                ShardingCodec(chunk_shape=(8, 8)),
                GzipCodec(),
            ],
        )


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_invalid_metadata_create_array(store: Store) -> None:
    spath = StorePath(store, "warning_inefficient_codecs")
    with pytest.warns(UserWarning):
        zarr.create_array(
            spath,
            shape=(16, 16),
            chunks=(16, 16),
            dtype=np.dtype("uint8"),
            fill_value=0,
            serializer=ShardingCodec(chunk_shape=(8, 8)),
            compressors=[
                GzipCodec(),
            ],
        )


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_resize(store: Store) -> None:
    data = np.zeros((16, 18), dtype="uint16")
    path = "resize"
    spath = StorePath(store, path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(10, 10),
        dtype=data.dtype,
        chunk_key_encoding={"name": "v2", "separator": "."},
        fill_value=1,
    )

    await _AsyncArrayProxy(a)[:16, :18].set(data)
    assert await store.get(f"{path}/1.1", prototype=default_buffer_prototype()) is not None
    assert await store.get(f"{path}/0.0", prototype=default_buffer_prototype()) is not None
    assert await store.get(f"{path}/0.1", prototype=default_buffer_prototype()) is not None
    assert await store.get(f"{path}/1.0", prototype=default_buffer_prototype()) is not None

    await a.resize((10, 12))
    assert a.metadata.shape == (10, 12)
    assert a.shape == (10, 12)
    assert await store.get(f"{path}/0.0", prototype=default_buffer_prototype()) is not None
    assert await store.get(f"{path}/0.1", prototype=default_buffer_prototype()) is not None
    assert await store.get(f"{path}/1.0", prototype=default_buffer_prototype()) is None
    assert await store.get(f"{path}/1.1", prototype=default_buffer_prototype()) is None


zarr-python-3.0.6/tests/test_codecs/test_endian.py

from typing import Literal

import numpy as np
import pytest

import zarr
from zarr.abc.store import Store
from zarr.codecs import BytesCodec
from zarr.storage import StorePath

from .test_codecs import _AsyncArrayProxy


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("endian", ["big", "little"])
async def test_endian(store: Store, endian: Literal["big", "little"]) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))
    path = "endian"
    spath = StorePath(store, path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
        chunk_key_encoding={"name": "v2", "separator": "."},
        serializer=BytesCodec(endian=endian),
    )

    await _AsyncArrayProxy(a)[:, :].set(data)
    readback_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, readback_data)


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("dtype_input_endian", [">u2", "<u2"])
@pytest.mark.parametrize("dtype_store_endian", ["big", "little"])
async def test_endian_write(
    store: Store,
    dtype_input_endian: Literal[">u2", "<u2"],
    dtype_store_endian: Literal["big", "little"],
) -> None:
    data = np.arange(0, 256, dtype=dtype_input_endian).reshape((16, 16))
    path = "endian"
    spath = StorePath(store, path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=data.shape,
        chunks=(16, 16),
        dtype="uint16",
        fill_value=0,
        chunk_key_encoding={"name": "v2", "separator": "."},
        serializer=BytesCodec(endian=dtype_store_endian),
    )

    await _AsyncArrayProxy(a)[:, :].set(data)
    readback_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, readback_data)


zarr-python-3.0.6/tests/test_codecs/test_gzip.py

import numpy as np
import pytest

import zarr
from zarr.abc.store import Store
from zarr.codecs import GzipCodec
from zarr.storage import StorePath


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_gzip(store: Store) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))

    a = zarr.create_array(
        StorePath(store),
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
        compressors=GzipCodec(),
    )

    a[:, :] = data
    assert np.array_equal(data, a[:, :])


zarr-python-3.0.6/tests/test_codecs/test_sharding.py

import pickle
from typing import Any

import numpy as np
import numpy.typing as npt
import pytest

import zarr
import zarr.api
import zarr.api.asynchronous
from zarr import Array
from zarr.abc.store import Store
from zarr.codecs import (
    BloscCodec,
    ShardingCodec,
    ShardingCodecIndexLocation,
    TransposeCodec,
)
from zarr.core.buffer import default_buffer_prototype
from zarr.storage import StorePath

from ..conftest import ArrayRequest
from .test_codecs import _AsyncArrayProxy, order_from_dim


@pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"])
@pytest.mark.parametrize("index_location", ["start", "end"])
@pytest.mark.parametrize(
    "array_fixture",
    [
        ArrayRequest(shape=(128,) * 1, dtype="uint8", order="C"),
        ArrayRequest(shape=(128,) * 2, dtype="uint8", order="C"),
        ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"),
    ],
    indirect=["array_fixture"],
)
@pytest.mark.parametrize("offset", [0, 10])
def test_sharding(
    store: Store,
    array_fixture: npt.NDArray[Any],
    index_location: ShardingCodecIndexLocation,
    offset: int,
) -> None:
    """
    Test that we can create an array with a sharding codec, write data to that array, and get
    the same data out via indexing.
    """
    data = array_fixture
    spath = StorePath(store)
    arr = zarr.create_array(
        spath,
        shape=tuple(s + offset for s in data.shape),
        chunks=(32,) * data.ndim,
        shards={"shape": (64,) * data.ndim, "index_location": index_location},
        dtype=data.dtype,
        fill_value=6,
        filters=[TransposeCodec(order=order_from_dim("F", data.ndim))],
        compressors=BloscCodec(cname="lz4"),
    )
    write_region = tuple(slice(offset, None) for dim in range(data.ndim))
    arr[write_region] = data

    if offset > 0:
        empty_region = tuple(slice(0, offset) for dim in range(data.ndim))
        assert np.all(arr[empty_region] == arr.metadata.fill_value)

    read_data = arr[write_region]
    assert data.shape == read_data.shape
    assert np.array_equal(data, read_data)
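# --- illustrative note (added for exposition; not part of the original file) ---
# Sharding nests small "inner" chunks inside one stored object per shard: in
# create_array terms, chunks=(32,) * ndim selects the inner chunk shape and
# shards={"shape": (64,) * ndim} the outer shard shape, so each shard above
# holds 2 ** ndim inner chunks. Reads can fetch individual inner chunks via
# the per-shard index rather than the whole shard.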
""" spath = StorePath(store) arr = zarr.create_array( spath, shape=(128, 128), chunks=(32, 32), shards={"shape": (64, 64), "index_location": index_location}, dtype="uint8", fill_value=6, filters=[TransposeCodec(order=order_from_dim("F", 2))], compressors=BloscCodec(cname="lz4"), ) arr[:16, :16] = 10 # intentionally write partial chunks read_data = arr[:16, :16] np.testing.assert_array_equal(read_data, 10) @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) def test_sharding_partial( store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation ) -> None: data = array_fixture spath = StorePath(store) a = zarr.create_array( spath, shape=tuple(a + 10 for a in data.shape), chunks=(32, 32, 32), shards={"shape": (64, 64, 64), "index_location": index_location}, compressors=BloscCodec(cname="lz4"), filters=[TransposeCodec(order=order_from_dim("F", data.ndim))], dtype=data.dtype, fill_value=0, ) a[10:, 10:, 10:] = data read_data = a[0:10, 0:10, 0:10] assert np.all(read_data == 0) read_data = a[10:, 10:, 10:] assert data.shape == read_data.shape assert np.array_equal(data, read_data) @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) def test_sharding_partial_readwrite( store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation ) -> None: data = array_fixture spath = StorePath(store) a = zarr.create_array( spath, shape=data.shape, chunks=(1, data.shape[1], data.shape[2]), shards={"shape": data.shape, "index_location": index_location}, dtype=data.dtype, fill_value=0, filters=None, compressors=None, ) a[:] = data for x in range(data.shape[0]): read_data = a[x, :, :] assert np.array_equal(data[x], read_data) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_sharding_partial_read( store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation ) -> None: data = array_fixture spath = StorePath(store) a = zarr.create_array( spath, shape=tuple(a + 10 for a in data.shape), chunks=(32, 32, 32), shards={"shape": (64, 64, 64), "index_location": index_location}, compressors=BloscCodec(cname="lz4"), filters=[TransposeCodec(order=order_from_dim("F", data.ndim))], dtype=data.dtype, fill_value=1, ) read_data = a[0:10, 0:10, 0:10] assert np.all(read_data == 1) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) @pytest.mark.parametrize("index_location", ["start", "end"]) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_sharding_partial_overwrite( store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation ) -> None: data = array_fixture[:10, :10, :10] spath = StorePath(store) a = zarr.create_array( spath, shape=tuple(a + 10 for a in data.shape), chunks=(32, 32, 32), shards={"shape": (64, 64, 64), 
"index_location": index_location}, compressors=BloscCodec(cname="lz4"), filters=[TransposeCodec(order=order_from_dim("F", data.ndim))], dtype=data.dtype, fill_value=1, ) a[:10, :10, :10] = data read_data = a[0:10, 0:10, 0:10] assert np.array_equal(data, read_data) data += 10 a[:10, :10, :10] = data read_data = a[0:10, 0:10, 0:10] assert np.array_equal(data, read_data) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) @pytest.mark.parametrize( "outer_index_location", ["start", "end"], ) @pytest.mark.parametrize( "inner_index_location", ["start", "end"], ) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_nested_sharding( store: Store, array_fixture: npt.NDArray[Any], outer_index_location: ShardingCodecIndexLocation, inner_index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture spath = StorePath(store) a = Array.create( spath, shape=data.shape, chunk_shape=(64, 64, 64), dtype=data.dtype, fill_value=0, codecs=[ ShardingCodec( chunk_shape=(32, 32, 32), codecs=[ ShardingCodec(chunk_shape=(16, 16, 16), index_location=inner_index_location) ], index_location=outer_index_location, ) ], ) a[:, :, :] = data read_data = a[0 : data.shape[0], 0 : data.shape[1], 0 : data.shape[2]] assert data.shape == read_data.shape assert np.array_equal(data, read_data) @pytest.mark.parametrize( "array_fixture", [ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"), ], indirect=["array_fixture"], ) @pytest.mark.parametrize( "outer_index_location", ["start", "end"], ) @pytest.mark.parametrize( "inner_index_location", ["start", "end"], ) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_nested_sharding_create_array( store: Store, array_fixture: npt.NDArray[Any], outer_index_location: ShardingCodecIndexLocation, inner_index_location: ShardingCodecIndexLocation, ) -> None: data = array_fixture spath = StorePath(store) a = zarr.create_array( spath, shape=data.shape, chunks=(32, 32, 32), dtype=data.dtype, fill_value=0, serializer=ShardingCodec( chunk_shape=(32, 32, 32), codecs=[ShardingCodec(chunk_shape=(16, 16, 16), index_location=inner_index_location)], index_location=outer_index_location, ), filters=None, compressors=None, ) print(a.metadata.to_dict()) a[:, :, :] = data read_data = a[0 : data.shape[0], 0 : data.shape[1], 0 : data.shape[2]] assert data.shape == read_data.shape assert np.array_equal(data, read_data) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_open_sharding(store: Store) -> None: path = "open_sharding" spath = StorePath(store, path) a = zarr.create_array( spath, shape=(16, 16), chunks=(8, 8), shards=(16, 16), filters=[TransposeCodec(order=order_from_dim("F", 2))], compressors=BloscCodec(), dtype="int32", fill_value=0, ) b = Array.open(spath) assert a.metadata == b.metadata @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) def test_write_partial_sharded_chunks(store: Store) -> None: data = np.arange(0, 16 * 16, dtype="uint16").reshape((16, 16)) spath = StorePath(store) a = zarr.create_array( spath, shape=(40, 40), chunks=(10, 10), shards=(20, 20), dtype=data.dtype, compressors=BloscCodec(), fill_value=1, ) a[0:16, 0:16] = data assert np.array_equal(a[0:16, 0:16], data) @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) async def test_delete_empty_shards(store: Store) -> None: if not store.supports_deletes: 
pytest.skip("store does not support deletes") path = "delete_empty_shards" spath = StorePath(store, path) a = await zarr.api.asynchronous.create_array( spath, shape=(16, 16), chunks=(8, 8), shards=(8, 16), dtype="uint16", compressors=None, fill_value=1, ) print(a.metadata.to_dict()) await _AsyncArrayProxy(a)[:, :].set(np.zeros((16, 16))) await _AsyncArrayProxy(a)[8:, :].set(np.ones((8, 16))) await _AsyncArrayProxy(a)[:, 8:].set(np.ones((16, 8))) # chunk (0, 0) is full # chunks (0, 1), (1, 0), (1, 1) are empty # shard (0, 0) is half-full # shard (1, 0) is empty data = np.ones((16, 16), dtype="uint16") data[:8, :8] = 0 assert np.array_equal(data, await _AsyncArrayProxy(a)[:, :].get()) assert await store.get(f"{path}/c/1/0", prototype=default_buffer_prototype()) is None chunk_bytes = await store.get(f"{path}/c/0/0", prototype=default_buffer_prototype()) assert chunk_bytes is not None assert len(chunk_bytes) == 16 * 2 + 8 * 8 * 2 + 4 def test_pickle() -> None: codec = ShardingCodec(chunk_shape=(8, 8)) assert pickle.loads(pickle.dumps(codec)) == codec @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) @pytest.mark.parametrize( "index_location", [ShardingCodecIndexLocation.start, ShardingCodecIndexLocation.end] ) async def test_sharding_with_empty_inner_chunk( store: Store, index_location: ShardingCodecIndexLocation ) -> None: data = np.arange(0, 16 * 16, dtype="uint32").reshape((16, 16)) fill_value = 1 path = f"sharding_with_empty_inner_chunk_{index_location}" spath = StorePath(store, path) a = await zarr.api.asynchronous.create_array( spath, shape=(16, 16), chunks=(4, 4), shards={"shape": (8, 8), "index_location": index_location}, dtype="uint32", fill_value=fill_value, ) data[:4, :4] = fill_value await a.setitem(..., data) print("read data") data_read = await a.getitem(...) assert np.array_equal(data_read, data) @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) @pytest.mark.parametrize( "index_location", [ShardingCodecIndexLocation.start, ShardingCodecIndexLocation.end], ) @pytest.mark.parametrize("chunks_per_shard", [(5, 2), (2, 5), (5, 5)]) async def test_sharding_with_chunks_per_shard( store: Store, index_location: ShardingCodecIndexLocation, chunks_per_shard: tuple[int] ) -> None: chunk_shape = (2, 1) shape = tuple(x * y for x, y in zip(chunks_per_shard, chunk_shape, strict=False)) data = np.ones(np.prod(shape), dtype="int32").reshape(shape) fill_value = 42 path = f"test_sharding_with_chunks_per_shard_{index_location}" spath = StorePath(store, path) a = zarr.create_array( spath, shape=shape, chunks=chunk_shape, shards={"shape": shape, "index_location": index_location}, dtype="int32", fill_value=fill_value, ) a[...] = data data_read = a[...] 
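# --- illustrative note (added for exposition; not part of the original file) ---
# The byte-count assertion in test_delete_empty_shards above follows from the
# shard binary layout: a shard stores its non-empty encoded inner chunks plus
# an index holding an (offset, nbytes) pair of uint64 values per inner chunk,
# with a 4-byte crc32c checksum over the index by default. For the (8, 16)
# shard with two 8x8 uint16 inner chunks, only one of which is stored, that is
# 2 * 16 bytes of index, 8 * 8 * 2 bytes of chunk data, and 4 checksum bytes.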
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize(
    "index_location", [ShardingCodecIndexLocation.start, ShardingCodecIndexLocation.end]
)
async def test_sharding_with_empty_inner_chunk(
    store: Store, index_location: ShardingCodecIndexLocation
) -> None:
    data = np.arange(0, 16 * 16, dtype="uint32").reshape((16, 16))
    fill_value = 1

    path = f"sharding_with_empty_inner_chunk_{index_location}"
    spath = StorePath(store, path)
    a = await zarr.api.asynchronous.create_array(
        spath,
        shape=(16, 16),
        chunks=(4, 4),
        shards={"shape": (8, 8), "index_location": index_location},
        dtype="uint32",
        fill_value=fill_value,
    )
    data[:4, :4] = fill_value
    await a.setitem(..., data)
    print("read data")
    data_read = await a.getitem(...)
    assert np.array_equal(data_read, data)


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize(
    "index_location",
    [ShardingCodecIndexLocation.start, ShardingCodecIndexLocation.end],
)
@pytest.mark.parametrize("chunks_per_shard", [(5, 2), (2, 5), (5, 5)])
async def test_sharding_with_chunks_per_shard(
    store: Store, index_location: ShardingCodecIndexLocation, chunks_per_shard: tuple[int]
) -> None:
    chunk_shape = (2, 1)
    shape = tuple(x * y for x, y in zip(chunks_per_shard, chunk_shape, strict=False))
    data = np.ones(np.prod(shape), dtype="int32").reshape(shape)
    fill_value = 42

    path = f"test_sharding_with_chunks_per_shard_{index_location}"
    spath = StorePath(store, path)
    a = zarr.create_array(
        spath,
        shape=shape,
        chunks=chunk_shape,
        shards={"shape": shape, "index_location": index_location},
        dtype="int32",
        fill_value=fill_value,
    )
    a[...] = data
    data_read = a[...]
    assert np.array_equal(data_read, data)


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_invalid_metadata(store: Store) -> None:
    spath1 = StorePath(store, "invalid_inner_chunk_shape")
    with pytest.raises(ValueError):
        zarr.create_array(
            spath1,
            shape=(16, 16),
            shards=(16, 16),
            chunks=(8,),
            dtype=np.dtype("uint8"),
            fill_value=0,
        )

    spath2 = StorePath(store, "invalid_inner_chunk_shape")
    with pytest.raises(ValueError):
        zarr.create_array(
            spath2,
            shape=(16, 16),
            shards=(16, 16),
            chunks=(8, 7),
            dtype=np.dtype("uint8"),
            fill_value=0,
        )


zarr-python-3.0.6/tests/test_codecs/test_transpose.py

import numpy as np
import pytest

import zarr
from zarr import AsyncArray, config
from zarr.abc.store import Store
from zarr.codecs import TransposeCodec
from zarr.core.common import MemoryOrder
from zarr.storage import StorePath

from .test_codecs import _AsyncArrayProxy


@pytest.mark.parametrize("input_order", ["F", "C"])
@pytest.mark.parametrize("runtime_write_order", ["F", "C"])
@pytest.mark.parametrize("runtime_read_order", ["F", "C"])
@pytest.mark.parametrize("with_sharding", [True, False])
@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
async def test_transpose(
    store: Store,
    input_order: MemoryOrder,
    runtime_write_order: MemoryOrder,
    runtime_read_order: MemoryOrder,
    with_sharding: bool,
) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((1, 32, 8), order=input_order)
    spath = StorePath(store, path="transpose")
    with config.set({"array.order": runtime_write_order}):
        a = await zarr.api.asynchronous.create_array(
            spath,
            shape=data.shape,
            chunks=(1, 16, 8) if with_sharding else (1, 32, 8),
            shards=(1, 32, 8) if with_sharding else None,
            dtype=data.dtype,
            fill_value=0,
            chunk_key_encoding={"name": "v2", "separator": "."},
            filters=[TransposeCodec(order=(2, 1, 0))],
        )

    await _AsyncArrayProxy(a)[:, :].set(data)
    read_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, read_data)

    with config.set({"array.order": runtime_read_order}):
        a = await AsyncArray.open(
            spath,
        )
    read_data = await _AsyncArrayProxy(a)[:, :].get()
    assert np.array_equal(data, read_data)

    if runtime_read_order == "F":
        assert read_data.flags["F_CONTIGUOUS"]
        assert not read_data.flags["C_CONTIGUOUS"]
    else:
        assert not read_data.flags["F_CONTIGUOUS"]
        assert read_data.flags["C_CONTIGUOUS"]


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("order", [[1, 2, 0], [1, 2, 3, 0], [3, 2, 4, 0, 1]])
def test_transpose_non_self_inverse(store: Store, order: list[int]) -> None:
    shape = [i + 3 for i in range(len(order))]
    data = np.arange(0, np.prod(shape), dtype="uint16").reshape(shape)
    spath = StorePath(store, "transpose_non_self_inverse")
    a = zarr.create_array(
        spath,
        shape=data.shape,
        chunks=data.shape,
        dtype=data.dtype,
        fill_value=0,
        filters=[TransposeCodec(order=order)],
    )
    a[:, :] = data
    read_data = a[:, :]
    assert np.array_equal(data, read_data)


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
def test_transpose_invalid(
    store: Store,
) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((1, 32, 8))
    spath = StorePath(store, "transpose_invalid")
    for order in [(1, 0), (3, 2, 1), (3, 3, 1), "F", "C"]:
        with pytest.raises((ValueError, TypeError)):
            zarr.create_array(
                spath,
                shape=data.shape,
                chunks=(1, 32, 8),
                dtype=data.dtype,
                fill_value=0,
                chunk_key_encoding={"name": "v2", "separator": "."},
                filters=[TransposeCodec(order=order)],
            )


zarr-python-3.0.6/tests/test_codecs/test_vlen.py

from typing import Any

import numpy as np
import pytest

import zarr
from zarr import Array
from zarr.abc.codec import Codec
from zarr.abc.store import Store
from zarr.codecs import ZstdCodec
from zarr.core.metadata.v3 import ArrayV3Metadata, DataType
from zarr.core.strings import _NUMPY_SUPPORTS_VLEN_STRING
from zarr.storage import StorePath

numpy_str_dtypes: list[type | str | None] = [None, str, "str", np.dtypes.StrDType]
expected_zarr_string_dtype: np.dtype[Any]
if _NUMPY_SUPPORTS_VLEN_STRING:
    numpy_str_dtypes.append(np.dtypes.StringDType)
    expected_zarr_string_dtype = np.dtypes.StringDType()
else:
    expected_zarr_string_dtype = np.dtype("O")


@pytest.mark.parametrize("store", ["memory", "local"], indirect=["store"])
@pytest.mark.parametrize("dtype", numpy_str_dtypes)
@pytest.mark.parametrize("as_object_array", [False, True])
@pytest.mark.parametrize("compressor", [None, ZstdCodec()])
def test_vlen_string(
    store: Store, dtype: np.dtype[Any] | None, as_object_array: bool, compressor: Codec | None
) -> None:
    strings = ["hello", "world", "this", "is", "a", "test"]
    data = np.array(strings, dtype=dtype).reshape((2, 3))

    sp = StorePath(store, path="string")
    a = zarr.create_array(
        sp,
        shape=data.shape,
        chunks=data.shape,
        dtype=data.dtype,
        fill_value="",
        compressors=compressor,
    )
    assert isinstance(a.metadata, ArrayV3Metadata)  # needed for mypy

    # should also work if input array is an object array, provided we explicitly specified
    # a stringlike dtype when creating the Array
    if as_object_array:
        data = data.astype("O")

    a[:, :] = data
    assert np.array_equal(data, a[:, :])
    assert a.metadata.data_type == DataType.string
    assert a.dtype == expected_zarr_string_dtype

    # test round trip
    b = Array.open(sp)
    assert isinstance(b.metadata, ArrayV3Metadata)  # needed for mypy
    assert np.array_equal(data, b[:, :])
    assert b.metadata.data_type == DataType.string
    assert a.dtype == expected_zarr_string_dtype


@pytest.mark.parametrize("store", ["memory", "local"], indirect=["store"])
@pytest.mark.parametrize("as_object_array", [False, True])
@pytest.mark.parametrize("compressor", [None, ZstdCodec()])
def test_vlen_bytes(store: Store, as_object_array: bool, compressor: Codec | None) -> None:
    bstrings = [b"hello", b"world", b"this", b"is", b"a", b"test"]
    data = np.array(bstrings).reshape((2, 3))
    assert data.dtype == "|S5"

    sp = StorePath(store, path="string")
    a = zarr.create_array(
        sp,
        shape=data.shape,
        chunks=data.shape,
        dtype=data.dtype,
        fill_value=b"",
        compressors=compressor,
    )
    assert isinstance(a.metadata, ArrayV3Metadata)  # needed for mypy

    # should also work if input array is an object array, provided we explicitly specified
    # a bytestring-like dtype when creating the Array
    if as_object_array:
        data = data.astype("O")
    a[:, :] = data
    assert np.array_equal(data, a[:, :])
    assert a.metadata.data_type == DataType.bytes
    assert a.dtype == "O"

    # test round trip
    b = Array.open(sp)
    assert isinstance(b.metadata, ArrayV3Metadata)  # needed for mypy
    assert np.array_equal(data, b[:, :])
    assert b.metadata.data_type == DataType.bytes
    assert a.dtype == "O"


zarr-python-3.0.6/tests/test_codecs/test_zstd.py

import numpy as np
import pytest

import zarr
from zarr.abc.store import Store
from zarr.codecs import ZstdCodec
from zarr.storage import StorePath


@pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"])
@pytest.mark.parametrize("checksum", [True, False])
def test_zstd(store: Store, checksum: bool) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))

    a = zarr.create_array(
        StorePath(store, path="zstd"),
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
        compressors=ZstdCodec(level=0, checksum=checksum),
    )

    a[:, :] = data
    assert np.array_equal(data, a[:, :])


zarr-python-3.0.6/tests/test_common.py

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from collections.abc import Iterable
    from typing import Any, Literal

import numpy as np
import pytest

from zarr.core.common import parse_name, parse_shapelike, product
from zarr.core.config import parse_indexing_order


@pytest.mark.parametrize("data", [(0, 0, 0, 0), (1, 3, 4, 5, 6), (2, 4)])
def test_product(data: tuple[int, ...]) -> None:
    assert product(data) == np.prod(data)


# todo: test
def test_concurrent_map() -> None: ...


# todo: test
def test_to_thread() -> None: ...


# todo: test
def test_enum_names() -> None: ...


# todo: test
def test_parse_enum() -> None: ...


@pytest.mark.parametrize("data", [("foo", "bar"), (10, 11)])
def test_parse_name_invalid(data: tuple[Any, Any]) -> None:
    observed, expected = data
    if isinstance(observed, str):
        with pytest.raises(ValueError, match=f"Expected '{expected}'. Got {observed} instead."):
            parse_name(observed, expected)
    else:
        with pytest.raises(
            TypeError, match=f"Expected a string, got an instance of {type(observed)}."
        ):
            parse_name(observed, expected)


@pytest.mark.parametrize("data", [("foo", "foo"), ("10", "10")])
def test_parse_name_valid(data: tuple[Any, Any]) -> None:
    observed, expected = data
    assert parse_name(observed, expected) == observed


@pytest.mark.parametrize("data", [0, 1, "hello", "f"])
def test_parse_indexing_order_invalid(data: Any) -> None:
    with pytest.raises(ValueError, match="Expected one of"):
        parse_indexing_order(data)


@pytest.mark.parametrize("data", ["C", "F"])
def test_parse_indexing_order_valid(data: Literal["C", "F"]) -> None:
    assert parse_indexing_order(data) == data


@pytest.mark.parametrize("data", [lambda v: v, slice(None)])
def test_parse_shapelike_invalid_single_type(data: Any) -> None:
    """
    Test that we get the expected error message when passing in a value that is not an integer
    or an iterable of integers.
    """
    with pytest.raises(TypeError, match="Expected an integer or an iterable of integers."):
        parse_shapelike(data)


def test_parse_shapelike_invalid_single_value() -> None:
    """
    Test that we get the expected error message when passing in a negative integer.
    """
    with pytest.raises(ValueError, match="Expected a non-negative integer."):
        parse_shapelike(-1)


@pytest.mark.parametrize("data", ["shape", ("0", 1, 2, 3), {"0": "0"}, ((1, 2), (2, 2)), (4.0, 2)])
def test_parse_shapelike_invalid_iterable_types(data: Any) -> None:
    """
    Test that we get the expected error message when passing in an iterable containing
    non-integer elements
    """
    with pytest.raises(TypeError, match="Expected an iterable of integers"):
        parse_shapelike(data)


@pytest.mark.parametrize("data", [(1, 2, 3, -1), (-10,)])
def test_parse_shapelike_invalid_iterable_values(data: Any) -> None:
    """
    Test that we get the expected error message when passing in an iterable containing
    negative integers
    """
    with pytest.raises(ValueError, match="Expected all values to be non-negative."):
        parse_shapelike(data)


@pytest.mark.parametrize("data", [range(10), [0, 1, 2, 3], (3, 4, 5), ()])
def test_parse_shapelike_valid(data: Iterable[int]) -> None:
    assert parse_shapelike(data) == tuple(data)


# todo: more dtypes
@pytest.mark.parametrize("data", [("uint8", np.uint8), ("float64", np.float64)])
def test_parse_dtype(data: tuple[str, np.dtype[Any]]) -> None:
    unparsed, parsed = data
    assert np.dtype(unparsed) == parsed


# todo: figure out what it means to test this
def test_parse_fill_value() -> None: ...
"codec_pipeline": { "path": "zarr.core.codec_pipeline.BatchedCodecPipeline", "batch_size": 1, }, "buffer": "zarr.core.buffer.cpu.Buffer", "ndbuffer": "zarr.core.buffer.cpu.NDBuffer", "codecs": { "blosc": "zarr.codecs.blosc.BloscCodec", "gzip": "zarr.codecs.gzip.GzipCodec", "zstd": "zarr.codecs.zstd.ZstdCodec", "bytes": "zarr.codecs.bytes.BytesCodec", "endian": "zarr.codecs.bytes.BytesCodec", "crc32c": "zarr.codecs.crc32c_.Crc32cCodec", "sharding_indexed": "zarr.codecs.sharding.ShardingCodec", "transpose": "zarr.codecs.transpose.TransposeCodec", "vlen-utf8": "zarr.codecs.vlen_utf8.VLenUTF8Codec", "vlen-bytes": "zarr.codecs.vlen_utf8.VLenBytesCodec", }, } ] assert config.get("array.order") == "C" assert config.get("async.concurrency") == 10 assert config.get("async.timeout") is None assert config.get("codec_pipeline.batch_size") == 1 assert config.get("json_indent") == 2 @pytest.mark.parametrize( ("key", "old_val", "new_val"), [("array.order", "C", "F"), ("async.concurrency", 10, 20), ("json_indent", 2, 0)], ) def test_config_defaults_can_be_overridden(key: str, old_val: Any, new_val: Any) -> None: assert config.get(key) == old_val with config.set({key: new_val}): assert config.get(key) == new_val def test_fully_qualified_name() -> None: class MockClass: pass assert ( fully_qualified_name(MockClass) == "tests.test_config.test_fully_qualified_name..MockClass" ) @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) def test_config_codec_pipeline_class(store: Store) -> None: # has default value assert get_pipeline_class().__name__ != "" config.set({"codec_pipeline.name": "zarr.core.codec_pipeline.BatchedCodecPipeline"}) assert get_pipeline_class() == zarr.core.codec_pipeline.BatchedCodecPipeline _mock = Mock() class MockCodecPipeline(BatchedCodecPipeline): async def write( self, batch_info: Iterable[tuple[ByteSetter, ArraySpec, SelectorTuple, SelectorTuple]], value: NDBuffer, drop_axes: tuple[int, ...] 
= (), ) -> None: _mock.call() register_pipeline(MockCodecPipeline) config.set({"codec_pipeline.path": fully_qualified_name(MockCodecPipeline)}) assert get_pipeline_class() == MockCodecPipeline # test if codec is used arr = zarr.create_array( store=store, shape=(100,), chunks=(10,), zarr_format=3, dtype="i4", ) arr[:] = range(100) _mock.call.assert_called() with pytest.raises(BadConfigError): config.set({"codec_pipeline.path": "wrong_name"}) get_pipeline_class() class MockEnvCodecPipeline(CodecPipeline): pass register_pipeline(MockEnvCodecPipeline) with mock.patch.dict( os.environ, {"ZARR_CODEC_PIPELINE__PATH": fully_qualified_name(MockEnvCodecPipeline)} ): assert get_pipeline_class(reload_config=True) == MockEnvCodecPipeline @pytest.mark.filterwarnings("error") @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) def test_config_codec_implementation(store: Store) -> None: # has default value assert fully_qualified_name(get_codec_class("blosc")) == config.defaults[0]["codecs"]["blosc"] _mock = Mock() class MockBloscCodec(BloscCodec): async def _encode_single( self, chunk_data: CodecInput, chunk_spec: ArraySpec ) -> CodecOutput | None: _mock.call() register_codec("blosc", MockBloscCodec) with config.set({"codecs.blosc": fully_qualified_name(MockBloscCodec)}): assert get_codec_class("blosc") == MockBloscCodec # test if codec is used arr = zarr.create_array( store=store, shape=(100,), chunks=(10,), zarr_format=3, dtype="i4", compressors=[{"name": "blosc", "configuration": {}}], ) arr[:] = range(100) _mock.call.assert_called() # test set codec with environment variable class NewBloscCodec(BloscCodec): pass register_codec("blosc", NewBloscCodec) with mock.patch.dict(os.environ, {"ZARR_CODECS__BLOSC": fully_qualified_name(NewBloscCodec)}): assert get_codec_class("blosc", reload_config=True) == NewBloscCodec @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) def test_config_ndbuffer_implementation(store: Store) -> None: # has default value assert fully_qualified_name(get_ndbuffer_class()) == config.defaults[0]["ndbuffer"] # set custom ndbuffer with TestNDArrayLike implementation register_ndbuffer(NDBufferUsingTestNDArrayLike) with config.set({"ndbuffer": fully_qualified_name(NDBufferUsingTestNDArrayLike)}): assert get_ndbuffer_class() == NDBufferUsingTestNDArrayLike arr = zarr.create_array( store=store, shape=(100,), chunks=(10,), zarr_format=3, dtype="i4", ) got = arr[:] assert isinstance(got, TestNDArrayLike) def test_config_buffer_implementation() -> None: # has default value assert fully_qualified_name(get_buffer_class()) == config.defaults[0]["buffer"] arr = zeros(shape=(100), store=StoreExpectingTestBuffer()) # AssertionError of StoreExpectingTestBuffer when not using my buffer with pytest.raises(AssertionError): arr[:] = np.arange(100) register_buffer(TestBuffer) with config.set({"buffer": fully_qualified_name(TestBuffer)}): assert get_buffer_class() == TestBuffer # no error using TestBuffer data = np.arange(100) arr[:] = np.arange(100) assert np.array_equal(arr[:], data) data2d = np.arange(1000).reshape(100, 10) arr_sharding = zeros( shape=(100, 10), store=StoreExpectingTestBuffer(), codecs=[ShardingCodec(chunk_shape=(10, 10))], ) arr_sharding[:] = data2d assert np.array_equal(arr_sharding[:], data2d) arr_Crc32c = zeros( shape=(100, 10), store=StoreExpectingTestBuffer(), codecs=[BytesCodec(), Crc32cCodec()], ) arr_Crc32c[:] = data2d assert np.array_equal(arr_Crc32c[:], data2d) @pytest.mark.filterwarnings("error") def 
test_warning_on_missing_codec_config() -> None: class NewCodec(BytesCodec): pass class NewCodec2(BytesCodec): pass # error if codec is not registered with pytest.raises(KeyError): get_codec_class("missing_codec") # no warning if only one implementation is available register_codec("new_codec", NewCodec) get_codec_class("new_codec") # warning because multiple implementations are available but none is selected in the config register_codec("new_codec", NewCodec2) with pytest.warns(UserWarning): get_codec_class("new_codec") # no warning if multiple implementations are available and one is selected in the config with config.set({"codecs.new_codec": fully_qualified_name(NewCodec)}): get_codec_class("new_codec") @pytest.mark.parametrize("dtype", ["int", "bytes", "str"]) async def test_default_codecs(dtype: str) -> None: with config.set( { "array.v3_default_compressors": { # test setting non-standard codecs "numeric": [ {"name": "gzip", "configuration": {"level": 5}}, ], "string": [ {"name": "gzip", "configuration": {"level": 5}}, ], "bytes": [ {"name": "gzip", "configuration": {"level": 5}}, ], } } ): arr = await zarr.api.asynchronous.create_array( shape=(100,), chunks=(100,), dtype=np.dtype(dtype), zarr_format=3, store=MemoryStore(), ) assert arr.compressors == (GzipCodec(),) zarr-python-3.0.6/tests/test_group.py000066400000000000000000002216511476711733500177510ustar00rootroot00000000000000from __future__ import annotations import contextlib import inspect import operator import pickle import re import time import warnings from typing import TYPE_CHECKING, Any, Literal import numpy as np import pytest from numcodecs import Blosc import zarr import zarr.api.asynchronous import zarr.api.synchronous import zarr.storage from zarr import Array, AsyncArray, AsyncGroup, Group from zarr.abc.store import Store from zarr.core import sync_group from zarr.core._info import GroupInfo from zarr.core.buffer import default_buffer_prototype from zarr.core.config import config as zarr_config from zarr.core.group import ( ConsolidatedMetadata, GroupMetadata, ImplicitGroupMarker, _build_metadata_v3, _get_roots, _parse_hierarchy_dict, create_hierarchy, create_nodes, create_rooted_hierarchy, get_node, ) from zarr.core.metadata.v3 import ArrayV3Metadata from zarr.core.sync import _collect_aiterator, sync from zarr.errors import ContainsArrayError, ContainsGroupError, MetadataValidationError from zarr.storage import LocalStore, MemoryStore, StorePath, ZipStore from zarr.storage._common import make_store_path from zarr.storage._utils import _join_paths, normalize_path from zarr.testing.store import LatencyStore from .conftest import meta_from_array, parse_store if TYPE_CHECKING: from collections.abc import Callable from _pytest.compat import LEGACY_PATH from zarr.core.common import JSON, ZarrFormat @pytest.fixture(params=["local", "memory", "zip"]) async def store(request: pytest.FixtureRequest, tmpdir: LEGACY_PATH) -> Store: result = await parse_store(request.param, str(tmpdir)) if not isinstance(result, Store): raise TypeError("Wrong store class returned by test fixture! got " + result + " instead") return result @pytest.fixture(params=[True, False]) def overwrite(request: pytest.FixtureRequest) -> bool: result = request.param if not isinstance(result, bool): raise TypeError("Wrong type returned by test fixture.") return result def test_group_init(store: Store, zarr_format: ZarrFormat) -> None: """ Test that initializing a group from an asyncgroup works. 
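# --- illustrative sketch (added for exposition; not part of the original suite) ---
# The registration pattern exercised above, condensed: a codec class is
# registered under a name, and the config selects among the registered
# implementations for that name.
def _registry_demo() -> None:
    class DemoBlosc(BloscCodec):
        pass

    register_codec("blosc", DemoBlosc)
    with config.set({"codecs.blosc": fully_qualified_name(DemoBlosc)}):
        assert get_codec_class("blosc") == DemoBlosc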
""" agroup = sync(AsyncGroup.from_store(store=store, zarr_format=zarr_format)) group = Group(agroup) assert group._async_group == agroup async def test_create_creates_parents(store: Store, zarr_format: ZarrFormat) -> None: # prepare a root node, with some data set await zarr.api.asynchronous.open_group( store=store, path="a", zarr_format=zarr_format, attributes={"key": "value"} ) objs = {x async for x in store.list()} if zarr_format == 2: assert objs == {".zgroup", ".zattrs", "a/.zgroup", "a/.zattrs"} else: assert objs == {"zarr.json", "a/zarr.json"} # test that root group node was created root = await zarr.api.asynchronous.open_group( store=store, ) agroup = await root.getitem("a") assert agroup.attrs == {"key": "value"} # create a child node with a couple intermediates await zarr.api.asynchronous.open_group(store=store, path="a/b/c/d", zarr_format=zarr_format) parts = ["a", "a/b", "a/b/c"] if zarr_format == 2: files = [".zattrs", ".zgroup"] else: files = ["zarr.json"] expected = [f"{part}/{file}" for file in files for part in parts] if zarr_format == 2: expected.extend([".zgroup", ".zattrs", "a/b/c/d/.zgroup", "a/b/c/d/.zattrs"]) else: expected.extend(["zarr.json", "a/b/c/d/zarr.json"]) expected = sorted(expected) result = sorted([x async for x in store.list_prefix("")]) assert result == expected paths = ["a", "a/b", "a/b/c"] for path in paths: g = await zarr.api.asynchronous.open_group(store=store, path=path) assert isinstance(g, AsyncGroup) if path == "a": # ensure we didn't overwrite the root attributes assert g.attrs == {"key": "value"} else: assert g.attrs == {} @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("root_name", ["", "/", "a", "/a"]) @pytest.mark.parametrize("branch_name", ["foo", "/foo", "foo/bar", "/foo/bar"]) def test_group_name_properties( store: Store, zarr_format: ZarrFormat, root_name: str, branch_name: str ) -> None: """ Test that the path, name, and basename attributes of a group and its subgroups are consistent """ root = Group.from_store(store=StorePath(store=store, path=root_name), zarr_format=zarr_format) assert root.path == normalize_path(root_name) assert root.name == "/" + root.path assert root.basename == root.path branch = root.create_group(branch_name) if root.path == "": assert branch.path == normalize_path(branch_name) else: assert branch.path == "/".join([root.path, normalize_path(branch_name)]) assert branch.name == "/" + branch.path assert branch.basename == branch_name.split("/")[-1] @pytest.mark.parametrize("consolidated_metadata", [True, False]) def test_group_members(store: Store, zarr_format: ZarrFormat, consolidated_metadata: bool) -> None: """ Test that `Group.members` returns correct values, i.e. the arrays and groups (explicit and implicit) contained in that group. """ # group/ # subgroup/ # subsubgroup/ # subsubsubgroup # subarray path = "group" group = Group.from_store( store=store, zarr_format=zarr_format, ) members_expected: dict[str, Array | Group] = {} members_expected["subgroup"] = group.create_group("subgroup") # make a sub-sub-subgroup, to ensure that the children calculation doesn't go # too deep in the hierarchy subsubgroup = members_expected["subgroup"].create_group("subsubgroup") subsubsubgroup = subsubgroup.create_group("subsubsubgroup") members_expected["subarray"] = group.create_array( "subarray", shape=(100,), dtype="uint8", chunks=(10,), overwrite=True ) # add an extra object to the domain of the group. # the list of children should ignore this object. 
sync( store.set( f"{path}/extra_object-1", default_buffer_prototype().buffer.from_bytes(b"000000"), ) ) # add an extra object under a directory-like prefix in the domain of the group. # this creates a directory with a random key in it # this should not show up as a member sync( store.set( f"{path}/extra_directory/extra_object-2", default_buffer_prototype().buffer.from_bytes(b"000000"), ) ) # this warning shows up when extra objects show up in the hierarchy warn_context = pytest.warns( UserWarning, match=r"Object at .* is not recognized as a component of a Zarr hierarchy." ) if consolidated_metadata: with warn_context: zarr.consolidate_metadata(store=store, zarr_format=zarr_format) # now that we've consolidated the store, we shouldn't get the warnings from the unrecognized objects anymore # we use a nullcontext to handle these cases warn_context = contextlib.nullcontext() group = zarr.open_consolidated(store=store, zarr_format=zarr_format) with warn_context: members_observed = group.members() # members are not guaranteed to be ordered, so sort before comparing assert sorted(dict(members_observed)) == sorted(members_expected) # partial with warn_context: members_observed = group.members(max_depth=1) members_expected["subgroup/subsubgroup"] = subsubgroup # members are not guaranteed to be ordered, so sort before comparing assert sorted(dict(members_observed)) == sorted(members_expected) # total with warn_context: members_observed = group.members(max_depth=None) members_expected["subgroup/subsubgroup/subsubsubgroup"] = subsubsubgroup # members are not guaranteed to be ordered, so sort before comparing assert sorted(dict(members_observed)) == sorted(members_expected) with pytest.raises(ValueError, match="max_depth"): members_observed = group.members(max_depth=-1) def test_group(store: Store, zarr_format: ZarrFormat) -> None: """ Test basic Group routines. """ store_path = StorePath(store) agroup = AsyncGroup(metadata=GroupMetadata(zarr_format=zarr_format), store_path=store_path) group = Group(agroup) assert agroup.metadata is group.metadata assert agroup.store_path == group.store_path == store_path # create two groups foo = group.create_group("foo") bar = foo.create_group("bar", attributes={"baz": "qux"}) # create an array from the "bar" group data = np.arange(0, 4 * 4, dtype="uint16").reshape((4, 4)) arr = bar.create_array("baz", shape=data.shape, dtype=data.dtype, chunks=(2, 2), overwrite=True) arr[:] = data # check the array assert arr == bar["baz"] assert arr.shape == data.shape assert arr.dtype == data.dtype # TODO: update this once the array api settles down assert arr.chunks == (2, 2) bar2 = foo["bar"] assert dict(bar2.attrs) == {"baz": "qux"} # update a group's attributes bar2.attrs.update({"name": "bar"}) # bar.attrs was modified in-place assert dict(bar2.attrs) == {"baz": "qux", "name": "bar"} # and the attrs were modified in the store bar3 = foo["bar"] assert dict(bar3.attrs) == {"baz": "qux", "name": "bar"} def test_group_create(store: Store, overwrite: bool, zarr_format: ZarrFormat) -> None: """ Test that `Group.from_store` works as expected. """ attributes = {"foo": 100} group = Group.from_store( store, attributes=attributes, zarr_format=zarr_format, overwrite=overwrite ) assert group.attrs == attributes if not overwrite: with pytest.raises(ContainsGroupError): _ = Group.from_store(store, overwrite=overwrite, zarr_format=zarr_format) def test_group_open(store: Store, zarr_format: ZarrFormat, overwrite: bool) -> None: """ Test the `Group.open` method. 
""" spath = StorePath(store) # attempt to open a group that does not exist with pytest.raises(FileNotFoundError): Group.open(store) # create the group attrs = {"path": "foo"} group_created = Group.from_store( store, attributes=attrs, zarr_format=zarr_format, overwrite=overwrite ) assert group_created.attrs == attrs assert group_created.metadata.zarr_format == zarr_format assert group_created.store_path == spath # attempt to create a new group in place, to test overwrite new_attrs = {"path": "bar"} if not overwrite: with pytest.raises(ContainsGroupError): Group.from_store(store, attributes=attrs, zarr_format=zarr_format, overwrite=overwrite) else: if not store.supports_deletes: pytest.skip( "Store does not support deletes but `overwrite` is True, requiring deletes to override a group" ) group_created_again = Group.from_store( store, attributes=new_attrs, zarr_format=zarr_format, overwrite=overwrite ) assert group_created_again.attrs == new_attrs assert group_created_again.metadata.zarr_format == zarr_format assert group_created_again.store_path == spath @pytest.mark.parametrize("consolidated", [True, False]) def test_group_getitem(store: Store, zarr_format: ZarrFormat, consolidated: bool) -> None: """ Test the `Group.__getitem__` method. """ group = Group.from_store(store, zarr_format=zarr_format) subgroup = group.create_group(name="subgroup") subarray = group.create_array(name="subarray", shape=(10,), chunks=(10,), dtype="uint8") subsubarray = subgroup.create_array(name="subarray", shape=(10,), chunks=(10,), dtype="uint8") if consolidated: group = zarr.api.synchronous.consolidate_metadata(store=store, zarr_format=zarr_format) # we're going to assume that `group.metadata` is correct, and reuse that to focus # on indexing in this test. Other tests verify the correctness of group.metadata object.__setattr__( subgroup.metadata, "consolidated_metadata", ConsolidatedMetadata( metadata={"subarray": group.metadata.consolidated_metadata.metadata["subarray"]} ), ) assert group["subgroup"] == subgroup assert group["subarray"] == subarray assert group["subgroup"]["subarray"] == subsubarray assert group["subgroup/subarray"] == subsubarray with pytest.raises(KeyError): group["nope"] with pytest.raises(KeyError, match="subarray/subsubarray"): group["subarray/subsubarray"] # Now test the mixed case if consolidated: object.__setattr__( group.metadata.consolidated_metadata.metadata["subgroup"], "consolidated_metadata", None, ) # test the implementation directly with pytest.raises(KeyError): group._async_group._getitem_consolidated( group.store_path, "subgroup/subarray", prefix="/" ) with pytest.raises(KeyError): # We've chosen to trust the consolidated metadata, which doesn't # contain this array group["subgroup/subarray"] with pytest.raises(KeyError, match="subarray/subsubarray"): group["subarray/subsubarray"] def test_group_get_with_default(store: Store, zarr_format: ZarrFormat) -> None: group = Group.from_store(store, zarr_format=zarr_format) # default behavior result = group.get("subgroup") assert result is None # custom default result = group.get("subgroup", 8) assert result == 8 # now with a group subgroup = group.require_group("subgroup") subgroup.attrs["foo"] = "bar" result = group.get("subgroup", 8) assert result.attrs["foo"] == "bar" @pytest.mark.parametrize("consolidated", [True, False]) def test_group_delitem(store: Store, zarr_format: ZarrFormat, consolidated: bool) -> None: """ Test the `Group.__delitem__` method. 
""" if not store.supports_deletes: pytest.skip("store does not support deletes") group = Group.from_store(store, zarr_format=zarr_format) subgroup = group.create_group(name="subgroup") subarray = group.create_array(name="subarray", shape=(10,), chunks=(10,), dtype="uint8") if consolidated: group = zarr.api.synchronous.consolidate_metadata(store=store, zarr_format=zarr_format) object.__setattr__( subgroup.metadata, "consolidated_metadata", ConsolidatedMetadata(metadata={}) ) assert group["subgroup"] == subgroup assert group["subarray"] == subarray del group["subgroup"] with pytest.raises(KeyError): group["subgroup"] del group["subarray"] with pytest.raises(KeyError): group["subarray"] def test_group_iter(store: Store, zarr_format: ZarrFormat) -> None: """ Test the `Group.__iter__` method. """ group = Group.from_store(store, zarr_format=zarr_format) assert list(group) == [] def test_group_len(store: Store, zarr_format: ZarrFormat) -> None: """ Test the `Group.__len__` method. """ group = Group.from_store(store, zarr_format=zarr_format) assert len(group) == 0 def test_group_setitem(store: Store, zarr_format: ZarrFormat) -> None: """ Test the `Group.__setitem__` method. """ group = Group.from_store(store, zarr_format=zarr_format) arr = np.ones((2, 4)) group["key"] = arr assert list(group.array_keys()) == ["key"] assert group["key"].shape == (2, 4) np.testing.assert_array_equal(group["key"][:], arr) if store.supports_deletes: key = "key" else: # overwriting with another array requires deletes # for stores that don't support this, we just use a new key key = "key2" # overwrite with another array arr = np.zeros((3, 5)) group[key] = arr assert key in list(group.array_keys()) assert group[key].shape == (3, 5) np.testing.assert_array_equal(group[key], arr) def test_group_contains(store: Store, zarr_format: ZarrFormat) -> None: """ Test the `Group.__contains__` method """ group = Group.from_store(store, zarr_format=zarr_format) assert "foo" not in group _ = group.create_group(name="foo") assert "foo" in group @pytest.mark.parametrize("consolidate", [True, False]) def test_group_child_iterators(store: Store, zarr_format: ZarrFormat, consolidate: bool): group = Group.from_store(store, zarr_format=zarr_format) expected_group_keys = ["g0", "g1"] expected_group_values = [group.create_group(name=name) for name in expected_group_keys] expected_groups = list(zip(expected_group_keys, expected_group_values, strict=False)) fill_value = 3 dtype = "uint8" expected_group_values[0].create_group("subgroup") expected_group_values[0].create_array( "subarray", shape=(1,), dtype=dtype, fill_value=fill_value ) expected_array_keys = ["a0", "a1"] expected_array_values = [ group.create_array(name=name, shape=(1,), dtype=dtype, fill_value=fill_value) for name in expected_array_keys ] expected_arrays = list(zip(expected_array_keys, expected_array_values, strict=False)) if consolidate: group = zarr.consolidate_metadata(store) if zarr_format == 2: metadata = { "subarray": { "attributes": {}, "dtype": dtype, "fill_value": fill_value, "shape": (1,), "chunks": (1,), "order": "C", "filters": None, "compressor": Blosc(), "zarr_format": zarr_format, }, "subgroup": { "attributes": {}, "consolidated_metadata": { "metadata": {}, "kind": "inline", "must_understand": False, }, "node_type": "group", "zarr_format": zarr_format, }, } else: metadata = { "subarray": { "attributes": {}, "chunk_grid": { "configuration": {"chunk_shape": (1,)}, "name": "regular", }, "chunk_key_encoding": { "configuration": {"separator": "/"}, "name": "default", }, 
"codecs": ( {"configuration": {"endian": "little"}, "name": "bytes"}, {"configuration": {}, "name": "zstd"}, ), "data_type": dtype, "fill_value": fill_value, "node_type": "array", "shape": (1,), "zarr_format": zarr_format, }, "subgroup": { "attributes": {}, "consolidated_metadata": { "metadata": {}, "kind": "inline", "must_understand": False, }, "node_type": "group", "zarr_format": zarr_format, }, } object.__setattr__( expected_group_values[0].metadata, "consolidated_metadata", ConsolidatedMetadata.from_dict( { "kind": "inline", "metadata": metadata, "must_understand": False, } ), ) object.__setattr__( expected_group_values[1].metadata, "consolidated_metadata", ConsolidatedMetadata(metadata={}), ) result = sorted(group.groups(), key=operator.itemgetter(0)) assert result == expected_groups assert sorted(group.groups(), key=operator.itemgetter(0)) == expected_groups assert sorted(group.group_keys()) == expected_group_keys assert sorted(group.group_values(), key=lambda x: x.name) == expected_group_values assert sorted(group.arrays(), key=operator.itemgetter(0)) == expected_arrays assert sorted(group.array_keys()) == expected_array_keys assert sorted(group.array_values(), key=lambda x: x.name) == expected_array_values def test_group_update_attributes(store: Store, zarr_format: ZarrFormat) -> None: """ Test the behavior of `Group.update_attributes` """ attrs = {"foo": 100} group = Group.from_store(store, zarr_format=zarr_format, attributes=attrs) assert group.attrs == attrs new_attrs = {"bar": 100} new_group = group.update_attributes(new_attrs) updated_attrs = attrs.copy() updated_attrs.update(new_attrs) assert new_group.attrs == updated_attrs async def test_group_update_attributes_async(store: Store, zarr_format: ZarrFormat) -> None: """ Test the behavior of `Group.update_attributes_async` """ attrs = {"foo": 100} group = Group.from_store(store, zarr_format=zarr_format, attributes=attrs) assert group.attrs == attrs new_attrs = {"bar": 100} new_group = await group.update_attributes_async(new_attrs) assert new_group.attrs == new_attrs @pytest.mark.parametrize("method", ["create_array", "array"]) @pytest.mark.parametrize("name", ["a", "/a"]) def test_group_create_array( store: Store, zarr_format: ZarrFormat, overwrite: bool, method: Literal["create_array", "array"], name: str, ) -> None: """ Test `Group.from_store` """ group = Group.from_store(store, zarr_format=zarr_format) shape = (10, 10) dtype = "uint8" data = np.arange(np.prod(shape)).reshape(shape).astype(dtype) if method == "create_array": array = group.create_array(name=name, shape=shape, dtype=dtype) array[:] = data elif method == "array": with pytest.warns(DeprecationWarning): array = group.array(name=name, data=data, shape=shape, dtype=dtype) else: raise AssertionError if not overwrite: if method == "create_array": with pytest.raises(ContainsArrayError): a = group.create_array(name=name, shape=shape, dtype=dtype) a[:] = data elif method == "array": with pytest.raises(ContainsArrayError), pytest.warns(DeprecationWarning): a = group.array(name=name, shape=shape, dtype=dtype) a[:] = data assert array.path == normalize_path(name) assert array.name == "/" + array.path assert array.shape == shape assert array.dtype == np.dtype(dtype) assert np.array_equal(array[:], data) def test_group_array_creation( store: Store, zarr_format: ZarrFormat, ): group = Group.from_store(store, zarr_format=zarr_format) shape = (10, 10) empty_array = group.empty(name="empty", shape=shape) assert isinstance(empty_array, Array) assert empty_array.fill_value == 0 
assert empty_array.shape == shape assert empty_array.store_path.store == store assert empty_array.store_path.path == "empty" empty_like_array = group.empty_like(name="empty_like", data=empty_array) assert isinstance(empty_like_array, Array) assert empty_like_array.fill_value == 0 assert empty_like_array.shape == shape assert empty_like_array.store_path.store == store empty_array_bool = group.empty(name="empty_bool", shape=shape, dtype=np.dtype("bool")) assert isinstance(empty_array_bool, Array) assert not empty_array_bool.fill_value assert empty_array_bool.shape == shape assert empty_array_bool.store_path.store == store empty_like_array_bool = group.empty_like(name="empty_like_bool", data=empty_array_bool) assert isinstance(empty_like_array_bool, Array) assert not empty_like_array_bool.fill_value assert empty_like_array_bool.shape == shape assert empty_like_array_bool.store_path.store == store zeros_array = group.zeros(name="zeros", shape=shape) assert isinstance(zeros_array, Array) assert zeros_array.fill_value == 0 assert zeros_array.shape == shape assert zeros_array.store_path.store == store zeros_like_array = group.zeros_like(name="zeros_like", data=zeros_array) assert isinstance(zeros_like_array, Array) assert zeros_like_array.fill_value == 0 assert zeros_like_array.shape == shape assert zeros_like_array.store_path.store == store ones_array = group.ones(name="ones", shape=shape) assert isinstance(ones_array, Array) assert ones_array.fill_value == 1 assert ones_array.shape == shape assert ones_array.store_path.store == store ones_like_array = group.ones_like(name="ones_like", data=ones_array) assert isinstance(ones_like_array, Array) assert ones_like_array.fill_value == 1 assert ones_like_array.shape == shape assert ones_like_array.store_path.store == store full_array = group.full(name="full", shape=shape, fill_value=42) assert isinstance(full_array, Array) assert full_array.fill_value == 42 assert full_array.shape == shape assert full_array.store_path.store == store full_like_array = group.full_like(name="full_like", data=full_array, fill_value=43) assert isinstance(full_like_array, Array) assert full_like_array.fill_value == 43 assert full_like_array.shape == shape assert full_like_array.store_path.store == store @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) @pytest.mark.parametrize("zarr_format", [2, 3]) @pytest.mark.parametrize("overwrite", [True, False]) @pytest.mark.parametrize("extant_node", ["array", "group"]) def test_group_creation_existing_node( store: Store, zarr_format: ZarrFormat, overwrite: bool, extant_node: Literal["array", "group"], ) -> None: """ Check that an existing array or group is handled as expected during group creation. 
""" spath = StorePath(store) group = Group.from_store(spath, zarr_format=zarr_format) expected_exception: type[ContainsArrayError | ContainsGroupError] attributes: dict[str, JSON] = {"old": True} if extant_node == "array": expected_exception = ContainsArrayError _ = group.create_array("extant", shape=(10,), dtype="uint8", attributes=attributes) elif extant_node == "group": expected_exception = ContainsGroupError _ = group.create_group("extant", attributes=attributes) else: raise AssertionError new_attributes = {"new": True} if overwrite: if not store.supports_deletes: pytest.skip("store does not support deletes but overwrite is True") node_new = Group.from_store( spath / "extant", attributes=new_attributes, zarr_format=zarr_format, overwrite=overwrite, ) assert node_new.attrs == new_attributes else: with pytest.raises(expected_exception): node_new = Group.from_store( spath / "extant", attributes=new_attributes, zarr_format=zarr_format, overwrite=overwrite, ) async def test_asyncgroup_create( store: Store, overwrite: bool, zarr_format: ZarrFormat, ) -> None: """ Test that `AsyncGroup.from_store` works as expected. """ spath = StorePath(store=store) attributes = {"foo": 100} agroup = await AsyncGroup.from_store( store, attributes=attributes, overwrite=overwrite, zarr_format=zarr_format, ) assert agroup.metadata == GroupMetadata(zarr_format=zarr_format, attributes=attributes) assert agroup.store_path == await make_store_path(store) if not overwrite: with pytest.raises(ContainsGroupError): agroup = await AsyncGroup.from_store( spath, attributes=attributes, overwrite=overwrite, zarr_format=zarr_format, ) # create an array at our target path collision_name = "foo" _ = await zarr.api.asynchronous.create_array( spath / collision_name, shape=(10,), dtype="uint8", zarr_format=zarr_format ) with pytest.raises(ContainsArrayError): _ = await AsyncGroup.from_store( StorePath(store=store) / collision_name, attributes=attributes, overwrite=overwrite, zarr_format=zarr_format, ) async def test_asyncgroup_attrs(store: Store, zarr_format: ZarrFormat) -> None: attributes = {"foo": 100} agroup = await AsyncGroup.from_store(store, zarr_format=zarr_format, attributes=attributes) assert agroup.attrs == agroup.metadata.attributes == attributes async def test_asyncgroup_open( store: Store, zarr_format: ZarrFormat, ) -> None: """ Create an `AsyncGroup`, then ensure that we can open it using `AsyncGroup.open` """ attributes = {"foo": 100} group_w = await AsyncGroup.from_store( store=store, attributes=attributes, overwrite=False, zarr_format=zarr_format, ) group_r = await AsyncGroup.open(store=store, zarr_format=zarr_format) assert group_w.attrs == group_w.attrs == attributes assert group_w == group_r async def test_asyncgroup_open_wrong_format( store: Store, zarr_format: ZarrFormat, ) -> None: _ = await AsyncGroup.from_store(store=store, overwrite=False, zarr_format=zarr_format) zarr_format_wrong: ZarrFormat # try opening with the wrong zarr format if zarr_format == 3: zarr_format_wrong = 2 elif zarr_format == 2: zarr_format_wrong = 3 else: raise AssertionError with pytest.raises(FileNotFoundError): await AsyncGroup.open(store=store, zarr_format=zarr_format_wrong) # todo: replace the dict[str, Any] type with something a bit more specific # should this be async? 
@pytest.mark.parametrize(
    "data",
    [
        {"zarr_format": 3, "node_type": "group", "attributes": {"foo": 100}},
        {"zarr_format": 2, "attributes": {"foo": 100}},
    ],
)
def test_asyncgroup_from_dict(store: Store, data: dict[str, Any]) -> None:
    """
    Test that we can create an AsyncGroup from a dict
    """
    path = "test"
    store_path = StorePath(store=store, path=path)
    group = AsyncGroup.from_dict(store_path, data=data)

    assert group.metadata.zarr_format == data["zarr_format"]
    assert group.metadata.attributes == data["attributes"]


# todo: replace this with a declarative API where we model a full hierarchy
async def test_asyncgroup_getitem(store: Store, zarr_format: ZarrFormat) -> None:
    """
    Create an `AsyncGroup`, then create members of that group, and ensure that we can access
    those members via the `AsyncGroup.getitem` method.
    """
    agroup = await AsyncGroup.from_store(store=store, zarr_format=zarr_format)

    array_name = "sub_array"
    sub_array = await agroup.create_array(name=array_name, shape=(10,), dtype="uint8", chunks=(2,))
    assert await agroup.getitem(array_name) == sub_array

    sub_group_path = "sub_group"
    sub_group = await agroup.create_group(sub_group_path, attributes={"foo": 100})
    assert await agroup.getitem(sub_group_path) == sub_group

    # check that asking for a nonexistent key raises KeyError
    with pytest.raises(KeyError):
        await agroup.getitem("foo")


async def test_asyncgroup_delitem(store: Store, zarr_format: ZarrFormat) -> None:
    if not store.supports_deletes:
        pytest.skip("store does not support deletes")

    agroup = await AsyncGroup.from_store(store=store, zarr_format=zarr_format)
    array_name = "sub_array"
    _ = await agroup.create_array(
        name=array_name,
        shape=(10,),
        dtype="uint8",
        chunks=(2,),
        attributes={"foo": 100},
    )
    await agroup.delitem(array_name)

    # todo: clean up the code duplication here
    if zarr_format == 2:
        assert not await agroup.store_path.store.exists(array_name + "/" + ".zarray")
        assert not await agroup.store_path.store.exists(array_name + "/" + ".zattrs")
    elif zarr_format == 3:
        assert not await agroup.store_path.store.exists(array_name + "/" + "zarr.json")
    else:
        raise AssertionError

    sub_group_path = "sub_group"
    _ = await agroup.create_group(sub_group_path, attributes={"foo": 100})
    await agroup.delitem(sub_group_path)
    # check the metadata of the deleted group, not the array deleted above
    if zarr_format == 2:
        assert not await agroup.store_path.store.exists(sub_group_path + "/" + ".zgroup")
        assert not await agroup.store_path.store.exists(sub_group_path + "/" + ".zattrs")
    elif zarr_format == 3:
        assert not await agroup.store_path.store.exists(sub_group_path + "/" + "zarr.json")
    else:
        raise AssertionError


@pytest.mark.parametrize("name", ["a", "/a"])
async def test_asyncgroup_create_group(
    store: Store,
    name: str,
    zarr_format: ZarrFormat,
) -> None:
    agroup = await AsyncGroup.from_store(store=store, zarr_format=zarr_format)
    attributes = {"foo": 999}
    subgroup = await agroup.create_group(name=name, attributes=attributes)

    assert isinstance(subgroup, AsyncGroup)
    assert subgroup.path == normalize_path(name)
    assert subgroup.name == "/" + subgroup.path
    assert subgroup.attrs == attributes
    assert subgroup.store_path.path == subgroup.path
    assert subgroup.store_path.store == store
    assert subgroup.metadata.zarr_format == zarr_format


async def test_asyncgroup_create_array(
    store: Store, zarr_format: ZarrFormat, overwrite: bool
) -> None:
    """
    Test that the AsyncGroup.create_array method works correctly. We ensure that array properties
    specified in create_array are present on the resulting array.
""" agroup = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) if not overwrite: with pytest.raises(ContainsGroupError): agroup = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) shape = (10,) dtype = "uint8" chunk_shape = (4,) attributes: dict[str, JSON] = {"foo": 100} sub_node_path = "sub_array" subnode = await agroup.create_array( name=sub_node_path, shape=shape, dtype=dtype, chunks=chunk_shape, attributes=attributes, ) assert isinstance(subnode, AsyncArray) assert subnode.attrs == attributes assert subnode.store_path.path == sub_node_path assert subnode.store_path.store == store assert subnode.shape == shape assert subnode.dtype == dtype # todo: fix the type annotation of array.metadata.chunk_grid so that we get some autocomplete # here. assert subnode.metadata.chunk_grid.chunk_shape == chunk_shape assert subnode.metadata.zarr_format == zarr_format async def test_asyncgroup_update_attributes(store: Store, zarr_format: ZarrFormat) -> None: """ Test that the AsyncGroup.update_attributes method works correctly. """ attributes_old = {"foo": 10} attributes_new = {"baz": "new"} agroup = await AsyncGroup.from_store( store=store, zarr_format=zarr_format, attributes=attributes_old ) agroup_new_attributes = await agroup.update_attributes(attributes_new) attributes_updated = attributes_old.copy() attributes_updated.update(attributes_new) assert agroup_new_attributes.attrs == attributes_updated @pytest.mark.parametrize("store", ["local"], indirect=["store"]) @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_serializable_async_group(store: LocalStore, zarr_format: ZarrFormat) -> None: expected = await AsyncGroup.from_store( store=store, attributes={"foo": 999}, zarr_format=zarr_format ) p = pickle.dumps(expected) actual = pickle.loads(p) assert actual == expected @pytest.mark.parametrize("store", ["local"], indirect=["store"]) @pytest.mark.parametrize("zarr_format", [2, 3]) def test_serializable_sync_group(store: LocalStore, zarr_format: ZarrFormat) -> None: expected = Group.from_store(store=store, attributes={"foo": 999}, zarr_format=zarr_format) p = pickle.dumps(expected) actual = pickle.loads(p) assert actual == expected @pytest.mark.parametrize("consolidated_metadata", [True, False]) async def test_group_members_async(store: Store, consolidated_metadata: bool) -> None: group = await AsyncGroup.from_store( store=store, ) a0 = await group.create_array("a0", shape=(1,), dtype="uint8") g0 = await group.create_group("g0") a1 = await g0.create_array("a1", shape=(1,), dtype="uint8") g1 = await g0.create_group("g1") a2 = await g1.create_array("a2", shape=(1,), dtype="uint8") g2 = await g1.create_group("g2") # immediate children children = sorted([x async for x in group.members()], key=operator.itemgetter(0)) assert children == [ ("a0", a0), ("g0", g0), ] nmembers = await group.nmembers() assert nmembers == 2 # partial children = sorted([x async for x in group.members(max_depth=1)], key=operator.itemgetter(0)) expected = [ ("a0", a0), ("g0", g0), ("g0/a1", a1), ("g0/g1", g1), ] assert children == expected nmembers = await group.nmembers(max_depth=1) assert nmembers == 4 # all children all_children = sorted( [x async for x in group.members(max_depth=None)], key=operator.itemgetter(0) ) expected = [ ("a0", a0), ("g0", g0), ("g0/a1", a1), ("g0/g1", g1), ("g0/g1/a2", a2), ("g0/g1/g2", g2), ] assert all_children == expected if consolidated_metadata: await zarr.api.asynchronous.consolidate_metadata(store=store) group = await 
zarr.api.asynchronous.open_group(store=store) nmembers = await group.nmembers(max_depth=None) assert nmembers == 6 with pytest.raises(ValueError, match="max_depth"): [x async for x in group.members(max_depth=-1)] if consolidated_metadata: # test for mixed known and unknown metadata. # For now, we trust the consolidated metadata. object.__setattr__( group.metadata.consolidated_metadata.metadata["g0"].consolidated_metadata.metadata[ "g1" ], "consolidated_metadata", None, ) all_children = sorted( [x async for x in group.members(max_depth=None)], key=operator.itemgetter(0) ) assert len(all_children) == 4 nmembers = await group.nmembers(max_depth=None) assert nmembers == 4 async def test_require_group(store: LocalStore | MemoryStore, zarr_format: ZarrFormat) -> None: root = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) # create foo group _ = await root.create_group("foo", attributes={"foo": 100}) # test that we can get the group using require_group foo_group = await root.require_group("foo") assert foo_group.attrs == {"foo": 100} # test that we can get the group using require_group and overwrite=True if store.supports_deletes: foo_group = await root.require_group("foo", overwrite=True) assert foo_group.attrs == {} _ = await foo_group.create_array( "bar", shape=(10,), dtype="uint8", chunks=(2,), attributes={"foo": 100} ) # test that overwriting a group w/ children fails # TODO: figure out why ensure_no_existing_node is not catching the foo.bar array # # with pytest.raises(ContainsArrayError): # await root.require_group("foo", overwrite=True) # test that requiring a group where an array is fails with pytest.raises(TypeError): await foo_group.require_group("bar") async def test_require_groups(store: LocalStore | MemoryStore, zarr_format: ZarrFormat) -> None: root = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) # create foo group _ = await root.create_group("foo", attributes={"foo": 100}) # create bar group _ = await root.create_group("bar", attributes={"bar": 200}) foo_group, bar_group = await root.require_groups("foo", "bar") assert foo_group.attrs == {"foo": 100} assert bar_group.attrs == {"bar": 200} # get a mix of existing and new groups foo_group, spam_group = await root.require_groups("foo", "spam") assert foo_group.attrs == {"foo": 100} assert spam_group.attrs == {} # no names no_group = await root.require_groups() assert no_group == () def test_create_dataset_with_data(store: Store, zarr_format: ZarrFormat) -> None: """Check that deprecated create_dataset method allows input data. See https://github.com/zarr-developers/zarr-python/issues/2631. 
""" root = Group.from_store(store=store, zarr_format=zarr_format) arr = np.random.random((5, 5)) with pytest.warns(DeprecationWarning): data = root.create_dataset("random", data=arr, shape=arr.shape) np.testing.assert_array_equal(np.asarray(data), arr) async def test_create_dataset(store: Store, zarr_format: ZarrFormat) -> None: root = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) with pytest.warns(DeprecationWarning): foo = await root.create_dataset("foo", shape=(10,), dtype="uint8") assert foo.shape == (10,) with pytest.raises(ContainsArrayError), pytest.warns(DeprecationWarning): await root.create_dataset("foo", shape=(100,), dtype="int8") _ = await root.create_group("bar") with pytest.raises(ContainsGroupError), pytest.warns(DeprecationWarning): await root.create_dataset("bar", shape=(100,), dtype="int8") async def test_require_array(store: Store, zarr_format: ZarrFormat) -> None: root = await AsyncGroup.from_store(store=store, zarr_format=zarr_format) foo1 = await root.require_array("foo", shape=(10,), dtype="i8", attributes={"foo": 101}) assert foo1.attrs == {"foo": 101} foo2 = await root.require_array("foo", shape=(10,), dtype="i8") assert foo2.attrs == {"foo": 101} # exact = False _ = await root.require_array("foo", shape=10, dtype="f8") # errors w/ exact True with pytest.raises(TypeError, match="Incompatible dtype"): await root.require_array("foo", shape=(10,), dtype="f8", exact=True) with pytest.raises(TypeError, match="Incompatible shape"): await root.require_array("foo", shape=(100, 100), dtype="i8") with pytest.raises(TypeError, match="Incompatible dtype"): await root.require_array("foo", shape=(10,), dtype="f4") _ = await root.create_group("bar") with pytest.raises(TypeError, match="Incompatible object"): await root.require_array("bar", shape=(10,), dtype="int8") @pytest.mark.parametrize("consolidate", [True, False]) async def test_members_name(store: Store, consolidate: bool, zarr_format: ZarrFormat): group = Group.from_store(store=store, zarr_format=zarr_format) a = group.create_group(name="a") a.create_array("array", shape=(1,), dtype="uint8") b = a.create_group(name="b") b.create_array("array", shape=(1,), dtype="uint8") if consolidate: group = zarr.api.synchronous.consolidate_metadata(store) result = group["a"]["b"] assert result.name == "/a/b" paths = sorted(x.name for _, x in group.members(max_depth=None)) expected = ["/a", "/a/array", "/a/b", "/a/b/array"] assert paths == expected # regression test for https://github.com/zarr-developers/zarr-python/pull/2356 g = zarr.open_group(store, use_consolidated=False) with warnings.catch_warnings(): warnings.simplefilter("error") assert list(g) async def test_open_mutable_mapping(): group = await zarr.api.asynchronous.open_group( store={}, ) assert isinstance(group.store_path.store, MemoryStore) def test_open_mutable_mapping_sync(): group = zarr.open_group( store={}, ) assert isinstance(group.store_path.store, MemoryStore) class TestConsolidated: async def test_group_getitem_consolidated(self, store: Store) -> None: root = await AsyncGroup.from_store(store=store) # Set up the test structure with # / # g0/ # group /g0 # g1/ # group /g0/g1 # g2/ # group /g0/g1/g2 # x1/ # group /x0 # x2/ # group /x0/x1 # x3/ # group /x0/x1/x2 g0 = await root.create_group("g0") g1 = await g0.create_group("g1") await g1.create_group("g2") x0 = await root.create_group("x0") x1 = await x0.create_group("x1") await x1.create_group("x2") await zarr.api.asynchronous.consolidate_metadata(store) # On disk, we've consolidated all the 
metadata in the root zarr.json group = await zarr.api.asynchronous.open(store=store) rg0 = await group.getitem("g0") expected = ConsolidatedMetadata( metadata={ "g1": GroupMetadata( attributes={}, zarr_format=3, consolidated_metadata=ConsolidatedMetadata( metadata={ "g2": GroupMetadata( attributes={}, zarr_format=3, consolidated_metadata=ConsolidatedMetadata(metadata={}), ) } ), ), } ) assert rg0.metadata.consolidated_metadata == expected rg1 = await rg0.getitem("g1") assert rg1.metadata.consolidated_metadata == expected.metadata["g1"].consolidated_metadata rg2 = await rg1.getitem("g2") assert rg2.metadata.consolidated_metadata == ConsolidatedMetadata(metadata={}) async def test_group_delitem_consolidated(self, store: Store) -> None: if isinstance(store, ZipStore): raise pytest.skip("Not implemented") root = await AsyncGroup.from_store(store=store) # Set up the test structure with # / # g0/ # group /g0 # g1/ # group /g0/g1 # g2/ # group /g0/g1/g2 # data # array # x1/ # group /x0 # x2/ # group /x0/x1 # x3/ # group /x0/x1/x2 # data # array g0 = await root.create_group("g0") g1 = await g0.create_group("g1") g2 = await g1.create_group("g2") await g2.create_array("data", shape=(1,), dtype="uint8") x0 = await root.create_group("x0") x1 = await x0.create_group("x1") x2 = await x1.create_group("x2") await x2.create_array("data", shape=(1,), dtype="uint8") await zarr.api.asynchronous.consolidate_metadata(store) group = await zarr.api.asynchronous.open_consolidated(store=store) assert len(group.metadata.consolidated_metadata.metadata) == 2 assert "g0" in group.metadata.consolidated_metadata.metadata await group.delitem("g0") assert len(group.metadata.consolidated_metadata.metadata) == 1 assert "g0" not in group.metadata.consolidated_metadata.metadata def test_open_consolidated_raises(self, store: Store) -> None: if isinstance(store, ZipStore): raise pytest.skip("Not implemented") root = Group.from_store(store=store) # fine to be missing by default zarr.open_group(store=store) with pytest.raises(ValueError, match="Consolidated metadata requested."): zarr.open_group(store=store, use_consolidated=True) # Now create consolidated metadata... root.create_group("g0") zarr.consolidate_metadata(store) # and explicitly ignore it. group = zarr.open_group(store=store, use_consolidated=False) assert group.metadata.consolidated_metadata is None async def test_open_consolidated_raises_async(self, store: Store) -> None: if isinstance(store, ZipStore): raise pytest.skip("Not implemented") root = await AsyncGroup.from_store(store=store) # fine to be missing by default await zarr.api.asynchronous.open_group(store=store) with pytest.raises(ValueError, match="Consolidated metadata requested."): await zarr.api.asynchronous.open_group(store=store, use_consolidated=True) # Now create consolidated metadata... await root.create_group("g0") await zarr.api.asynchronous.consolidate_metadata(store) # and explicitly ignore it. 
group = await zarr.api.asynchronous.open_group(store=store, use_consolidated=False) assert group.metadata.consolidated_metadata is None class TestGroupMetadata: def test_from_dict_extra_fields(self): data = { "attributes": {"key": "value"}, "_nczarr_superblock": {"version": "2.0.0"}, "zarr_format": 2, } result = GroupMetadata.from_dict(data) expected = GroupMetadata(attributes={"key": "value"}, zarr_format=2) assert result == expected class TestInfo: def test_info(self): store = zarr.storage.MemoryStore() A = zarr.group(store=store, path="A") B = A.create_group(name="B") B.create_array(name="x", shape=(1,), dtype="uint8") B.create_array(name="y", shape=(2,), dtype="uint8") result = A.info expected = GroupInfo( _name="A", _read_only=False, _store_type="MemoryStore", _zarr_format=3, ) assert result == expected result = A.info_complete() expected = GroupInfo( _name="A", _read_only=False, _store_type="MemoryStore", _zarr_format=3, _count_members=3, _count_arrays=2, _count_groups=1, ) assert result == expected def test_update_attrs() -> None: # regression test for https://github.com/zarr-developers/zarr-python/issues/2328 root = Group.from_store( MemoryStore(), ) root.attrs["foo"] = "bar" assert root.attrs["foo"] == "bar" @pytest.mark.parametrize("method", ["empty", "zeros", "ones", "full"]) def test_group_deprecated_positional_args(method: str) -> None: if method == "full": kwargs = {"fill_value": 0} else: kwargs = {} root = zarr.group() with pytest.warns(FutureWarning, match=r"Pass name=.* as keyword args."): arr = getattr(root, method)("foo", shape=1, **kwargs) assert arr.shape == (1,) method += "_like" data = np.ones(1) with pytest.warns(FutureWarning, match=r"Pass name=.*, data=.* as keyword args."): arr = getattr(root, method)("foo_like", data, **kwargs) assert arr.shape == data.shape @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) def test_delitem_removes_children(store: Store, zarr_format: ZarrFormat) -> None: # https://github.com/zarr-developers/zarr-python/issues/2191 g1 = zarr.group(store=store, zarr_format=zarr_format) g1.create_group("0") g1.create_group("0/0") arr = g1.create_array("0/0/0", shape=(1,), dtype="uint8") arr[:] = 1 del g1["0"] with pytest.raises(KeyError): g1["0/0"] @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_nodes( impl: Literal["async", "sync"], store: Store, zarr_format: ZarrFormat ) -> None: """ Ensure that ``create_nodes`` can create a zarr hierarchy from a model of that hierarchy in dict form. Note that this creates an incomplete Zarr hierarchy. 
""" node_spec = { "group": GroupMetadata(attributes={"foo": 10}), "group/array_0": meta_from_array(np.arange(3), zarr_format=zarr_format), "group/array_1": meta_from_array(np.arange(4), zarr_format=zarr_format), "group/subgroup/array_0": meta_from_array(np.arange(4), zarr_format=zarr_format), "group/subgroup/array_1": meta_from_array(np.arange(5), zarr_format=zarr_format), } if impl == "sync": observed_nodes = dict(sync_group.create_nodes(store=store, nodes=node_spec)) elif impl == "async": observed_nodes = dict(await _collect_aiterator(create_nodes(store=store, nodes=node_spec))) else: raise ValueError(f"Invalid impl: {impl}") assert node_spec == {k: v.metadata for k, v in observed_nodes.items()} @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_create_nodes_concurrency_limit(store: MemoryStore) -> None: """ Test that the execution time of create_nodes can be constrained by the async concurrency configuration setting. """ set_latency = 0.02 num_groups = 10 groups = {str(idx): GroupMetadata() for idx in range(num_groups)} latency_store = LatencyStore(store, set_latency=set_latency) # check how long it takes to iterate over the groups # if create_nodes is sensitive to IO latency, # this should take (num_groups * get_latency) seconds # otherwise, it should take only marginally more than get_latency seconds with zarr_config.set({"async.concurrency": 1}): start = time.time() _ = tuple(sync_group.create_nodes(store=latency_store, nodes=groups)) elapsed = time.time() - start assert elapsed > num_groups * set_latency @pytest.mark.parametrize( ("a_func", "b_func"), [ (zarr.core.group.AsyncGroup.create_hierarchy, zarr.core.group.Group.create_hierarchy), (zarr.core.group.create_hierarchy, zarr.core.sync_group.create_hierarchy), (zarr.core.group.create_nodes, zarr.core.sync_group.create_nodes), (zarr.core.group.create_rooted_hierarchy, zarr.core.sync_group.create_rooted_hierarchy), (zarr.core.group.get_node, zarr.core.sync_group.get_node), ], ) def test_consistent_signatures( a_func: Callable[[object], object], b_func: Callable[[object], object] ) -> None: """ Ensure that pairs of functions have consistent signatures """ base_sig = inspect.signature(a_func) test_sig = inspect.signature(b_func) assert test_sig.parameters == base_sig.parameters @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("overwrite", [True, False]) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_hierarchy( impl: Literal["async", "sync"], store: Store, overwrite: bool, zarr_format: ZarrFormat ) -> None: """ Test that ``create_hierarchy`` can create a complete Zarr hierarchy, even if the input describes an incomplete one. 
""" hierarchy_spec = { "group": GroupMetadata(attributes={"path": "group"}, zarr_format=zarr_format), "group/array_0": meta_from_array( np.arange(3), attributes={"path": "group/array_0"}, zarr_format=zarr_format ), "group/subgroup/array_0": meta_from_array( np.arange(4), attributes={"path": "group/subgroup/array_0"}, zarr_format=zarr_format ), } pre_existing_nodes = { "group/extra": GroupMetadata(zarr_format=zarr_format, attributes={"path": "group/extra"}), "": GroupMetadata(zarr_format=zarr_format, attributes={"name": "root"}), } # we expect create_hierarchy to insert a group that was missing from the hierarchy spec expected_meta = hierarchy_spec | {"group/subgroup": GroupMetadata(zarr_format=zarr_format)} # initialize the group with some nodes _ = dict(sync_group.create_nodes(store=store, nodes=pre_existing_nodes)) if impl == "sync": created = dict( sync_group.create_hierarchy(store=store, nodes=hierarchy_spec, overwrite=overwrite) ) elif impl == "async": created = dict( [ a async for a in create_hierarchy( store=store, nodes=hierarchy_spec, overwrite=overwrite ) ] ) else: raise ValueError(f"Invalid impl: {impl}") if not overwrite: extra_group = sync_group.get_node(store=store, path="group/extra", zarr_format=zarr_format) assert extra_group.metadata.attributes == {"path": "group/extra"} else: with pytest.raises(FileNotFoundError): await get_node(store=store, path="group/extra", zarr_format=zarr_format) assert expected_meta == {k: v.metadata for k, v in created.items()} @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("extant_node", ["array", "group"]) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_hierarchy_existing_nodes( impl: Literal["async", "sync"], store: Store, extant_node: Literal["array", "group"], zarr_format: ZarrFormat, ) -> None: """ Test that create_hierarchy with overwrite = False will not overwrite an existing array or group, and raises an exception instead. """ extant_node_path = "node" if extant_node == "array": extant_metadata = meta_from_array( np.zeros(4), zarr_format=zarr_format, attributes={"extant": True} ) new_metadata = meta_from_array(np.zeros(4), zarr_format=zarr_format) err_cls = ContainsArrayError else: extant_metadata = GroupMetadata(zarr_format=zarr_format, attributes={"extant": True}) new_metadata = GroupMetadata(zarr_format=zarr_format) err_cls = ContainsGroupError # write the extant metadata tuple(sync_group.create_nodes(store=store, nodes={extant_node_path: extant_metadata})) msg = f"{extant_node} exists in store {store!r} at path {extant_node_path!r}." 
    # ensure that we cannot invoke create_hierarchy with overwrite=False here
    if impl == "sync":
        with pytest.raises(err_cls, match=re.escape(msg)):
            tuple(
                sync_group.create_hierarchy(
                    store=store, nodes={"node": new_metadata}, overwrite=False
                )
            )
    elif impl == "async":
        with pytest.raises(err_cls, match=re.escape(msg)):
            tuple(
                [
                    x
                    async for x in create_hierarchy(
                        store=store, nodes={"node": new_metadata}, overwrite=False
                    )
                ]
            )
    else:
        raise ValueError(f"Invalid impl: {impl}")

    # ensure that the extant metadata was not overwritten
    assert (
        await get_node(store=store, path=extant_node_path, zarr_format=zarr_format)
    ).metadata.attributes == {"extant": True}


@pytest.mark.parametrize("store", ["memory"], indirect=True)
@pytest.mark.parametrize("overwrite", [True, False])
@pytest.mark.parametrize("group_path", ["", "foo"])
@pytest.mark.parametrize("impl", ["async", "sync"])
async def test_group_create_hierarchy(
    store: Store,
    zarr_format: ZarrFormat,
    overwrite: bool,
    group_path: str,
    impl: Literal["async", "sync"],
) -> None:
    """
    Test that the Group.create_hierarchy method creates specified nodes and returns them in a dict.
    Also test that off-target nodes are not deleted, and that the root group is not deleted
    """
    root_attrs = {"root": True}
    g = sync_group.create_rooted_hierarchy(
        store=store,
        nodes={group_path: GroupMetadata(zarr_format=zarr_format, attributes=root_attrs)},
    )
    node_spec = {
        "a": GroupMetadata(zarr_format=zarr_format, attributes={"name": "a"}),
        "a/b": GroupMetadata(zarr_format=zarr_format, attributes={"name": "a/b"}),
        "a/b/c": meta_from_array(
            np.zeros(5), zarr_format=zarr_format, attributes={"name": "a/b/c"}
        ),
    }
    # This node should be kept if overwrite is True
    extant_spec = {"b": GroupMetadata(zarr_format=zarr_format, attributes={"name": "b"})}

    if impl == "async":
        extant_created = dict(
            await _collect_aiterator(g._async_group.create_hierarchy(extant_spec, overwrite=False))
        )
        nodes_created = dict(
            await _collect_aiterator(
                g._async_group.create_hierarchy(node_spec, overwrite=overwrite)
            )
        )
    elif impl == "sync":
        extant_created = dict(g.create_hierarchy(extant_spec, overwrite=False))
        nodes_created = dict(g.create_hierarchy(node_spec, overwrite=overwrite))

    all_members = dict(g.members(max_depth=None))
    for k, v in node_spec.items():
        assert all_members[k].metadata == v == nodes_created[k].metadata

    # the off-target extant nodes should survive; when overwrite is False, their
    # metadata should also be untouched
    for k, v in extant_spec.items():
        if overwrite:
            assert k in all_members
        else:
            assert all_members[k].metadata == v == extant_created[k].metadata

    # ensure that we left the root group as-is
    assert (
        sync_group.get_node(store=store, path=group_path, zarr_format=zarr_format).attrs.asdict()
        == root_attrs
    )


@pytest.mark.parametrize("store", ["memory"], indirect=True)
@pytest.mark.parametrize("overwrite", [True, False])
def test_group_create_hierarchy_no_root(
    store: Store, zarr_format: ZarrFormat, overwrite: bool
) -> None:
    """
    Test that the Group.create_hierarchy method will error if the dict provided contains a root.
    """
    g = Group.from_store(store, zarr_format=zarr_format)
    tree = {
        "": GroupMetadata(zarr_format=zarr_format, attributes={"name": "a"}),
    }
    with pytest.raises(
        ValueError, match="It is an error to use this method to create a root node.
" ): _ = dict(g.create_hierarchy(tree, overwrite=overwrite)) class TestParseHierarchyDict: """ Tests for the function that parses dicts of str : Metadata pairs, ensuring that the output models a valid Zarr hierarchy """ @staticmethod def test_normed_keys() -> None: """ Test that keys get normalized properly """ nodes = { "a": GroupMetadata(), "/b": GroupMetadata(), "": GroupMetadata(), "/a//c////": GroupMetadata(), } observed = _parse_hierarchy_dict(data=nodes) expected = {normalize_path(k): v for k, v in nodes.items()} assert observed == expected @staticmethod def test_empty() -> None: """ Test that an empty dict passes through """ assert _parse_hierarchy_dict(data={}) == {} @staticmethod def test_implicit_groups() -> None: """ Test that implicit groups were added as needed. """ requested = {"a/b/c": GroupMetadata()} expected = requested | { "": ImplicitGroupMarker(), "a": ImplicitGroupMarker(), "a/b": ImplicitGroupMarker(), } observed = _parse_hierarchy_dict(data=requested) assert observed == expected @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_group_create_hierarchy_invalid_mixed_zarr_format( store: Store, zarr_format: ZarrFormat ) -> None: """ Test that ``Group.create_hierarchy`` will raise an error if the zarr_format of the nodes is different from the parent group. """ other_format = 2 if zarr_format == 3 else 3 g = Group.from_store(store, zarr_format=other_format) tree = { "a": GroupMetadata(zarr_format=zarr_format, attributes={"name": "a"}), "a/b": meta_from_array(np.zeros(5), zarr_format=zarr_format, attributes={"name": "a/c"}), } msg = "The zarr_format of the nodes must be the same as the parent group." with pytest.raises(ValueError, match=msg): _ = tuple(g.create_hierarchy(tree)) @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("defect", ["array/array", "array/group"]) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_hierarchy_invalid_nested( impl: Literal["async", "sync"], store: Store, defect: tuple[str, str], zarr_format: ZarrFormat ) -> None: """ Test that create_hierarchy will not create a Zarr array that contains a Zarr group or Zarr array. """ if defect == "array/array": hierarchy_spec = { "array_0": meta_from_array(np.arange(3), zarr_format=zarr_format), "array_0/subarray": meta_from_array(np.arange(4), zarr_format=zarr_format), } elif defect == "array/group": hierarchy_spec = { "array_0": meta_from_array(np.arange(3), zarr_format=zarr_format), "array_0/subgroup": GroupMetadata(attributes={"foo": 10}, zarr_format=zarr_format), } msg = "Only Zarr groups can contain other nodes." if impl == "sync": with pytest.raises(ValueError, match=msg): tuple(sync_group.create_hierarchy(store=store, nodes=hierarchy_spec)) elif impl == "async": with pytest.raises(ValueError, match=msg): await _collect_aiterator(create_hierarchy(store=store, nodes=hierarchy_spec)) @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_hierarchy_invalid_mixed_format( impl: Literal["async", "sync"], store: Store ) -> None: """ Test that create_hierarchy will not create a Zarr group that contains a both Zarr v2 and Zarr v3 nodes. """ msg = ( "Got data with both Zarr v2 and Zarr v3 nodes, which is invalid. " "The following keys map to Zarr v2 nodes: ['v2']. " "The following keys map to Zarr v3 nodes: ['v3']." "Ensure that all nodes have the same Zarr format." 
) nodes = { "v2": GroupMetadata(zarr_format=2), "v3": GroupMetadata(zarr_format=3), } if impl == "sync": with pytest.raises(ValueError, match=re.escape(msg)): tuple( sync_group.create_hierarchy( store=store, nodes=nodes, ) ) elif impl == "async": with pytest.raises(ValueError, match=re.escape(msg)): await _collect_aiterator( create_hierarchy( store=store, nodes=nodes, ) ) else: raise ValueError(f"Invalid impl: {impl}") @pytest.mark.parametrize("store", ["memory", "local"], indirect=True) @pytest.mark.parametrize("zarr_format", [2, 3]) @pytest.mark.parametrize("root_key", ["", "root"]) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_rooted_hierarchy_group( impl: Literal["async", "sync"], store: Store, zarr_format, root_key: str ) -> None: """ Test that the _create_rooted_hierarchy can create a group. """ root_meta = {root_key: GroupMetadata(zarr_format=zarr_format, attributes={"path": root_key})} group_names = ["a", "a/b"] array_names = ["a/b/c", "a/b/d"] # just to ensure that we don't use the same name twice in tests assert set(group_names) & set(array_names) == set() groups_expected_meta = { _join_paths([root_key, node_name]): GroupMetadata( zarr_format=zarr_format, attributes={"path": node_name} ) for node_name in group_names } arrays_expected_meta = { _join_paths([root_key, node_name]): meta_from_array(np.zeros(4), zarr_format=zarr_format) for node_name in array_names } nodes_create = root_meta | groups_expected_meta | arrays_expected_meta if impl == "sync": g = sync_group.create_rooted_hierarchy(store=store, nodes=nodes_create) assert isinstance(g, Group) members = g.members(max_depth=None) elif impl == "async": g = await create_rooted_hierarchy(store=store, nodes=nodes_create) assert isinstance(g, AsyncGroup) members = await _collect_aiterator(g.members(max_depth=None)) else: raise ValueError(f"Unknown implementation: {impl}") assert g.metadata.attributes == {"path": root_key} members_observed_meta = {k: v.metadata for k, v in members} members_expected_meta_relative = { k.removeprefix(root_key).lstrip("/"): v for k, v in (groups_expected_meta | arrays_expected_meta).items() } assert members_observed_meta == members_expected_meta_relative @pytest.mark.parametrize("store", ["memory", "local"], indirect=True) @pytest.mark.parametrize("zarr_format", [2, 3]) @pytest.mark.parametrize("root_key", ["", "root"]) @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_rooted_hierarchy_array( impl: Literal["async", "sync"], store: Store, zarr_format, root_key: str ) -> None: """ Test that _create_rooted_hierarchy can create an array. """ root_meta = { root_key: meta_from_array( np.arange(3), zarr_format=zarr_format, attributes={"path": root_key} ) } nodes_create = root_meta if impl == "sync": a = sync_group.create_rooted_hierarchy(store=store, nodes=nodes_create, overwrite=True) assert isinstance(a, Array) elif impl == "async": a = await create_rooted_hierarchy(store=store, nodes=nodes_create, overwrite=True) assert isinstance(a, AsyncArray) else: raise ValueError(f"Invalid impl: {impl}") assert a.metadata.attributes == {"path": root_key} @pytest.mark.parametrize("impl", ["async", "sync"]) async def test_create_rooted_hierarchy_invalid(impl: Literal["async", "sync"]) -> None: """ Ensure _create_rooted_hierarchy will raise a ValueError if the input does not contain a root node. """ zarr_format = 3 nodes = { "a": GroupMetadata(zarr_format=zarr_format), "b": GroupMetadata(zarr_format=zarr_format), } msg = "The input does not specify a root node. 
" if impl == "sync": with pytest.raises(ValueError, match=msg): sync_group.create_rooted_hierarchy(store=store, nodes=nodes) elif impl == "async": with pytest.raises(ValueError, match=msg): await create_rooted_hierarchy(store=store, nodes=nodes) else: raise ValueError(f"Invalid impl: {impl}") @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_group_members_performance(store: Store) -> None: """ Test that the execution time of Group.members is less than the number of members times the latency for accessing each member. """ get_latency = 0.1 # use the input store to create some groups group_create = zarr.group(store=store) num_groups = 10 # Create some groups for i in range(num_groups): group_create.create_group(f"group{i}") latency_store = LatencyStore(store, get_latency=get_latency) # create a group with some latency on get operations group_read = zarr.group(store=latency_store) # check how long it takes to iterate over the groups # if .members is sensitive to IO latency, # this should take (num_groups * get_latency) seconds # otherwise, it should take only marginally more than get_latency seconds start = time.time() _ = group_read.members() elapsed = time.time() - start assert elapsed < (num_groups * get_latency) @pytest.mark.parametrize("store", ["memory"], indirect=True) def test_group_members_concurrency_limit(store: MemoryStore) -> None: """ Test that the execution time of Group.members can be constrained by the async concurrency configuration setting. """ get_latency = 0.02 # use the input store to create some groups group_create = zarr.group(store=store) num_groups = 10 # Create some groups for i in range(num_groups): group_create.create_group(f"group{i}") latency_store = LatencyStore(store, get_latency=get_latency) # create a group with some latency on get operations group_read = zarr.group(store=latency_store) # check how long it takes to iterate over the groups # if .members is sensitive to IO latency, # this should take (num_groups * get_latency) seconds # otherwise, it should take only marginally more than get_latency seconds from zarr.core.config import config with config.set({"async.concurrency": 1}): start = time.time() _ = group_read.members() elapsed = time.time() - start assert elapsed > num_groups * get_latency @pytest.mark.parametrize("option", ["array", "group", "invalid"]) def test_build_metadata_v3(option: Literal["array", "group", "invalid"]) -> None: """ Test that _build_metadata_v3 returns the correct metadata for a v3 array or group """ match option: case "array": metadata_dict = meta_from_array(np.arange(10), zarr_format=3).to_dict() assert _build_metadata_v3(metadata_dict) == ArrayV3Metadata.from_dict(metadata_dict) case "group": metadata_dict = GroupMetadata(attributes={"foo": 10}, zarr_format=3).to_dict() assert _build_metadata_v3(metadata_dict) == GroupMetadata.from_dict(metadata_dict) case "invalid": metadata_dict = GroupMetadata(zarr_format=3).to_dict() metadata_dict.pop("node_type") # TODO: fix the error message msg = "Invalid value for 'node_type'. Expected 'array or group'. Got 'nothing (the key is missing)'." 
with pytest.raises(MetadataValidationError, match=re.escape(msg)): _build_metadata_v3(metadata_dict) @pytest.mark.parametrize("roots", [("",), ("a", "b")]) def test_get_roots(roots: tuple[str, ...]): root_nodes = {k: GroupMetadata(attributes={"name": k}) for k in roots} child_nodes = { _join_paths([k, "foo"]): GroupMetadata(attributes={"name": _join_paths([k, "foo"])}) for k in roots } data = root_nodes | child_nodes assert set(_get_roots(data)) == set(roots) zarr-python-3.0.6/tests/test_indexing.py000066400000000000000000002005521476711733500204170ustar00rootroot00000000000000from __future__ import annotations import itertools from collections import Counter from typing import TYPE_CHECKING, Any from uuid import uuid4 import numpy as np import numpy.typing as npt import pytest from numpy.testing import assert_array_equal import zarr from zarr import Array from zarr.core.buffer import default_buffer_prototype from zarr.core.indexing import ( BasicSelection, CoordinateSelection, OrthogonalSelection, Selection, _iter_grid, make_slice_selection, normalize_integer_selection, oindex, oindex_set, replace_ellipsis, ) from zarr.registry import get_ndbuffer_class from zarr.storage import MemoryStore, StorePath if TYPE_CHECKING: from collections.abc import AsyncGenerator from zarr.core.buffer import BufferPrototype from zarr.core.buffer.core import Buffer from zarr.core.common import ChunkCoords @pytest.fixture async def store() -> AsyncGenerator[StorePath]: return StorePath(await MemoryStore.open()) def zarr_array_from_numpy_array( store: StorePath, a: npt.NDArray[Any], chunk_shape: ChunkCoords | None = None, ) -> zarr.Array: z = zarr.create_array( store=store / str(uuid4()), shape=a.shape, dtype=a.dtype, chunks=chunk_shape or a.shape, chunk_key_encoding={"name": "v2", "separator": "."}, ) z[()] = a return z class CountingDict(MemoryStore): counter: Counter[tuple[str, str]] @classmethod async def open(cls) -> CountingDict: store = await super().open() store.counter = Counter() return store async def get( self, key: str, prototype: BufferPrototype, byte_range: tuple[int | None, int | None] | None = None, ) -> Buffer | None: key_suffix = "/".join(key.split("/")[1:]) self.counter["__getitem__", key_suffix] += 1 return await super().get(key, prototype, byte_range) async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None: key_suffix = "/".join(key.split("/")[1:]) self.counter["__setitem__", key_suffix] += 1 return await super().set(key, value, byte_range) def test_normalize_integer_selection() -> None: assert 1 == normalize_integer_selection(1, 100) assert 99 == normalize_integer_selection(-1, 100) with pytest.raises(IndexError): normalize_integer_selection(100, 100) with pytest.raises(IndexError): normalize_integer_selection(1000, 100) with pytest.raises(IndexError): normalize_integer_selection(-1000, 100) def test_replace_ellipsis() -> None: # 1D, single item assert (0,) == replace_ellipsis(0, (100,)) # 1D assert (slice(None),) == replace_ellipsis(Ellipsis, (100,)) assert (slice(None),) == replace_ellipsis(slice(None), (100,)) assert (slice(None, 100),) == replace_ellipsis(slice(None, 100), (100,)) assert (slice(0, None),) == replace_ellipsis(slice(0, None), (100,)) assert (slice(None),) == replace_ellipsis((slice(None), Ellipsis), (100,)) assert (slice(None),) == replace_ellipsis((Ellipsis, slice(None)), (100,)) # 2D, single item assert (0, 0) == replace_ellipsis((0, 0), (100, 100)) assert (-1, 1) == replace_ellipsis((-1, 1), (100, 100)) # 2D, single col/row 
assert (0, slice(None)) == replace_ellipsis((0, slice(None)), (100, 100)) assert (0, slice(None)) == replace_ellipsis((0,), (100, 100)) assert (slice(None), 0) == replace_ellipsis((slice(None), 0), (100, 100)) # 2D slice assert (slice(None), slice(None)) == replace_ellipsis(Ellipsis, (100, 100)) assert (slice(None), slice(None)) == replace_ellipsis(slice(None), (100, 100)) assert (slice(None), slice(None)) == replace_ellipsis((slice(None), slice(None)), (100, 100)) assert (slice(None), slice(None)) == replace_ellipsis((Ellipsis, slice(None)), (100, 100)) assert (slice(None), slice(None)) == replace_ellipsis((slice(None), Ellipsis), (100, 100)) assert (slice(None), slice(None)) == replace_ellipsis( (slice(None), Ellipsis, slice(None)), (100, 100) ) assert (slice(None), slice(None)) == replace_ellipsis( (Ellipsis, slice(None), slice(None)), (100, 100) ) assert (slice(None), slice(None)) == replace_ellipsis( (slice(None), slice(None), Ellipsis), (100, 100) ) @pytest.mark.parametrize( ("value", "dtype"), [ (42, "uint8"), pytest.param( (b"aaa", 1, 4.2), [("foo", "S3"), ("bar", "i4"), ("baz", "f8")], marks=pytest.mark.xfail ), ], ) @pytest.mark.parametrize("use_out", [True, False]) def test_get_basic_selection_0d(store: StorePath, use_out: bool, value: Any, dtype: Any) -> None: # setup arr_np = np.array(value, dtype=dtype) arr_z = zarr_array_from_numpy_array(store, arr_np) assert_array_equal(arr_np, arr_z.get_basic_selection(Ellipsis)) assert_array_equal(arr_np, arr_z[...]) assert value == arr_z.get_basic_selection(()) assert value == arr_z[()] if use_out: # test out param b = default_buffer_prototype().nd_buffer.from_numpy_array(np.zeros_like(arr_np)) arr_z.get_basic_selection(Ellipsis, out=b) assert_array_equal(arr_np, b.as_ndarray_like()) # todo: uncomment the structured array tests when we can make them pass, # or delete them if we formally decide not to support structured dtypes. 
# test structured array # value = (b"aaa", 1, 4.2) # a = np.array(value, dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")]) # z = zarr_array_from_numpy_array(store, a) # z[()] = value # assert_array_equal(a, z.get_basic_selection(Ellipsis)) # assert_array_equal(a, z[...]) # assert a[()] == z.get_basic_selection(()) # assert a[()] == z[()] # assert b"aaa" == z.get_basic_selection((), fields="foo") # assert b"aaa" == z["foo"] # assert a[["foo", "bar"]] == z.get_basic_selection((), fields=["foo", "bar"]) # assert a[["foo", "bar"]] == z["foo", "bar"] # # test out param # b = NDBuffer.from_numpy_array(np.zeros_like(a)) # z.get_basic_selection(Ellipsis, out=b) # assert_array_equal(a, b) # c = NDBuffer.from_numpy_array(np.zeros_like(a[["foo", "bar"]])) # z.get_basic_selection(Ellipsis, out=c, fields=["foo", "bar"]) # assert_array_equal(a[["foo", "bar"]], c) basic_selections_1d: list[BasicSelection] = [ # single value 42, -1, # slices slice(0, 1050), slice(50, 150), slice(0, 2000), slice(-150, -50), slice(-2000, 2000), slice(0, 0), # empty result slice(-1, 0), # empty result # total selections slice(None), Ellipsis, (), (Ellipsis, slice(None)), # slice with step slice(None), slice(None, None), slice(None, None, 1), slice(None, None, 10), slice(None, None, 100), slice(None, None, 1000), slice(None, None, 10000), slice(0, 1050), slice(0, 1050, 1), slice(0, 1050, 10), slice(0, 1050, 100), slice(0, 1050, 1000), slice(0, 1050, 10000), slice(1, 31, 3), slice(1, 31, 30), slice(1, 31, 300), slice(81, 121, 3), slice(81, 121, 30), slice(81, 121, 300), slice(50, 150), slice(50, 150, 1), slice(50, 150, 10), ] basic_selections_1d_bad = [ # only positive step supported slice(None, None, -1), slice(None, None, -10), slice(None, None, -100), slice(None, None, -1000), slice(None, None, -10000), slice(1050, -1, -1), slice(1050, -1, -10), slice(1050, -1, -100), slice(1050, -1, -1000), slice(1050, -1, -10000), slice(1050, 0, -1), slice(1050, 0, -10), slice(1050, 0, -100), slice(1050, 0, -1000), slice(1050, 0, -10000), slice(150, 50, -1), slice(150, 50, -10), slice(31, 1, -3), slice(121, 81, -3), slice(-1, 0, -1), # bad stuff 2.3, "foo", b"xxx", None, (0, 0), (slice(None), slice(None)), ] def _test_get_basic_selection( a: npt.NDArray[Any] | Array, z: Array, selection: BasicSelection ) -> None: expect = a[selection] actual = z.get_basic_selection(selection) assert_array_equal(expect, actual) actual = z[selection] assert_array_equal(expect, actual) # test out param b = default_buffer_prototype().nd_buffer.from_numpy_array( np.empty(shape=expect.shape, dtype=expect.dtype) ) z.get_basic_selection(selection, out=b) assert_array_equal(expect, b.as_numpy_array()) # noinspection PyStatementEffect def test_get_basic_selection_1d(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) for selection in basic_selections_1d: _test_get_basic_selection(a, z, selection) for selection_bad in basic_selections_1d_bad: with pytest.raises(IndexError): z.get_basic_selection(selection_bad) # type: ignore[arg-type] with pytest.raises(IndexError): z[selection_bad] # type: ignore[index] with pytest.raises(IndexError): z.get_basic_selection([1, 0]) # type: ignore[arg-type] basic_selections_2d: list[BasicSelection] = [ # single row 42, -1, (42, slice(None)), (-1, slice(None)), # single col (slice(None), 4), (slice(None), -1), # row slices slice(None), slice(0, 1000), slice(250, 350), slice(0, 2000), slice(-350, -250), slice(0, 0), # empty result slice(-1, 0), # empty 
result slice(-2000, 0), slice(-2000, 2000), # 2D slices (slice(None), slice(1, 5)), (slice(250, 350), slice(None)), (slice(250, 350), slice(1, 5)), (slice(250, 350), slice(-5, -1)), (slice(250, 350), slice(-50, 50)), (slice(250, 350, 10), slice(1, 5)), (slice(250, 350), slice(1, 5, 2)), (slice(250, 350, 33), slice(1, 5, 3)), # total selections (slice(None), slice(None)), Ellipsis, (), (Ellipsis, slice(None)), (Ellipsis, slice(None), slice(None)), ] basic_selections_2d_bad = [ # bad stuff 2.3, "foo", b"xxx", None, (2.3, slice(None)), # only positive step supported slice(None, None, -1), (slice(None, None, -1), slice(None)), (0, 0, 0), (slice(None), slice(None), slice(None)), ] # noinspection PyStatementEffect def test_get_basic_selection_2d(store: StorePath) -> None: # setup a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) for selection in basic_selections_2d: _test_get_basic_selection(a, z, selection) bad_selections = basic_selections_2d_bad + [ # integer arrays [0, 1], (slice(None), [0, 1]), ] for selection_bad in bad_selections: with pytest.raises(IndexError): z.get_basic_selection(selection_bad) # type: ignore[arg-type] # check fallback on fancy indexing fancy_selection = ([0, 1], [0, 1]) np.testing.assert_array_equal(z[fancy_selection], [0, 11]) def test_fancy_indexing_fallback_on_get_setitem(store: StorePath) -> None: z = zarr_array_from_numpy_array(store, np.zeros((20, 20))) z[[1, 2, 3], [1, 2, 3]] = 1 np.testing.assert_array_equal( z[:4, :4], [ [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ], ) np.testing.assert_array_equal(z[[1, 2, 3], [1, 2, 3]], 1) # test broadcasting np.testing.assert_array_equal(z[1, [1, 2, 3]], [1, 0, 0]) # test 1D fancy indexing z2 = zarr_array_from_numpy_array(store, np.zeros(5)) z2[[1, 2, 3]] = 1 np.testing.assert_array_equal(z2[:], [0, 1, 1, 1, 0]) @pytest.mark.parametrize( ("index", "expected_result"), [ # Single iterable of integers ([0, 1], [[0, 1, 2], [3, 4, 5]]), # List first, then slice (([0, 1], slice(None)), [[0, 1, 2], [3, 4, 5]]), # List first, then slice (([0, 1], slice(1, None)), [[1, 2], [4, 5]]), # Slice first, then list ((slice(0, 2), [0, 2]), [[0, 2], [3, 5]]), # Slices only ((slice(0, 2), slice(0, 2)), [[0, 1], [3, 4]]), # List with repeated index (([1, 0, 1], slice(1, None)), [[4, 5], [1, 2], [4, 5]]), # 1D indexing (([1, 0, 1]), [[3, 4, 5], [0, 1, 2], [3, 4, 5]]), ], ) def test_orthogonal_indexing_fallback_on_getitem_2d( store: StorePath, index: Selection, expected_result: npt.ArrayLike ) -> None: """ Tests the orthogonal indexing fallback on __getitem__ for a 2D matrix. In addition to checking expected behavior, all indexing is also checked against numpy. """ # [0, 1, 2], # [3, 4, 5], # [6, 7, 8] a = np.arange(9).reshape(3, 3) z = zarr_array_from_numpy_array(store, a) np.testing.assert_array_equal(z[index], a[index], err_msg="Indexing disagrees with numpy") np.testing.assert_array_equal(z[index], expected_result) @pytest.mark.skip(reason="fails on ubuntu, windows; numpy=2.2; in CI") def test_setitem_repeated_index(): array = zarr.array(data=np.zeros((4,)), chunks=(1,)) indexer = np.array([-1, -1, 0, 0]) array.oindex[(indexer,)] = [0, 1, 2, 3] np.testing.assert_array_equal(array[:], np.array([3, 0, 0, 1])) indexer = np.array([-1, 0, 0, -1]) array.oindex[(indexer,)] = [0, 1, 2, 3] np.testing.assert_array_equal(array[:], np.array([2, 0, 0, 3])) Index = list[int] | tuple[slice | int | list[int], ...] 
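
# A minimal sketch (not part of the test suite) contrasting the two indexing styles the
# surrounding tests exercise. The helper name is ours; it assumes `zarr.array` accepts a
# numpy array, as in `test_setitem_repeated_index` above.
def _example_fancy_vs_orthogonal_indexing() -> None:
    a = np.arange(9).reshape(3, 3)
    z = zarr.array(data=a)
    # plain __getitem__ with two integer lists falls back to numpy-style fancy
    # (pointwise) indexing: this picks elements (0, 0) and (2, 2)
    np.testing.assert_array_equal(z[[0, 2], [0, 2]], a[[0, 2], [0, 2]])
    # oindex selects along each axis independently (orthogonal indexing),
    # equivalent to numpy's np.ix_: this picks the four corner elements
    np.testing.assert_array_equal(z.oindex[[0, 2], [0, 2]], a[np.ix_([0, 2], [0, 2])])
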
@pytest.mark.parametrize( ("index", "expected_result"), [ # Single iterable of integers ([0, 1], [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[9, 10, 11], [12, 13, 14], [15, 16, 17]]]), # One slice, two integers ((slice(0, 2), 1, 1), [4, 13]), # One integer, two slices ((slice(0, 2), 1, slice(0, 2)), [[3, 4], [12, 13]]), # Two slices and a list ((slice(0, 2), [1, 2], slice(0, 2)), [[[3, 4], [6, 7]], [[12, 13], [15, 16]]]), ], ) def test_orthogonal_indexing_fallback_on_getitem_3d( store: StorePath, index: Selection, expected_result: npt.ArrayLike ) -> None: """ Tests the orthogonal indexing fallback on __getitem__ for a 3D matrix. In addition to checking expected behavior, all indexing is also checked against numpy. """ # [[[ 0, 1, 2], # [ 3, 4, 5], # [ 6, 7, 8]], # [[ 9, 10, 11], # [12, 13, 14], # [15, 16, 17]], # [[18, 19, 20], # [21, 22, 23], # [24, 25, 26]]] a = np.arange(27).reshape(3, 3, 3) z = zarr_array_from_numpy_array(store, a) np.testing.assert_array_equal(z[index], a[index], err_msg="Indexing disagrees with numpy") np.testing.assert_array_equal(z[index], expected_result) @pytest.mark.parametrize( ("index", "expected_result"), [ # Single iterable of integers ([0, 1], [[1, 1, 1], [1, 1, 1], [0, 0, 0]]), # List and slice combined (([0, 1], slice(1, 3)), [[0, 1, 1], [0, 1, 1], [0, 0, 0]]), # Index repetition is ignored on setitem (([0, 1, 1, 1, 1, 1, 1], slice(1, 3)), [[0, 1, 1], [0, 1, 1], [0, 0, 0]]), # Slice with step (([0, 2], slice(None, None, 2)), [[1, 0, 1], [0, 0, 0], [1, 0, 1]]), ], ) def test_orthogonal_indexing_fallback_on_setitem_2d( store: StorePath, index: Selection, expected_result: npt.ArrayLike ) -> None: """ Tests the orthogonal indexing fallback on __setitem__ for a 3D matrix. In addition to checking expected behavior, all indexing is also checked against numpy. """ # Slice + fancy index a = np.zeros((3, 3)) z = zarr_array_from_numpy_array(store, a) z[index] = 1 a[index] = 1 np.testing.assert_array_equal(z[:], expected_result) np.testing.assert_array_equal(z[:], a, err_msg="Indexing disagrees with numpy") def test_fancy_indexing_doesnt_mix_with_implicit_slicing(store: StorePath) -> None: z2 = zarr_array_from_numpy_array(store, np.zeros((5, 5, 5))) with pytest.raises(IndexError): z2[[1, 2, 3], [1, 2, 3]] = 2 with pytest.raises(IndexError): np.testing.assert_array_equal(z2[[1, 2, 3], [1, 2, 3]], 0) with pytest.raises(IndexError): z2[..., [1, 2, 3]] = 2 # type: ignore[index] with pytest.raises(IndexError): np.testing.assert_array_equal(z2[..., [1, 2, 3]], 0) # type: ignore[index] @pytest.mark.parametrize( ("value", "dtype"), [ (42, "uint8"), pytest.param( (b"aaa", 1, 4.2), [("foo", "S3"), ("bar", "i4"), ("baz", "f8")], marks=pytest.mark.xfail ), ], ) def test_set_basic_selection_0d( store: StorePath, value: Any, dtype: str | list[tuple[str, str]] ) -> None: arr_np = np.array(value, dtype=dtype) arr_np_zeros = np.zeros_like(arr_np, dtype=dtype) arr_z = zarr_array_from_numpy_array(store, arr_np_zeros) assert_array_equal(arr_np_zeros, arr_z) arr_z.set_basic_selection(Ellipsis, value) assert_array_equal(value, arr_z) arr_z[...] = 0 assert_array_equal(arr_np_zeros, arr_z) arr_z[...] = value assert_array_equal(value, arr_z) # todo: uncomment the structured array tests when we can make them pass, # or delete them if we formally decide not to support structured dtypes. 
# arr_z.set_basic_selection(Ellipsis, v["foo"], fields="foo") # assert v["foo"] == arr_z["foo"] # assert arr_np_zeros["bar"] == arr_z["bar"] # assert arr_np_zeros["baz"] == arr_z["baz"] # arr_z["bar"] = v["bar"] # assert v["foo"] == arr_z["foo"] # assert v["bar"] == arr_z["bar"] # assert arr_np_zeros["baz"] == arr_z["baz"] # # multiple field assignment not supported # with pytest.raises(IndexError): # arr_z.set_basic_selection(Ellipsis, v[["foo", "bar"]], fields=["foo", "bar"]) # with pytest.raises(IndexError): # arr_z[..., "foo", "bar"] = v[["foo", "bar"]] def _test_get_orthogonal_selection( a: npt.NDArray[Any], z: Array, selection: OrthogonalSelection ) -> None: expect = oindex(a, selection) actual = z.get_orthogonal_selection(selection) assert_array_equal(expect, actual) actual = z.oindex[selection] assert_array_equal(expect, actual) # noinspection PyStatementEffect def test_get_orthogonal_selection_1d_bool(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: ix = np.random.binomial(1, p, size=a.shape[0]).astype(bool) _test_get_orthogonal_selection(a, z, ix) # test errors with pytest.raises(IndexError): z.oindex[np.zeros(50, dtype=bool)] # too short with pytest.raises(IndexError): z.oindex[np.zeros(2000, dtype=bool)] # too long with pytest.raises(IndexError): # too many dimensions z.oindex[[[True, False], [False, True]]] # type: ignore[index] # noinspection PyStatementEffect def test_get_orthogonal_selection_1d_int(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 2, 0.5, 0.1, 0.01: # unordered ix = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) _test_get_orthogonal_selection(a, z, ix) # increasing ix.sort() _test_get_orthogonal_selection(a, z, ix) # decreasing ix = ix[::-1] _test_get_orthogonal_selection(a, z, ix) selections = basic_selections_1d + [ # test wraparound [0, 3, 10, -23, -12, -1], # explicit test not sorted [3, 105, 23, 127], ] for selection in selections: _test_get_orthogonal_selection(a, z, selection) bad_selections = basic_selections_1d_bad + [ [a.shape[0] + 1], # out of bounds [-(a.shape[0] + 1)], # out of bounds [[2, 4], [6, 8]], # too many dimensions ] for bad_selection in bad_selections: with pytest.raises(IndexError): z.get_orthogonal_selection(bad_selection) # type: ignore[arg-type] with pytest.raises(IndexError): z.oindex[bad_selection] # type: ignore[index] def _test_get_orthogonal_selection_2d( a: npt.NDArray[Any], z: Array, ix0: npt.NDArray[np.bool], ix1: npt.NDArray[np.bool] ) -> None: selections = [ # index both axes with array (ix0, ix1), # mixed indexing with array / slice (ix0, slice(1, 5)), (ix0, slice(1, 5, 2)), (slice(250, 350), ix1), (slice(250, 350, 10), ix1), # mixed indexing with array / int (ix0, 4), (42, ix1), ] for selection in selections: _test_get_orthogonal_selection(a, z, selection) # noinspection PyStatementEffect def test_get_orthogonal_selection_2d(store: StorePath) -> None: # setup a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: # boolean arrays ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool) ix1 = np.random.binomial(1, 0.5, 
size=a.shape[1]).astype(bool) _test_get_orthogonal_selection_2d(a, z, ix0, ix1) # mixed int array / bool array selections = ( (ix0, np.nonzero(ix1)[0]), (np.nonzero(ix0)[0], ix1), ) for selection in selections: _test_get_orthogonal_selection(a, z, selection) # integer arrays ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True) _test_get_orthogonal_selection_2d(a, z, ix0, ix1) ix0.sort() ix1.sort() _test_get_orthogonal_selection_2d(a, z, ix0, ix1) ix0 = ix0[::-1] ix1 = ix1[::-1] _test_get_orthogonal_selection_2d(a, z, ix0, ix1) for selection_2d in basic_selections_2d: _test_get_orthogonal_selection(a, z, selection_2d) for selection_2d_bad in basic_selections_2d_bad: with pytest.raises(IndexError): z.get_orthogonal_selection(selection_2d_bad) # type: ignore[arg-type] with pytest.raises(IndexError): z.oindex[selection_2d_bad] # type: ignore[index] def _test_get_orthogonal_selection_3d( a: npt.NDArray, z: Array, ix0: npt.NDArray[np.bool], ix1: npt.NDArray[np.bool], ix2: npt.NDArray[np.bool], ) -> None: selections = [ # single value (84, 42, 4), (-1, -1, -1), # index all axes with array (ix0, ix1, ix2), # mixed indexing with single array / slices (ix0, slice(15, 25), slice(1, 5)), (slice(50, 70), ix1, slice(1, 5)), (slice(50, 70), slice(15, 25), ix2), (ix0, slice(15, 25, 5), slice(1, 5, 2)), (slice(50, 70, 3), ix1, slice(1, 5, 2)), (slice(50, 70, 3), slice(15, 25, 5), ix2), # mixed indexing with single array / ints (ix0, 42, 4), (84, ix1, 4), (84, 42, ix2), # mixed indexing with single array / slice / int (ix0, slice(15, 25), 4), (42, ix1, slice(1, 5)), (slice(50, 70), 42, ix2), # mixed indexing with two array / slice (ix0, ix1, slice(1, 5)), (slice(50, 70), ix1, ix2), (ix0, slice(15, 25), ix2), # mixed indexing with two array / integer (ix0, ix1, 4), (42, ix1, ix2), (ix0, 42, ix2), ] for selection in selections: _test_get_orthogonal_selection(a, z, selection) def test_get_orthogonal_selection_3d(store: StorePath) -> None: # setup a = np.arange(100000, dtype=int).reshape(200, 50, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(60, 20, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: # boolean arrays ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool) ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool) ix2 = np.random.binomial(1, 0.5, size=a.shape[2]).astype(bool) _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2) # integer arrays ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True) ix2 = np.random.choice(a.shape[2], size=int(a.shape[2] * 0.5), replace=True) _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2) ix0.sort() ix1.sort() ix2.sort() _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2) ix0 = ix0[::-1] ix1 = ix1[::-1] ix2 = ix2[::-1] _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2) def test_orthogonal_indexing_edge_cases(store: StorePath) -> None: a = np.arange(6).reshape(1, 2, 3) z = zarr_array_from_numpy_array(store, a, chunk_shape=(1, 2, 3)) expect = oindex(a, (0, slice(None), [0, 1, 2])) actual = z.oindex[0, :, [0, 1, 2]] assert_array_equal(expect, actual) expect = oindex(a, (0, slice(None), [True, True, True])) actual = z.oindex[0, :, [True, True, True]] assert_array_equal(expect, actual) def _test_set_orthogonal_selection( v: npt.NDArray[np.int_], a: npt.NDArray[Any], z: Array, selection: 
OrthogonalSelection ) -> None: for value in 42, oindex(v, selection), oindex(v, selection).tolist(): if isinstance(value, list) and value == []: # skip these cases as cannot preserve all dimensions continue # setup expectation a[:] = 0 oindex_set(a, selection, value) # long-form API z[:] = 0 z.set_orthogonal_selection(selection, value) assert_array_equal(a, z[:]) # short-form API z[:] = 0 z.oindex[selection] = value assert_array_equal(a, z[:]) def test_set_orthogonal_selection_1d(store: StorePath) -> None: # setup v = np.arange(1050, dtype=int) a = np.empty(v.shape, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) # test with different degrees of sparseness np.random.seed(42) for p in 0.5, 0.1, 0.01: # boolean arrays ix = np.random.binomial(1, p, size=a.shape[0]).astype(bool) _test_set_orthogonal_selection(v, a, z, ix) # integer arrays ix = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) _test_set_orthogonal_selection(v, a, z, ix) ix.sort() _test_set_orthogonal_selection(v, a, z, ix) ix = ix[::-1] _test_set_orthogonal_selection(v, a, z, ix) # basic selections for selection in basic_selections_1d: _test_set_orthogonal_selection(v, a, z, selection) def test_set_item_1d_last_two_chunks(store: StorePath): # regression test for GH2849 g = zarr.open_group(store=store, zarr_format=3, mode="w") a = g.create_array("bar", shape=(10,), chunks=(3,), dtype=int) data = np.array([7, 8, 9]) a[slice(7, 10)] = data np.testing.assert_array_equal(a[slice(7, 10)], data) z = zarr.open_group(store=store, mode="w") z.create_array("zoo", dtype=float, shape=()) z["zoo"][...] = np.array(1) # why doesn't [:] work? np.testing.assert_equal(z["zoo"][()], np.array(1)) z = zarr.open_group(store=store, mode="w") z.create_array("zoo", dtype=float, shape=()) z["zoo"][...] = 1 # why doesn't [:] work? 
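    # Answering the question above: for a zero-dimensional array, `[:]` applies
    # a one-tuple of slices to an array with ndim == 0 and is rejected (NumPy
    # raises IndexError for the same selection), whereas `...` expands to zero
    # slices and is valid for any rank. The assertion below reads the scalar
    # back with `[()]` for the same reason.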
np.testing.assert_equal(z["zoo"][()], np.array(1)) def _test_set_orthogonal_selection_2d( v: npt.NDArray[np.int_], a: npt.NDArray[np.int_], z: Array, ix0: npt.NDArray[np.bool], ix1: npt.NDArray[np.bool], ) -> None: selections = [ # index both axes with array (ix0, ix1), # mixed indexing with array / slice or int (ix0, slice(1, 5)), (slice(250, 350), ix1), (ix0, 4), (42, ix1), ] for selection in selections: _test_set_orthogonal_selection(v, a, z, selection) def test_set_orthogonal_selection_2d(store: StorePath) -> None: # setup v = np.arange(10000, dtype=int).reshape(1000, 10) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: # boolean arrays ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool) ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool) _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1) # integer arrays ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True) _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1) ix0.sort() ix1.sort() _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1) ix0 = ix0[::-1] ix1 = ix1[::-1] _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1) for selection in basic_selections_2d: _test_set_orthogonal_selection(v, a, z, selection) def _test_set_orthogonal_selection_3d( v: npt.NDArray[np.int_], a: npt.NDArray[np.int_], z: Array, ix0: npt.NDArray[np.bool], ix1: npt.NDArray[np.bool], ix2: npt.NDArray[np.bool], ) -> None: selections = ( # single value (84, 42, 4), (-1, -1, -1), # index all axes with bool array (ix0, ix1, ix2), # mixed indexing with single bool array / slice or int (ix0, slice(15, 25), slice(1, 5)), (slice(50, 70), ix1, slice(1, 5)), (slice(50, 70), slice(15, 25), ix2), (ix0, 42, 4), (84, ix1, 4), (84, 42, ix2), (ix0, slice(15, 25), 4), (slice(50, 70), ix1, 4), (slice(50, 70), 42, ix2), # indexing with two arrays / slice (ix0, ix1, slice(1, 5)), # indexing with two arrays / integer (ix0, ix1, 4), ) for selection in selections: _test_set_orthogonal_selection(v, a, z, selection) def test_set_orthogonal_selection_3d(store: StorePath) -> None: # setup v = np.arange(100000, dtype=int).reshape(200, 50, 10) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, a, chunk_shape=(60, 20, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: # boolean arrays ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool) ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool) ix2 = np.random.binomial(1, 0.5, size=a.shape[2]).astype(bool) _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2) # integer arrays ix0 = np.random.choice(a.shape[0], size=int(a.shape[0] * p), replace=True) ix1 = np.random.choice(a.shape[1], size=int(a.shape[1] * 0.5), replace=True) ix2 = np.random.choice(a.shape[2], size=int(a.shape[2] * 0.5), replace=True) _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2) # sorted increasing ix0.sort() ix1.sort() ix2.sort() _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2) # sorted decreasing ix0 = ix0[::-1] ix1 = ix1[::-1] ix2 = ix2[::-1] _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2) def test_orthogonal_indexing_fallback_on_get_setitem(store: StorePath) -> None: z = zarr_array_from_numpy_array(store, np.zeros((20, 20))) z[[1, 2, 3], [1, 2, 3]] = 1 np.testing.assert_array_equal( z[:4, :4], [ [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], 
[0, 0, 0, 1], ], ) np.testing.assert_array_equal(z[[1, 2, 3], [1, 2, 3]], 1) # test broadcasting np.testing.assert_array_equal(z[1, [1, 2, 3]], [1, 0, 0]) # test 1D fancy indexing z2 = zarr_array_from_numpy_array(store, np.zeros(5)) z2[[1, 2, 3]] = 1 np.testing.assert_array_equal(z2[:], [0, 1, 1, 1, 0]) def _test_get_coordinate_selection( a: npt.NDArray, z: Array, selection: CoordinateSelection ) -> None: expect = a[selection] actual = z.get_coordinate_selection(selection) assert_array_equal(expect, actual) actual = z.vindex[selection] assert_array_equal(expect, actual) coordinate_selections_1d_bad = [ # slice not supported slice(5, 15), slice(None), Ellipsis, # bad stuff 2.3, "foo", b"xxx", None, (0, 0), (slice(None), slice(None)), ] # noinspection PyStatementEffect def test_get_coordinate_selection_1d(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 2, 0.5, 0.1, 0.01: n = int(a.size * p) ix = np.random.choice(a.shape[0], size=n, replace=True) _test_get_coordinate_selection(a, z, ix) ix.sort() _test_get_coordinate_selection(a, z, ix) ix = ix[::-1] _test_get_coordinate_selection(a, z, ix) selections = [ # test single item 42, -1, # test wraparound [0, 3, 10, -23, -12, -1], # test out of order [3, 105, 23, 127], # not monotonically increasing # test multi-dimensional selection np.array([[2, 4], [6, 8]]), ] for selection in selections: _test_get_coordinate_selection(a, z, selection) # test errors bad_selections = coordinate_selections_1d_bad + [ [a.shape[0] + 1], # out of bounds [-(a.shape[0] + 1)], # out of bounds ] for selection in bad_selections: with pytest.raises(IndexError): z.get_coordinate_selection(selection) # type: ignore[arg-type] with pytest.raises(IndexError): z.vindex[selection] # type: ignore[index] def test_get_coordinate_selection_2d(store: StorePath) -> None: # setup a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) ix0: npt.ArrayLike ix1: npt.ArrayLike # test with different degrees of sparseness for p in 2, 0.5, 0.1, 0.01: n = int(a.size * p) ix0 = np.random.choice(a.shape[0], size=n, replace=True) ix1 = np.random.choice(a.shape[1], size=n, replace=True) selections = [ # single value (42, 4), (-1, -1), # index both axes with array (ix0, ix1), # mixed indexing with array / int (ix0, 4), (42, ix1), (42, 4), ] for selection in selections: _test_get_coordinate_selection(a, z, selection) # not monotonically increasing (first dim) ix0 = [3, 3, 4, 2, 5] ix1 = [1, 3, 5, 7, 9] _test_get_coordinate_selection(a, z, (ix0, ix1)) # not monotonically increasing (second dim) ix0 = [1, 1, 2, 2, 5] ix1 = [1, 3, 2, 1, 0] _test_get_coordinate_selection(a, z, (ix0, ix1)) # multi-dimensional selection ix0 = np.array([[1, 1, 2], [2, 2, 5]]) ix1 = np.array([[1, 3, 2], [1, 0, 0]]) _test_get_coordinate_selection(a, z, (ix0, ix1)) with pytest.raises(IndexError): selection = slice(5, 15), [1, 2, 3] z.get_coordinate_selection(selection) # type:ignore[arg-type] with pytest.raises(IndexError): selection = [1, 2, 3], slice(5, 15) z.get_coordinate_selection(selection) # type:ignore[arg-type] with pytest.raises(IndexError): selection = Ellipsis, [1, 2, 3] z.get_coordinate_selection(selection) # type:ignore[arg-type] with pytest.raises(IndexError): selection = Ellipsis z.get_coordinate_selection(selection) # type:ignore[arg-type] def _test_set_coordinate_selection( v: 
npt.NDArray, a: npt.NDArray, z: Array, selection: CoordinateSelection ) -> None: for value in 42, v[selection], v[selection].tolist(): # setup expectation a[:] = 0 a[selection] = value # test long-form API z[:] = 0 z.set_coordinate_selection(selection, value) assert_array_equal(a, z[:]) # test short-form API z[:] = 0 z.vindex[selection] = value assert_array_equal(a, z[:]) def test_set_coordinate_selection_1d(store: StorePath) -> None: # setup v = np.arange(1050, dtype=int) a = np.empty(v.shape, dtype=v.dtype) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 2, 0.5, 0.1, 0.01: n = int(a.size * p) ix = np.random.choice(a.shape[0], size=n, replace=True) _test_set_coordinate_selection(v, a, z, ix) # multi-dimensional selection ix = np.array([[2, 4], [6, 8]]) _test_set_coordinate_selection(v, a, z, ix) for selection in coordinate_selections_1d_bad: with pytest.raises(IndexError): z.set_coordinate_selection(selection, 42) # type:ignore[arg-type] with pytest.raises(IndexError): z.vindex[selection] = 42 # type:ignore[index] def test_set_coordinate_selection_2d(store: StorePath) -> None: # setup v = np.arange(10000, dtype=int).reshape(1000, 10) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 2, 0.5, 0.1, 0.01: n = int(a.size * p) ix0 = np.random.choice(a.shape[0], size=n, replace=True) ix1 = np.random.choice(a.shape[1], size=n, replace=True) selections = ( (42, 4), (-1, -1), # index both axes with array (ix0, ix1), # mixed indexing with array / int (ix0, 4), (42, ix1), ) for selection in selections: _test_set_coordinate_selection(v, a, z, selection) # multi-dimensional selection ix0 = np.array([[1, 2, 3], [4, 5, 6]]) ix1 = np.array([[1, 3, 2], [2, 0, 5]]) _test_set_coordinate_selection(v, a, z, (ix0, ix1)) def _test_get_block_selection( a: npt.NDArray[Any], z: Array, selection: BasicSelection, expected_idx: slice | tuple[slice, ...], ) -> None: expect = a[expected_idx] actual = z.get_block_selection(selection) assert_array_equal(expect, actual) actual = z.blocks[selection] assert_array_equal(expect, actual) block_selections_1d: list[BasicSelection] = [ # test single item 0, 5, # test wraparound -1, -4, # test slice slice(5), slice(None, 3), slice(5, 6), slice(-3, -1), slice(None), # Full slice ] block_selections_1d_array_projection: list[slice] = [ # test single item slice(100), slice(500, 600), # test wraparound slice(1000, None), slice(700, 800), # test slice slice(500), slice(None, 300), slice(500, 600), slice(800, 1000), slice(None), ] block_selections_1d_bad = [ # slice not supported slice(3, 8, 2), # bad stuff 2.3, # "foo", # TODO b"xxx", None, (0, 0), (slice(None), slice(None)), [0, 5, 3], ] def test_get_block_selection_1d(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) for selection, expected_idx in zip( block_selections_1d, block_selections_1d_array_projection, strict=True ): _test_get_block_selection(a, z, selection, expected_idx) bad_selections = block_selections_1d_bad + [ z.metadata.chunk_grid.get_nchunks(z.shape) + 1, # out of bounds -(z.metadata.chunk_grid.get_nchunks(z.shape) + 1), # out of bounds ] for selection_bad in bad_selections: with pytest.raises(IndexError): z.get_block_selection(selection_bad) # type:ignore[arg-type] with pytest.raises(IndexError): z.blocks[selection_bad] # type:ignore[index] 
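# An illustrative sketch (hypothetical helper, not collected by pytest) of how
# block indices project onto array slices, mirroring the pairing of
# block_selections_1d with block_selections_1d_array_projection above: for a
# 1050-element array with chunk shape (100,), block i covers
# a[i * 100 : (i + 1) * 100], and negative block indices wrap around.
def _example_block_selection(store: StorePath) -> None:
    a = np.arange(1050, dtype=int)
    z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,))
    assert_array_equal(z.blocks[5], a[500:600])  # sixth chunk
    assert_array_equal(z.blocks[-1], a[1000:1050])  # last, partial chunk
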
block_selections_2d: list[BasicSelection] = [ # test single item (0, 0), (1, 2), # test wraparound (-1, -1), (-3, -2), # test slice (slice(1), slice(2)), (slice(None, 2), slice(-2, -1)), (slice(2, 3), slice(-2, None)), (slice(-3, -1), slice(-3, -2)), (slice(None), slice(None)), # Full slice ] block_selections_2d_array_projection: list[tuple[slice, slice]] = [ # test single item (slice(300), slice(3)), (slice(300, 600), slice(6, 9)), # test wraparound (slice(900, None), slice(9, None)), (slice(300, 600), slice(6, 9)), # test slice (slice(300), slice(6)), (slice(None, 600), slice(6, 9)), (slice(600, 900), slice(6, None)), (slice(300, 900), slice(3, 6)), (slice(None), slice(None)), # Full slice ] def test_get_block_selection_2d(store: StorePath) -> None: # setup a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) for selection, expected_idx in zip( block_selections_2d, block_selections_2d_array_projection, strict=True ): _test_get_block_selection(a, z, selection, expected_idx) with pytest.raises(IndexError): selection = slice(5, 15), [1, 2, 3] z.get_block_selection(selection) with pytest.raises(IndexError): selection = Ellipsis, [1, 2, 3] z.get_block_selection(selection) with pytest.raises(IndexError): # out of bounds selection = slice(15, 20), slice(None) z.get_block_selection(selection) def _test_set_block_selection( v: npt.NDArray[Any], a: npt.NDArray[Any], z: zarr.Array, selection: BasicSelection, expected_idx: slice, ) -> None: for value in 42, v[expected_idx], v[expected_idx].tolist(): # setup expectation a[:] = 0 a[expected_idx] = value # test long-form API z[:] = 0 z.set_block_selection(selection, value) assert_array_equal(a, z[:]) # test short-form API z[:] = 0 z.blocks[selection] = value assert_array_equal(a, z[:]) def test_set_block_selection_1d(store: StorePath) -> None: # setup v = np.arange(1050, dtype=int) a = np.empty(v.shape, dtype=v.dtype) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) for selection, expected_idx in zip( block_selections_1d, block_selections_1d_array_projection, strict=True ): _test_set_block_selection(v, a, z, selection, expected_idx) for selection_bad in block_selections_1d_bad: with pytest.raises(IndexError): z.set_block_selection(selection_bad, 42) # type:ignore[arg-type] with pytest.raises(IndexError): z.blocks[selection_bad] = 42 # type:ignore[index] def test_set_block_selection_2d(store: StorePath) -> None: # setup v = np.arange(10000, dtype=int).reshape(1000, 10) a = np.empty(v.shape, dtype=v.dtype) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) for selection, expected_idx in zip( block_selections_2d, block_selections_2d_array_projection, strict=True ): _test_set_block_selection(v, a, z, selection, expected_idx) with pytest.raises(IndexError): selection = slice(5, 15), [1, 2, 3] z.set_block_selection(selection, 42) with pytest.raises(IndexError): selection = Ellipsis, [1, 2, 3] z.set_block_selection(selection, 42) with pytest.raises(IndexError): # out of bounds selection = slice(15, 20), slice(None) z.set_block_selection(selection, 42) def _test_get_mask_selection(a: npt.NDArray[Any], z: Array, selection: npt.NDArray) -> None: expect = a[selection] actual = z.get_mask_selection(selection) assert_array_equal(expect, actual) actual = z.vindex[selection] assert_array_equal(expect, actual) actual = z[selection] assert_array_equal(expect, actual) mask_selections_1d_bad = [ # slice not supported slice(5, 15), slice(None), Ellipsis, # bad stuff 2.3, "foo", b"xxx", 
None, (0, 0), (slice(None), slice(None)), ] # noinspection PyStatementEffect def test_get_mask_selection_1d(store: StorePath) -> None: # setup a = np.arange(1050, dtype=int) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: ix = np.random.binomial(1, p, size=a.shape[0]).astype(bool) _test_get_mask_selection(a, z, ix) # test errors bad_selections = mask_selections_1d_bad + [ np.zeros(50, dtype=bool), # too short np.zeros(2000, dtype=bool), # too long [[True, False], [False, True]], # too many dimensions ] for selection in bad_selections: with pytest.raises(IndexError): z.get_mask_selection(selection) # type: ignore[arg-type] with pytest.raises(IndexError): z.vindex[selection] # type:ignore[index] # noinspection PyStatementEffect def test_get_mask_selection_2d(store: StorePath) -> None: # setup a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: ix = np.random.binomial(1, p, size=a.size).astype(bool).reshape(a.shape) _test_get_mask_selection(a, z, ix) # test errors with pytest.raises(IndexError): z.vindex[np.zeros((1000, 5), dtype=bool)] # too short with pytest.raises(IndexError): z.vindex[np.zeros((2000, 10), dtype=bool)] # too long with pytest.raises(IndexError): z.vindex[[True, False]] # wrong no. dimensions def _test_set_mask_selection( v: npt.NDArray, a: npt.NDArray, z: Array, selection: npt.NDArray ) -> None: a[:] = 0 z[:] = 0 a[selection] = v[selection] z.set_mask_selection(selection, v[selection]) assert_array_equal(a, z[:]) z[:] = 0 z.vindex[selection] = v[selection] assert_array_equal(a, z[:]) z[:] = 0 z[selection] = v[selection] assert_array_equal(a, z[:]) def test_set_mask_selection_1d(store: StorePath) -> None: # setup v = np.arange(1050, dtype=int) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: ix = np.random.binomial(1, p, size=a.shape[0]).astype(bool) _test_set_mask_selection(v, a, z, ix) for selection in mask_selections_1d_bad: with pytest.raises(IndexError): z.set_mask_selection(selection, 42) # type: ignore[arg-type] with pytest.raises(IndexError): z.vindex[selection] = 42 # type: ignore[index] def test_set_mask_selection_2d(store: StorePath) -> None: # setup v = np.arange(10000, dtype=int).reshape(1000, 10) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: ix = np.random.binomial(1, p, size=a.size).astype(bool).reshape(a.shape) _test_set_mask_selection(v, a, z, ix) def test_get_selection_out(store: StorePath) -> None: # basic selections a = np.arange(1050) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) selections = [ slice(50, 150), slice(0, 1050), slice(1, 2), ] for selection in selections: expect = a[selection] out = get_ndbuffer_class().from_numpy_array(np.empty(expect.shape)) z.get_basic_selection(selection, out=out) assert_array_equal(expect, out.as_numpy_array()[:]) with pytest.raises(TypeError): z.get_basic_selection(Ellipsis, out=[]) # type: ignore[arg-type] # orthogonal selections a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p 
in 0.5, 0.1, 0.01: ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool) ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool) selections = [ # index both axes with array (ix0, ix1), # mixed indexing with array / slice (ix0, slice(1, 5)), (slice(250, 350), ix1), # mixed indexing with array / int (ix0, 4), (42, ix1), # mixed int array / bool array (ix0, np.nonzero(ix1)[0]), (np.nonzero(ix0)[0], ix1), ] for selection in selections: expect = oindex(a, selection) out = get_ndbuffer_class().from_numpy_array(np.zeros(expect.shape, dtype=expect.dtype)) z.get_orthogonal_selection(selection, out=out) assert_array_equal(expect, out.as_numpy_array()[:]) # coordinate selections a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) np.random.seed(42) # test with different degrees of sparseness for p in 0.5, 0.1, 0.01: n = int(a.size * p) ix0 = np.random.choice(a.shape[0], size=n, replace=True) ix1 = np.random.choice(a.shape[1], size=n, replace=True) selections = [ # index both axes with array (ix0, ix1), # mixed indexing with array / int (ix0, 4), (42, ix1), ] for selection in selections: expect = a[selection] out = get_ndbuffer_class().from_numpy_array(np.zeros(expect.shape, dtype=expect.dtype)) z.get_coordinate_selection(selection, out=out) assert_array_equal(expect, out.as_numpy_array()[:]) @pytest.mark.xfail(reason="fields are not supported in v3") def test_get_selections_with_fields(store: StorePath) -> None: a = np.array( [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)], dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")], ) z = zarr_array_from_numpy_array(store, a, chunk_shape=(2,)) fields_fixture: list[str | list[str]] = [ "foo", ["foo"], ["foo", "bar"], ["foo", "baz"], ["bar", "baz"], ["foo", "bar", "baz"], ["bar", "foo"], ["baz", "bar", "foo"], ] for fields in fields_fixture: # total selection expect = a[fields] actual = z.get_basic_selection(Ellipsis, fields=fields) assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z[fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z[fields[0], fields[1]] assert_array_equal(expect, actual) if isinstance(fields, str): actual = z[..., fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z[..., fields[0], fields[1]] assert_array_equal(expect, actual) # basic selection with slice expect = a[fields][0:2] actual = z.get_basic_selection(slice(0, 2), fields=fields) assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z[0:2, fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z[0:2, fields[0], fields[1]] assert_array_equal(expect, actual) # basic selection with single item expect = a[fields][1] actual = z.get_basic_selection(1, fields=fields) assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z[1, fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z[1, fields[0], fields[1]] assert_array_equal(expect, actual) # orthogonal selection ix = [0, 2] expect = a[fields][ix] actual = z.get_orthogonal_selection(ix, fields=fields) assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z.oindex[ix, fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z.oindex[ix, fields[0], fields[1]] assert_array_equal(expect, actual) # coordinate selection ix = [0, 2] expect = a[fields][ix] actual = z.get_coordinate_selection(ix, fields=fields) 
assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z.vindex[ix, fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z.vindex[ix, fields[0], fields[1]] assert_array_equal(expect, actual) # mask selection ix = [True, False, True] expect = a[fields][ix] actual = z.get_mask_selection(ix, fields=fields) assert_array_equal(expect, actual) # alternative API if isinstance(fields, str): actual = z.vindex[ix, fields] assert_array_equal(expect, actual) elif len(fields) == 2: actual = z.vindex[ix, fields[0], fields[1]] assert_array_equal(expect, actual) # missing/bad fields with pytest.raises(IndexError): z.get_basic_selection(Ellipsis, fields=["notafield"]) with pytest.raises(IndexError): z.get_basic_selection(Ellipsis, fields=slice(None)) # type: ignore[arg-type] @pytest.mark.xfail(reason="fields are not supported in v3") def test_set_selections_with_fields(store: StorePath) -> None: v = np.array( [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)], dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")], ) a = np.empty_like(v) z = zarr_array_from_numpy_array(store, v, chunk_shape=(2,)) fields_fixture: list[str | list[str]] = [ "foo", [], ["foo"], ["foo", "bar"], ["foo", "baz"], ["bar", "baz"], ["foo", "bar", "baz"], ["bar", "foo"], ["baz", "bar", "foo"], ] for fields in fields_fixture: # currently multi-field assignment is not supported in numpy, so we won't support # it either if isinstance(fields, list) and len(fields) > 1: with pytest.raises(IndexError): z.set_basic_selection(Ellipsis, v, fields=fields) with pytest.raises(IndexError): z.set_orthogonal_selection([0, 2], v, fields=fields) # type: ignore[arg-type] with pytest.raises(IndexError): z.set_coordinate_selection([0, 2], v, fields=fields) with pytest.raises(IndexError): z.set_mask_selection([True, False, True], v, fields=fields) # type: ignore[arg-type] else: if isinstance(fields, list) and len(fields) == 1: # work around numpy does not support multi-field assignment even if there # is only one field key = fields[0] elif isinstance(fields, list) and len(fields) == 0: # work around numpy ambiguity about what is a field selection key = Ellipsis else: key = fields # setup expectation a[:] = ("", 0, 0) z[:] = ("", 0, 0) assert_array_equal(a, z[:]) a[key] = v[key] # total selection z.set_basic_selection(Ellipsis, v[key], fields=fields) assert_array_equal(a, z[:]) # basic selection with slice a[:] = ("", 0, 0) z[:] = ("", 0, 0) a[key][0:2] = v[key][0:2] z.set_basic_selection(slice(0, 2), v[key][0:2], fields=fields) assert_array_equal(a, z[:]) # orthogonal selection a[:] = ("", 0, 0) z[:] = ("", 0, 0) ix = [0, 2] a[key][ix] = v[key][ix] z.set_orthogonal_selection(ix, v[key][ix], fields=fields) assert_array_equal(a, z[:]) # coordinate selection a[:] = ("", 0, 0) z[:] = ("", 0, 0) ix = [0, 2] a[key][ix] = v[key][ix] z.set_coordinate_selection(ix, v[key][ix], fields=fields) assert_array_equal(a, z[:]) # mask selection a[:] = ("", 0, 0) z[:] = ("", 0, 0) ix = [True, False, True] a[key][ix] = v[key][ix] z.set_mask_selection(ix, v[key][ix], fields=fields) assert_array_equal(a, z[:]) def test_slice_selection_uints() -> None: arr = np.arange(24).reshape((4, 6)) idx = np.uint64(3) slice_sel = make_slice_selection((idx,)) assert arr[tuple(slice_sel)].shape == (1, 6) def test_numpy_int_indexing(store: StorePath) -> None: a = np.arange(1050) z = zarr_array_from_numpy_array(store, a, chunk_shape=(100,)) assert a[42] == z[42] assert a[np.int64(42)] == z[np.int64(42)] @pytest.mark.parametrize( ("shape", 
"chunks", "ops"), [ # 1D test cases ((1070,), (50,), [("__getitem__", (slice(200, 400),))]), ((1070,), (50,), [("__getitem__", (slice(200, 400, 100),))]), ( (1070,), (50,), [ ("__getitem__", (slice(200, 400),)), ("__setitem__", (slice(200, 400, 100),)), ], ), # 2D test cases ( (40, 50), (5, 8), [ ("__getitem__", (slice(6, 37, 13), (slice(4, 10)))), ("__setitem__", (slice(None), (slice(None)))), ], ), ], ) async def test_accessed_chunks( shape: tuple[int, ...], chunks: tuple[int, ...], ops: list[tuple[str, tuple[slice, ...]]] ) -> None: # Test that only the required chunks are accessed during basic selection operations # shape: array shape # chunks: chunk size # ops: list of tuples with (optype, tuple of slices) # optype = "__getitem__" or "__setitem__", tuple length must match number of dims # Use a counting dict as the backing store so we can track the items access store = await CountingDict.open() z = zarr_array_from_numpy_array(StorePath(store), np.zeros(shape), chunk_shape=chunks) for ii, (optype, slices) in enumerate(ops): # Resolve the slices into the accessed chunks for each dimension chunks_per_dim = [] for N, C, sl in zip(shape, chunks, slices, strict=True): chunk_ind = np.arange(N, dtype=int)[sl] // C chunks_per_dim.append(np.unique(chunk_ind)) # Combine and generate the cartesian product to determine the chunks keys that # will be accessed chunks_accessed = [".".join(map(str, comb)) for comb in itertools.product(*chunks_per_dim)] counts_before = store.counter.copy() # Perform the operation if optype == "__getitem__": z[slices] else: z[slices] = ii # Get the change in counts delta_counts = store.counter - counts_before # Check that the access counts for the operation have increased by one for all # the chunks we expect to be included for ci in chunks_accessed: assert delta_counts.pop((optype, ci)) == 1 # If the chunk was partially written to it will also have been read once. 
We # don't determine if the chunk was actually partial here, just that the # counts are consistent that this might have happened if optype == "__setitem__": assert ("__getitem__", ci) not in delta_counts or delta_counts.pop( ("__getitem__", ci) ) == 1 # Check that no other chunks were accessed assert len(delta_counts) == 0 @pytest.mark.parametrize( "selection", [ # basic selection [...], [1, ...], [slice(None)], [1, 3], [[1, 2, 3], 9], [np.arange(1000)], [slice(5, 15)], [slice(2, 4), 4], [[1, 3]], # mask selection [np.tile([True, False], (1000, 5))], [np.full((1000, 10), False)], # coordinate selection [[1, 2, 3, 4], [5, 6, 7, 8]], [[100, 200, 300], [4, 5, 6]], ], ) def test_indexing_equals_numpy(store: StorePath, selection: Selection) -> None: a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) # note: in python 3.10 a[*selection] is not valid unpacking syntax expected = a[*selection,] actual = z[*selection,] assert_array_equal(expected, actual, err_msg=f"selection: {selection}") @pytest.mark.parametrize( "selection", [ [np.tile([True, False], 500), np.tile([True, False], 5)], [np.full(1000, False), np.tile([True, False], 5)], [np.full(1000, True), np.full(10, True)], [np.full(1000, True), [True, False] * 5], ], ) def test_orthogonal_bool_indexing_like_numpy_ix( store: StorePath, selection: list[npt.ArrayLike] ) -> None: a = np.arange(10000, dtype=int).reshape(1000, 10) z = zarr_array_from_numpy_array(store, a, chunk_shape=(300, 3)) expected = a[np.ix_(*selection)] # note: in python 3.10 z[*selection] is not valid unpacking syntax actual = z[*selection,] assert_array_equal(expected, actual, err_msg=f"{selection=}") @pytest.mark.parametrize("ndim", [1, 2, 3]) @pytest.mark.parametrize("origin_0d", [None, (0,), (1,)]) @pytest.mark.parametrize("selection_shape_0d", [None, (2,), (3,)]) def test_iter_grid( ndim: int, origin_0d: tuple[int] | None, selection_shape_0d: tuple[int] | None ) -> None: """ Test that iter_grid works as expected for 1, 2, and 3 dimensions. """ grid_shape = (5,) * ndim if origin_0d is not None: origin_kwarg = origin_0d * ndim origin = origin_kwarg else: origin_kwarg = None origin = (0,) * ndim if selection_shape_0d is not None: selection_shape_kwarg = selection_shape_0d * ndim selection_shape = selection_shape_kwarg else: selection_shape_kwarg = None selection_shape = tuple(gs - o for gs, o in zip(grid_shape, origin, strict=False)) observed = tuple( _iter_grid(grid_shape, origin=origin_kwarg, selection_shape=selection_shape_kwarg) ) # generate a numpy array of indices, and index it coord_array = np.array(list(itertools.product(*[range(s) for s in grid_shape]))).reshape( (*grid_shape, ndim) ) coord_array_indexed = coord_array[ tuple(slice(o, o + s, 1) for o, s in zip(origin, selection_shape, strict=False)) + (range(ndim),) ] expected = tuple(map(tuple, coord_array_indexed.reshape(-1, ndim).tolist())) assert observed == expected def test_iter_grid_invalid() -> None: """ Ensure that a selection_shape that exceeds the grid_shape + origin produces an indexing error. 
""" with pytest.raises(IndexError): list(_iter_grid((5,), origin=(0,), selection_shape=(10,))) def test_indexing_with_zarr_array(store: StorePath) -> None: # regression test for https://github.com/zarr-developers/zarr-python/issues/2133 a = np.arange(10) za = zarr.array(a, chunks=2, store=store, path="a") ix = [False, True, False, True, False, True, False, True, False, True] ii = [0, 2, 4, 5] zix = zarr.array(ix, chunks=2, store=store, dtype="bool", path="ix") zii = zarr.array(ii, chunks=2, store=store, dtype="i4", path="ii") assert_array_equal(a[ix], za[zix]) assert_array_equal(a[ix], za.oindex[zix]) assert_array_equal(a[ix], za.vindex[zix]) assert_array_equal(a[ii], za[zii]) assert_array_equal(a[ii], za.oindex[zii]) @pytest.mark.parametrize("store", ["local", "memory"], indirect=["store"]) @pytest.mark.parametrize("shape", [(0, 2, 3), (0), (3, 0)]) def test_zero_sized_chunks(store: StorePath, shape: list[int]) -> None: z = zarr.create_array(store=store, shape=shape, chunks=shape, zarr_format=3, dtype="f8") z[...] = 42 assert_array_equal(z[...], np.zeros(shape, dtype="f8")) @pytest.mark.parametrize("store", ["memory"], indirect=["store"]) def test_vectorized_indexing_incompatible_shape(store) -> None: # GH2469 shape = (4, 4) chunks = (2, 2) fill_value = 32767 arr = zarr.create( shape, store=store, chunks=chunks, dtype=np.int16, fill_value=fill_value, codecs=[zarr.codecs.BytesCodec(), zarr.codecs.BloscCodec()], ) with pytest.raises(ValueError, match="Attempting to set"): arr[np.array([1, 2]), np.array([1, 2])] = np.array([[-1, -2], [-3, -4]]) zarr-python-3.0.6/tests/test_info.py000066400000000000000000000101671476711733500175460ustar00rootroot00000000000000import textwrap import numpy as np import pytest from zarr.codecs.bytes import BytesCodec from zarr.core._info import ArrayInfo, GroupInfo, human_readable_size from zarr.core.common import ZarrFormat ZARR_FORMATS = [2, 3] @pytest.mark.parametrize("zarr_format", ZARR_FORMATS) def test_group_info_repr(zarr_format: ZarrFormat) -> None: info = GroupInfo( _name="a", _store_type="MemoryStore", _read_only=False, _zarr_format=zarr_format ) result = repr(info) expected = textwrap.dedent(f"""\ Name : a Type : Group Zarr format : {zarr_format} Read-only : False Store type : MemoryStore""") assert result == expected @pytest.mark.parametrize("zarr_format", ZARR_FORMATS) def test_group_info_complete(zarr_format: ZarrFormat) -> None: info = GroupInfo( _name="a", _store_type="MemoryStore", _zarr_format=zarr_format, _read_only=False, _count_arrays=10, _count_groups=4, _count_members=14, ) result = repr(info) expected = textwrap.dedent(f"""\ Name : a Type : Group Zarr format : {zarr_format} Read-only : False Store type : MemoryStore No. members : 14 No. arrays : 10 No. 
groups : 4""") assert result == expected @pytest.mark.parametrize("zarr_format", ZARR_FORMATS) def test_array_info(zarr_format: ZarrFormat) -> None: info = ArrayInfo( _zarr_format=zarr_format, _data_type=np.dtype("int32"), _shape=(100, 100), _chunk_shape=(10, 100), _order="C", _read_only=True, _store_type="MemoryStore", _serializer=BytesCodec(), ) result = repr(info) assert result == textwrap.dedent(f"""\ Type : Array Zarr format : {zarr_format} Data type : int32 Shape : (100, 100) Chunk shape : (10, 100) Order : C Read-only : True Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : ()""") @pytest.mark.parametrize("zarr_format", ZARR_FORMATS) @pytest.mark.parametrize("bytes_things", [(1_000_000, "976.6K", 500_000, "500000", "2.0", 5)]) def test_array_info_complete( zarr_format: ZarrFormat, bytes_things: tuple[int, str, int, str, str, int] ) -> None: ( count_bytes, count_bytes_formatted, count_bytes_stored, count_bytes_stored_formatted, storage_ratio_formatted, count_chunks_initialized, ) = bytes_things info = ArrayInfo( _zarr_format=zarr_format, _data_type=np.dtype("int32"), _shape=(100, 100), _chunk_shape=(10, 100), _order="C", _read_only=True, _store_type="MemoryStore", _serializer=BytesCodec(), _count_bytes=count_bytes, _count_bytes_stored=count_bytes_stored, _count_chunks_initialized=count_chunks_initialized, ) result = repr(info) assert result == textwrap.dedent(f"""\ Type : Array Zarr format : {zarr_format} Data type : int32 Shape : (100, 100) Chunk shape : (10, 100) Order : C Read-only : True Store type : MemoryStore Filters : () Serializer : BytesCodec(endian=) Compressors : () No. bytes : {count_bytes} ({count_bytes_formatted}) No. bytes stored : {count_bytes_stored_formatted} Storage ratio : {storage_ratio_formatted} Chunks Initialized : 5""") @pytest.mark.parametrize( ("size", "expected"), [ (1, "1"), (2**10, "1.0K"), (2**20, "1.0M"), (2**30, "1.0G"), (2**40, "1.0T"), (2**50, "1.0P"), ], ) def test_human_readable_size(size: int, expected: str) -> None: result = human_readable_size(size) assert result == expected zarr-python-3.0.6/tests/test_metadata/000077500000000000000000000000001476711733500200145ustar00rootroot00000000000000zarr-python-3.0.6/tests/test_metadata/__init__.py000066400000000000000000000000001476711733500221130ustar00rootroot00000000000000zarr-python-3.0.6/tests/test_metadata/test_consolidated.py000066400000000000000000000562721476711733500241110ustar00rootroot00000000000000from __future__ import annotations import json from typing import TYPE_CHECKING import numpy as np import pytest from numcodecs import Blosc import zarr.api.asynchronous import zarr.api.synchronous import zarr.storage from zarr.api.asynchronous import ( AsyncGroup, consolidate_metadata, group, open, open_consolidated, ) from zarr.core.buffer import cpu, default_buffer_prototype from zarr.core.group import ConsolidatedMetadata, GroupMetadata from zarr.core.metadata import ArrayV3Metadata from zarr.core.metadata.v2 import ArrayV2Metadata from zarr.storage import StorePath if TYPE_CHECKING: from zarr.abc.store import Store from zarr.core.common import ZarrFormat @pytest.fixture async def memory_store_with_hierarchy(memory_store: Store) -> None: g = await group(store=memory_store, attributes={"foo": "bar"}) dtype = "uint8" await g.create_array(name="air", shape=(1, 2, 3), dtype=dtype) await g.create_array(name="lat", shape=(1,), dtype=dtype) await g.create_array(name="lon", shape=(2,), dtype=dtype) await g.create_array(name="time", shape=(3,), dtype=dtype) child = 
await g.create_group("child", attributes={"key": "child"}) await child.create_array("array", shape=(4, 4), attributes={"key": "child"}, dtype=dtype) grandchild = await child.create_group("grandchild", attributes={"key": "grandchild"}) await grandchild.create_array( "array", shape=(4, 4), attributes={"key": "grandchild"}, dtype=dtype ) await grandchild.create_group("empty_group", attributes={"key": "empty"}) return memory_store class TestConsolidated: async def test_open_consolidated_false_raises(self): store = zarr.storage.MemoryStore() with pytest.raises(TypeError, match="use_consolidated"): await zarr.api.asynchronous.open_consolidated(store, use_consolidated=False) def test_open_consolidated_false_raises_sync(self): store = zarr.storage.MemoryStore() with pytest.raises(TypeError, match="use_consolidated"): zarr.open_consolidated(store, use_consolidated=False) async def test_consolidated(self, memory_store_with_hierarchy: Store) -> None: # TODO: Figure out desired keys in # TODO: variety in the hierarchies # More nesting # arrays under arrays # single array # etc. await consolidate_metadata(memory_store_with_hierarchy) group2 = await AsyncGroup.open(memory_store_with_hierarchy) array_metadata = { "attributes": {}, "chunk_key_encoding": { "configuration": {"separator": "/"}, "name": "default", }, "codecs": ( {"configuration": {"endian": "little"}, "name": "bytes"}, {"configuration": {"level": 0, "checksum": False}, "name": "zstd"}, ), "data_type": "uint8", "fill_value": 0, "node_type": "array", # "shape": (1, 2, 3), "zarr_format": 3, } expected = GroupMetadata( attributes={"foo": "bar"}, consolidated_metadata=ConsolidatedMetadata( kind="inline", must_understand=False, metadata={ "air": ArrayV3Metadata.from_dict( { "shape": (1, 2, 3), "chunk_grid": { "configuration": {"chunk_shape": (1, 2, 3)}, "name": "regular", }, **array_metadata, } ), "lat": ArrayV3Metadata.from_dict( { "shape": (1,), "chunk_grid": { "configuration": {"chunk_shape": (1,)}, "name": "regular", }, **array_metadata, } ), "lon": ArrayV3Metadata.from_dict( { "shape": (2,), "chunk_grid": { "configuration": {"chunk_shape": (2,)}, "name": "regular", }, **array_metadata, } ), "time": ArrayV3Metadata.from_dict( { "shape": (3,), "chunk_grid": { "configuration": {"chunk_shape": (3,)}, "name": "regular", }, **array_metadata, } ), "child": GroupMetadata( attributes={"key": "child"}, consolidated_metadata=ConsolidatedMetadata( metadata={ "array": ArrayV3Metadata.from_dict( { **array_metadata, "attributes": {"key": "child"}, "shape": (4, 4), "chunk_grid": { "configuration": {"chunk_shape": (4, 4)}, "name": "regular", }, } ), "grandchild": GroupMetadata( attributes={"key": "grandchild"}, consolidated_metadata=ConsolidatedMetadata( metadata={ # known to be empty child group "empty_group": GroupMetadata( consolidated_metadata=ConsolidatedMetadata( metadata={} ), attributes={"key": "empty"}, ), "array": ArrayV3Metadata.from_dict( { **array_metadata, "attributes": {"key": "grandchild"}, "shape": (4, 4), "chunk_grid": { "configuration": {"chunk_shape": (4, 4)}, "name": "regular", }, } ), } ), ), }, ), ), }, ), ) assert group2.metadata == expected group3 = await open(store=memory_store_with_hierarchy) assert group3.metadata == expected group4 = await open_consolidated(store=memory_store_with_hierarchy) assert group4.metadata == expected result_raw = json.loads( ( await memory_store_with_hierarchy.get( "zarr.json", prototype=default_buffer_prototype() ) ).to_bytes() )["consolidated_metadata"] assert result_raw["kind"] == "inline" assert 
sorted(result_raw["metadata"]) == [ "air", "child", "child/array", "child/grandchild", "child/grandchild/array", "child/grandchild/empty_group", "lat", "lon", "time", ] def test_consolidated_sync(self, memory_store): g = zarr.api.synchronous.group(store=memory_store, attributes={"foo": "bar"}) dtype = "uint8" g.create_array(name="air", shape=(1, 2, 3), dtype=dtype) g.create_array(name="lat", shape=(1,), dtype=dtype) g.create_array(name="lon", shape=(2,), dtype=dtype) g.create_array(name="time", shape=(3,), dtype=dtype) zarr.api.synchronous.consolidate_metadata(memory_store) group2 = zarr.api.synchronous.Group.open(memory_store) array_metadata = { "attributes": {}, "chunk_key_encoding": { "configuration": {"separator": "/"}, "name": "default", }, "codecs": ( {"configuration": {"endian": "little"}, "name": "bytes"}, {"configuration": {"level": 0, "checksum": False}, "name": "zstd"}, ), "data_type": dtype, "fill_value": 0, "node_type": "array", # "shape": (1, 2, 3), "zarr_format": 3, } expected = GroupMetadata( attributes={"foo": "bar"}, consolidated_metadata=ConsolidatedMetadata( kind="inline", must_understand=False, metadata={ "air": ArrayV3Metadata.from_dict( { "shape": (1, 2, 3), "chunk_grid": { "configuration": {"chunk_shape": (1, 2, 3)}, "name": "regular", }, **array_metadata, } ), "lat": ArrayV3Metadata.from_dict( { "shape": (1,), "chunk_grid": { "configuration": {"chunk_shape": (1,)}, "name": "regular", }, **array_metadata, } ), "lon": ArrayV3Metadata.from_dict( { "shape": (2,), "chunk_grid": { "configuration": {"chunk_shape": (2,)}, "name": "regular", }, **array_metadata, } ), "time": ArrayV3Metadata.from_dict( { "shape": (3,), "chunk_grid": { "configuration": {"chunk_shape": (3,)}, "name": "regular", }, **array_metadata, } ), }, ), ) assert group2.metadata == expected group3 = zarr.api.synchronous.open(store=memory_store) assert group3.metadata == expected group4 = zarr.api.synchronous.open_consolidated(store=memory_store) assert group4.metadata == expected async def test_not_writable_raises(self, memory_store: zarr.storage.MemoryStore) -> None: await group(store=memory_store, attributes={"foo": "bar"}) read_store = zarr.storage.MemoryStore(store_dict=memory_store._store_dict, read_only=True) with pytest.raises(ValueError, match="does not support writing"): await consolidate_metadata(read_store) async def test_non_root_node(self, memory_store_with_hierarchy: Store) -> None: await consolidate_metadata(memory_store_with_hierarchy, path="child") root = await AsyncGroup.open(memory_store_with_hierarchy) child = await AsyncGroup.open(StorePath(memory_store_with_hierarchy) / "child") assert root.metadata.consolidated_metadata is None assert child.metadata.consolidated_metadata is not None assert "air" not in child.metadata.consolidated_metadata.metadata assert "grandchild" in child.metadata.consolidated_metadata.metadata def test_consolidated_metadata_from_dict(self): data = {"must_understand": False} # missing kind with pytest.raises(ValueError, match="kind='None'"): ConsolidatedMetadata.from_dict(data) # invalid kind data["kind"] = "invalid" with pytest.raises(ValueError, match="kind='invalid'"): ConsolidatedMetadata.from_dict(data) # missing metadata data["kind"] = "inline" with pytest.raises(TypeError, match="Unexpected type for 'metadata'"): ConsolidatedMetadata.from_dict(data) data["kind"] = "inline" # empty is fine data["metadata"] = {} ConsolidatedMetadata.from_dict(data) def test_flatten(self): array_metadata = { "attributes": {}, "chunk_key_encoding": { "configuration": 
{"separator": "/"}, "name": "default", }, "codecs": ({"configuration": {"endian": "little"}, "name": "bytes"},), "data_type": "float64", "fill_value": np.float64(0.0), "node_type": "array", # "shape": (1, 2, 3), "zarr_format": 3, } metadata = ConsolidatedMetadata( kind="inline", must_understand=False, metadata={ "air": ArrayV3Metadata.from_dict( { "shape": (1, 2, 3), "chunk_grid": { "configuration": {"chunk_shape": (1, 2, 3)}, "name": "regular", }, **array_metadata, } ), "lat": ArrayV3Metadata.from_dict( { "shape": (1,), "chunk_grid": { "configuration": {"chunk_shape": (1,)}, "name": "regular", }, **array_metadata, } ), "child": GroupMetadata( attributes={"key": "child"}, consolidated_metadata=ConsolidatedMetadata( metadata={ "array": ArrayV3Metadata.from_dict( { **array_metadata, "attributes": {"key": "child"}, "shape": (4, 4), "chunk_grid": { "configuration": {"chunk_shape": (4, 4)}, "name": "regular", }, } ), "grandchild": GroupMetadata( attributes={"key": "grandchild"}, consolidated_metadata=ConsolidatedMetadata( metadata={ "array": ArrayV3Metadata.from_dict( { **array_metadata, "attributes": {"key": "grandchild"}, "shape": (4, 4), "chunk_grid": { "configuration": {"chunk_shape": (4, 4)}, "name": "regular", }, } ) } ), ), }, ), ), }, ) result = metadata.flattened_metadata expected = { "air": metadata.metadata["air"], "lat": metadata.metadata["lat"], "child": GroupMetadata( attributes={"key": "child"}, consolidated_metadata=ConsolidatedMetadata(metadata={}) ), "child/array": metadata.metadata["child"].consolidated_metadata.metadata["array"], "child/grandchild": GroupMetadata( attributes={"key": "grandchild"}, consolidated_metadata=ConsolidatedMetadata(metadata={}), ), "child/grandchild/array": ( metadata.metadata["child"] .consolidated_metadata.metadata["grandchild"] .consolidated_metadata.metadata["array"] ), } assert result == expected def test_invalid_metadata_raises(self): payload = { "kind": "inline", "must_understand": False, "metadata": { "foo": [1, 2, 3] # invalid }, } with pytest.raises(TypeError, match="key='foo', type='list'"): ConsolidatedMetadata.from_dict(payload) def test_to_dict_empty(self): meta = ConsolidatedMetadata( metadata={ "empty": GroupMetadata( attributes={"key": "empty"}, consolidated_metadata=ConsolidatedMetadata(metadata={}), ) } ) result = meta.to_dict() expected = { "kind": "inline", "must_understand": False, "metadata": { "empty": { "attributes": {"key": "empty"}, "consolidated_metadata": { "kind": "inline", "must_understand": False, "metadata": {}, }, "node_type": "group", "zarr_format": 3, } }, } assert result == expected @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_open_consolidated_raises_async(self, zarr_format: ZarrFormat): store = zarr.storage.MemoryStore() await AsyncGroup.from_store(store, zarr_format=zarr_format) with pytest.raises(ValueError): await zarr.api.asynchronous.open_consolidated(store, zarr_format=zarr_format) with pytest.raises(ValueError): await zarr.api.asynchronous.open_consolidated(store, zarr_format=None) @pytest.fixture async def v2_consolidated_metadata_empty_dataset( self, memory_store: zarr.storage.MemoryStore ) -> AsyncGroup: zgroup_bytes = cpu.Buffer.from_bytes(json.dumps({"zarr_format": 2}).encode()) zmetadata_bytes = cpu.Buffer.from_bytes( b'{"metadata":{".zgroup":{"zarr_format":2}},"zarr_consolidated_format":1}' ) return AsyncGroup._from_bytes_v2( None, zgroup_bytes, zattrs_bytes=None, consolidated_metadata_bytes=zmetadata_bytes ) async def test_consolidated_metadata_backwards_compatibility( self, 
v2_consolidated_metadata_empty_dataset ): """ Test that consolidated metadata handles a missing .zattrs key. This is necessary for backwards compatibility with zarr-python 2.x. See https://github.com/zarr-developers/zarr-python/issues/2694 """ store = zarr.storage.MemoryStore() await zarr.api.asynchronous.open(store=store, zarr_format=2) await zarr.api.asynchronous.consolidate_metadata(store) result = await zarr.api.asynchronous.open_consolidated(store, zarr_format=2) assert result.metadata == v2_consolidated_metadata_empty_dataset.metadata async def test_consolidated_metadata_v2(self): store = zarr.storage.MemoryStore() g = await AsyncGroup.from_store(store, attributes={"key": "root"}, zarr_format=2) dtype = "uint8" await g.create_array(name="a", shape=(1,), attributes={"key": "a"}, dtype=dtype) g1 = await g.create_group(name="g1", attributes={"key": "g1"}) await g1.create_group(name="g2", attributes={"key": "g2"}) await zarr.api.asynchronous.consolidate_metadata(store) result = await zarr.api.asynchronous.open_consolidated(store, zarr_format=2) expected = GroupMetadata( attributes={"key": "root"}, zarr_format=2, consolidated_metadata=ConsolidatedMetadata( metadata={ "a": ArrayV2Metadata( shape=(1,), dtype=dtype, attributes={"key": "a"}, chunks=(1,), fill_value=0, compressor=Blosc(), order="C", ), "g1": GroupMetadata( attributes={"key": "g1"}, zarr_format=2, consolidated_metadata=ConsolidatedMetadata( metadata={ "g2": GroupMetadata( attributes={"key": "g2"}, zarr_format=2, consolidated_metadata=ConsolidatedMetadata(metadata={}), ) } ), ), } ), ) assert result.metadata == expected @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_use_consolidated_false( self, memory_store: zarr.storage.MemoryStore, zarr_format: ZarrFormat ) -> None: with zarr.config.set(default_zarr_format=zarr_format): g = await group(store=memory_store, attributes={"foo": "bar"}) await g.create_group(name="a") # test a stale read await zarr.api.asynchronous.consolidate_metadata(memory_store) await g.create_group(name="b") stale = await zarr.api.asynchronous.open_group(store=memory_store) assert len([x async for x in stale.members()]) == 1 assert stale.metadata.consolidated_metadata assert list(stale.metadata.consolidated_metadata.metadata) == ["a"] # bypass stale data good = await zarr.api.asynchronous.open_group( store=memory_store, use_consolidated=False ) assert len([x async for x in good.members()]) == 2 # reconsolidate await zarr.api.asynchronous.consolidate_metadata(memory_store) good = await zarr.api.asynchronous.open_group(store=memory_store) assert len([x async for x in good.members()]) == 2 assert good.metadata.consolidated_metadata assert sorted(good.metadata.consolidated_metadata.metadata) == ["a", "b"] zarr-python-3.0.6/tests/test_metadata/test_v2.py000066400000000000000000000247661476711733500217730ustar00rootroot00000000000000from __future__ import annotations import json from typing import TYPE_CHECKING, Literal import numpy as np import pytest import zarr.api.asynchronous import zarr.storage from zarr.core.buffer import cpu from zarr.core.buffer.core import default_buffer_prototype from zarr.core.group import ConsolidatedMetadata, GroupMetadata from zarr.core.metadata import ArrayV2Metadata from zarr.core.metadata.v2 import parse_zarr_format if TYPE_CHECKING: from typing import Any from zarr.abc.codec import Codec import numcodecs def test_parse_zarr_format_valid() -> None: assert parse_zarr_format(2) == 2 @pytest.mark.parametrize("data", [None, 1, 3, 4, 5, "3"]) def 
test_parse_zarr_format_invalid(data: Any) -> None: with pytest.raises(ValueError, match=f"Invalid value. Expected 2. Got {data}"): parse_zarr_format(data) @pytest.mark.parametrize("attributes", [None, {"foo": "bar"}]) @pytest.mark.parametrize("filters", [None, (numcodecs.GZip(),)]) @pytest.mark.parametrize("compressor", [None, numcodecs.GZip()]) @pytest.mark.parametrize("fill_value", [None, 0, 1]) @pytest.mark.parametrize("order", ["C", "F"]) @pytest.mark.parametrize("dimension_separator", [".", "/", None]) def test_metadata_to_dict( compressor: Codec | None, filters: tuple[Codec] | None, fill_value: Any, order: Literal["C", "F"], dimension_separator: Literal[".", "/"] | None, attributes: dict[str, Any] | None, ) -> None: shape = (1, 2, 3) chunks = (1,) * len(shape) data_type = "|u1" metadata_dict = { "zarr_format": 2, "shape": shape, "chunks": chunks, "dtype": data_type, "order": order, "compressor": compressor, "filters": filters, "fill_value": fill_value, } if attributes is not None: metadata_dict["attributes"] = attributes if dimension_separator is not None: metadata_dict["dimension_separator"] = dimension_separator metadata = ArrayV2Metadata.from_dict(metadata_dict) observed = metadata.to_dict() expected = metadata_dict.copy() if attributes is None: assert observed["attributes"] == {} observed.pop("attributes") if dimension_separator is None: expected_dimension_sep = "." assert observed["dimension_separator"] == expected_dimension_sep observed.pop("dimension_separator") assert observed == expected def test_filters_empty_tuple_warns() -> None: metadata_dict = { "zarr_format": 2, "shape": (1,), "chunks": (1,), "dtype": "uint8", "order": "C", "compressor": None, "filters": (), "fill_value": 0, } with pytest.warns( UserWarning, match="Found an empty list of filters in the array metadata document." 
): meta = ArrayV2Metadata.from_dict(metadata_dict) assert meta.filters is None class TestConsolidated: @pytest.fixture async def v2_consolidated_metadata( self, memory_store: zarr.storage.MemoryStore ) -> zarr.storage.MemoryStore: zmetadata = { "metadata": { ".zattrs": { "Conventions": "COARDS", }, ".zgroup": {"zarr_format": 2}, "air/.zarray": { "chunks": [730], "compressor": None, "dtype": " None: data = { "_nczarr_array": {"dimrefs": ["/dim1", "/dim2"], "storage": "chunked"}, "attributes": {"key": "value"}, "chunks": [8], "compressor": None, "dtype": " None: arr = zarr.create_array( {}, shape=(10,), chunks=(10,), dtype="int32", compressors={"id": "zstd", "level": 5, "checksum": False}, zarr_format=2, ) metadata = json.loads( arr.metadata.to_buffer_dict(default_buffer_prototype())[".zarray"].to_bytes() ) assert "checksum" not in metadata["compressor"] zarr-python-3.0.6/tests/test_metadata/test_v3.py000066400000000000000000000337471476711733500217730ustar00rootroot00000000000000from __future__ import annotations import json import re from typing import TYPE_CHECKING, Literal import numpy as np import pytest from zarr.codecs.bytes import BytesCodec from zarr.core.buffer import default_buffer_prototype from zarr.core.chunk_key_encodings import DefaultChunkKeyEncoding, V2ChunkKeyEncoding from zarr.core.config import config from zarr.core.group import GroupMetadata, parse_node_type from zarr.core.metadata.v3 import ( ArrayV3Metadata, DataType, default_fill_value, parse_dimension_names, parse_fill_value, parse_zarr_format, ) from zarr.errors import MetadataValidationError if TYPE_CHECKING: from collections.abc import Sequence from typing import Any from zarr.abc.codec import Codec from zarr.core.common import JSON from zarr.core.metadata.v3 import ( parse_node_type_array, ) bool_dtypes = ("bool",) int_dtypes = ( "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", ) float_dtypes = ( "float16", "float32", "float64", ) complex_dtypes = ("complex64", "complex128") vlen_dtypes = ("string", "bytes") dtypes = (*bool_dtypes, *int_dtypes, *float_dtypes, *complex_dtypes, *vlen_dtypes) @pytest.mark.parametrize("data", [None, 1, 2, 4, 5, "3"]) def test_parse_zarr_format_invalid(data: Any) -> None: with pytest.raises( ValueError, match=f"Invalid value for 'zarr_format'. Expected '3'. Got '{data}'." ): parse_zarr_format(data) def test_parse_zarr_format_valid() -> None: assert parse_zarr_format(3) == 3 def test_parse_node_type_valid() -> None: assert parse_node_type("array") == "array" assert parse_node_type("group") == "group" @pytest.mark.parametrize("node_type", [None, 2, "other"]) def test_parse_node_type_invalid(node_type: Any) -> None: with pytest.raises( MetadataValidationError, match=f"Invalid value for 'node_type'. Expected 'array or group'. Got '{node_type}'.", ): parse_node_type(node_type) @pytest.mark.parametrize("data", [None, "group"]) def test_parse_node_type_array_invalid(data: Any) -> None: with pytest.raises( ValueError, match=f"Invalid value for 'node_type'. Expected 'array'. Got '{data}'." 
): parse_node_type_array(data) def test_parse_node_typev_array_alid() -> None: assert parse_node_type_array("array") == "array" @pytest.mark.parametrize("data", [(), [1, 2, "a"], {"foo": 10}]) def parse_dimension_names_invalid(data: Any) -> None: with pytest.raises(TypeError, match="Expected either None or iterable of str,"): parse_dimension_names(data) @pytest.mark.parametrize("data", [None, ("a", "b", "c"), ["a", "a", "a"]]) def parse_dimension_names_valid(data: Sequence[str] | None) -> None: assert parse_dimension_names(data) == data @pytest.mark.parametrize("dtype_str", dtypes) def test_default_fill_value(dtype_str: str) -> None: """ Test that parse_fill_value(None, dtype) results in the 0 value for the given dtype. """ dtype = DataType(dtype_str) fill_value = default_fill_value(dtype) if dtype == DataType.string: assert fill_value == "" elif dtype == DataType.bytes: assert fill_value == b"" else: assert fill_value == dtype.to_numpy().type(0) @pytest.mark.parametrize( ("fill_value", "dtype_str"), [ (True, "bool"), (False, "bool"), (-8, "int8"), (0, "int16"), (1e10, "uint64"), (-999, "float32"), (1e32, "float64"), (float("NaN"), "float64"), (np.nan, "float64"), (np.inf, "float64"), (-1 * np.inf, "float64"), (0j, "complex64"), ], ) def test_parse_fill_value_valid(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) casts fill_value to the given dtype. """ parsed = parse_fill_value(fill_value, dtype_str) if np.isnan(fill_value): assert np.isnan(parsed) else: assert parsed == DataType(dtype_str).to_numpy().type(fill_value) @pytest.mark.parametrize("fill_value", ["not a valid value"]) @pytest.mark.parametrize("dtype_str", [*int_dtypes, *float_dtypes, *complex_dtypes]) def test_parse_fill_value_invalid_value(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) raises ValueError for invalid values. This test excludes bool because the bool constructor takes anything. """ with pytest.raises(ValueError): parse_fill_value(fill_value, dtype_str) @pytest.mark.parametrize("fill_value", [[1.0, 0.0], [0, 1], complex(1, 1), np.complex64(0)]) @pytest.mark.parametrize("dtype_str", [*complex_dtypes]) def test_parse_fill_value_complex(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) correctly handles complex values represented as length-2 sequences """ dtype = DataType(dtype_str) if isinstance(fill_value, list): expected = dtype.to_numpy().type(complex(*fill_value)) else: expected = dtype.to_numpy().type(fill_value) assert expected == parse_fill_value(fill_value, dtype_str) @pytest.mark.parametrize("fill_value", [[1.0, 0.0, 3.0], [0, 1, 3], [1]]) @pytest.mark.parametrize("dtype_str", [*complex_dtypes]) def test_parse_fill_value_complex_invalid(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) correctly rejects sequences with length not equal to 2 """ match = ( f"Got an invalid fill value for complex data type {dtype_str}." f"Expected a sequence with 2 elements, but {fill_value} has " f"length {len(fill_value)}." ) with pytest.raises(ValueError, match=re.escape(match)): parse_fill_value(fill_value=fill_value, dtype=dtype_str) @pytest.mark.parametrize("fill_value", [{"foo": 10}]) @pytest.mark.parametrize("dtype_str", [*int_dtypes, *float_dtypes, *complex_dtypes]) def test_parse_fill_value_invalid_type(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) raises TypeError for invalid non-sequential types. 
This test excludes bool because the bool constructor takes anything. """ with pytest.raises(ValueError, match=r"fill value .* is not valid for dtype .*"): parse_fill_value(fill_value, dtype_str) @pytest.mark.parametrize( "fill_value", [ [ 1, ], (1, 23, 4), ], ) @pytest.mark.parametrize("dtype_str", [*int_dtypes, *float_dtypes]) def test_parse_fill_value_invalid_type_sequence(fill_value: Any, dtype_str: str) -> None: """ Test that parse_fill_value(fill_value, dtype) raises TypeError for invalid sequential types. This test excludes bool because the bool constructor takes anything, and complex because complex values can be created from length-2 sequences. """ match = f"Cannot parse non-string sequence {fill_value} as a scalar with type {dtype_str}" with pytest.raises(TypeError, match=re.escape(match)): parse_fill_value(fill_value, dtype_str) @pytest.mark.parametrize("chunk_grid", ["regular"]) @pytest.mark.parametrize("attributes", [None, {"foo": "bar"}]) @pytest.mark.parametrize("codecs", [[BytesCodec()]]) @pytest.mark.parametrize("fill_value", [0, 1]) @pytest.mark.parametrize("chunk_key_encoding", ["v2", "default"]) @pytest.mark.parametrize("dimension_separator", [".", "/", None]) @pytest.mark.parametrize("dimension_names", ["nones", "strings", "missing"]) @pytest.mark.parametrize("storage_transformers", [None, ()]) def test_metadata_to_dict( chunk_grid: str, codecs: list[Codec], fill_value: Any, chunk_key_encoding: Literal["v2", "default"], dimension_separator: Literal[".", "/"] | None, dimension_names: Literal["nones", "strings", "missing"], attributes: dict[str, Any] | None, storage_transformers: tuple[dict[str, JSON]] | None, ) -> None: shape = (1, 2, 3) data_type = DataType.uint8 if chunk_grid == "regular": cgrid = {"name": "regular", "configuration": {"chunk_shape": (1, 1, 1)}} cke: dict[str, Any] cke_name_dict = {"name": chunk_key_encoding} if dimension_separator is not None: cke = cke_name_dict | {"configuration": {"separator": dimension_separator}} else: cke = cke_name_dict dnames: tuple[str | None, ...] 
| None if dimension_names == "strings": dnames = tuple(map(str, range(len(shape)))) elif dimension_names == "missing": dnames = None elif dimension_names == "nones": dnames = (None,) * len(shape) metadata_dict = { "zarr_format": 3, "node_type": "array", "shape": shape, "chunk_grid": cgrid, "data_type": data_type, "chunk_key_encoding": cke, "codecs": tuple(c.to_dict() for c in codecs), "fill_value": fill_value, "storage_transformers": storage_transformers, } if attributes is not None: metadata_dict["attributes"] = attributes if dnames is not None: metadata_dict["dimension_names"] = dnames metadata = ArrayV3Metadata.from_dict(metadata_dict) observed = metadata.to_dict() expected = metadata_dict.copy() # if unset or None or (), storage_transformers gets normalized to () assert observed["storage_transformers"] == () observed.pop("storage_transformers") expected.pop("storage_transformers") if attributes is None: assert observed["attributes"] == {} observed.pop("attributes") if dimension_separator is None: if chunk_key_encoding == "default": expected_cke_dict = DefaultChunkKeyEncoding(separator="/").to_dict() else: expected_cke_dict = V2ChunkKeyEncoding(separator=".").to_dict() assert observed["chunk_key_encoding"] == expected_cke_dict observed.pop("chunk_key_encoding") expected.pop("chunk_key_encoding") assert observed == expected @pytest.mark.parametrize("indent", [2, 4, None]) def test_json_indent(indent: int): with config.set({"json_indent": indent}): m = GroupMetadata() d = m.to_buffer_dict(default_buffer_prototype())["zarr.json"].to_bytes() assert d == json.dumps(json.loads(d), indent=indent).encode() # @pytest.mark.parametrize("fill_value", [-1, 0, 1, 2932897]) # @pytest.mark.parametrize("precision", ["ns", "D"]) # async def test_datetime_metadata(fill_value: int, precision: str) -> None: # metadata_dict = { # "zarr_format": 3, # "node_type": "array", # "shape": (1,), # "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": (1,)}}, # "data_type": f" None: metadata_dict = { "zarr_format": 3, "node_type": "array", "shape": (1,), "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": (1,)}}, "data_type": " None: metadata_dict = { "zarr_format": 3, "node_type": "array", "shape": (1,), "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": (1,)}}, "data_type": data_type, "chunk_key_encoding": {"name": "default", "separator": "."}, "codecs": (), "fill_value": fill_value, # this is not a valid fill value for uint8 } with pytest.raises(ValueError, match=r"fill value .* is not valid for dtype .*"): ArrayV3Metadata.from_dict(metadata_dict) @pytest.mark.parametrize("fill_value", [("NaN"), "Infinity", "-Infinity"]) async def test_special_float_fill_values(fill_value: str) -> None: metadata_dict = { "zarr_format": 3, "node_type": "array", "shape": (1,), "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": (1,)}}, "data_type": "float64", "chunk_key_encoding": {"name": "default", "separator": "."}, "codecs": [{"name": "bytes"}], "fill_value": fill_value, # this is not a valid fill value for uint8 } m = ArrayV3Metadata.from_dict(metadata_dict) d = json.loads(m.to_buffer_dict(default_buffer_prototype())["zarr.json"].to_bytes()) assert m.fill_value is not None if fill_value == "NaN": assert np.isnan(m.fill_value) assert d["fill_value"] == "NaN" elif fill_value == "Infinity": assert np.isposinf(m.fill_value) assert d["fill_value"] == "Infinity" elif fill_value == "-Infinity": assert np.isneginf(m.fill_value) assert d["fill_value"] == "-Infinity" 
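# --- Illustrative sketch, not part of the original suite -----------------------
# A minimal end-to-end companion to test_special_float_fill_values above: JSON
# has no literal for NaN, so a float("nan") fill value is persisted as the
# string "NaN" in the v3 metadata document. Assumes the zarr.create_array(...)
# calling convention used elsewhere in these tests; named without a test_
# prefix so pytest does not collect it.
def _example_special_float_fill_value_roundtrip() -> None:
    import json

    import numpy as np

    import zarr
    from zarr.core.buffer import default_buffer_prototype

    arr = zarr.create_array(
        {}, shape=(1,), chunks=(1,), dtype="float64", fill_value=float("nan"), zarr_format=3
    )
    doc = json.loads(
        arr.metadata.to_buffer_dict(default_buffer_prototype())["zarr.json"].to_bytes()
    )
    assert doc["fill_value"] == "NaN"  # serialized as a string, matching the checks above
    assert np.isnan(arr.metadata.fill_value)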
@pytest.mark.parametrize("dtype_str", dtypes) def test_dtypes(dtype_str: str) -> None: dt = DataType(dtype_str) np_dtype = dt.to_numpy() if dtype_str not in vlen_dtypes: # we can round trip "normal" dtypes assert dt == DataType.from_numpy(np_dtype) assert dt.byte_count == np_dtype.itemsize assert dt.has_endianness == (dt.byte_count > 1) else: # return type for vlen types may vary depending on numpy version assert dt.byte_count is None zarr-python-3.0.6/tests/test_properties.py000066400000000000000000000125731476711733500210120ustar00rootroot00000000000000import numpy as np import pytest from numpy.testing import assert_array_equal from zarr.core.buffer import default_buffer_prototype pytest.importorskip("hypothesis") import hypothesis.extra.numpy as npst import hypothesis.strategies as st from hypothesis import assume, given from zarr.abc.store import Store from zarr.core.metadata import ArrayV2Metadata, ArrayV3Metadata from zarr.core.sync import sync from zarr.testing.strategies import ( array_metadata, arrays, basic_indices, numpy_arrays, orthogonal_indices, simple_arrays, stores, zarr_formats, ) @given(data=st.data(), zarr_format=zarr_formats) def test_roundtrip(data: st.DataObject, zarr_format: int) -> None: nparray = data.draw(numpy_arrays(zarr_formats=st.just(zarr_format))) zarray = data.draw(arrays(arrays=st.just(nparray), zarr_formats=st.just(zarr_format))) assert_array_equal(nparray, zarray[:]) @given(array=arrays()) def test_array_creates_implicit_groups(array): path = array.path ancestry = path.split("/")[:-1] for i in range(len(ancestry)): parent = "/".join(ancestry[: i + 1]) if array.metadata.zarr_format == 2: assert ( sync(array.store.get(f"{parent}/.zgroup", prototype=default_buffer_prototype())) is not None ) elif array.metadata.zarr_format == 3: assert ( sync(array.store.get(f"{parent}/zarr.json", prototype=default_buffer_prototype())) is not None ) @given(data=st.data()) def test_basic_indexing(data: st.DataObject) -> None: zarray = data.draw(simple_arrays()) nparray = zarray[:] indexer = data.draw(basic_indices(shape=nparray.shape)) actual = zarray[indexer] assert_array_equal(nparray[indexer], actual) new_data = data.draw(numpy_arrays(shapes=st.just(actual.shape), dtype=nparray.dtype)) zarray[indexer] = new_data nparray[indexer] = new_data assert_array_equal(nparray, zarray[:]) @given(data=st.data()) def test_oindex(data: st.DataObject) -> None: # integer_array_indices can't handle 0-size dimensions. zarray = data.draw(simple_arrays(shapes=npst.array_shapes(max_dims=4, min_side=1))) nparray = zarray[:] zindexer, npindexer = data.draw(orthogonal_indices(shape=nparray.shape)) actual = zarray.oindex[zindexer] assert_array_equal(nparray[npindexer], actual) assume(zarray.shards is None) # GH2834 for idxr in npindexer: if isinstance(idxr, np.ndarray) and idxr.size != np.unique(idxr).size: # behaviour of setitem with repeated indices is not guaranteed in practice assume(False) new_data = data.draw(numpy_arrays(shapes=st.just(actual.shape), dtype=nparray.dtype)) nparray[npindexer] = new_data zarray.oindex[zindexer] = new_data assert_array_equal(nparray, zarray[:]) @given(data=st.data()) def test_vindex(data: st.DataObject) -> None: # integer_array_indices can't handle 0-size dimensions. 
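    # (Illustrative aside, not in the original file.) The two fancy-indexing
    # flavours exercised in this module combine index arrays differently; for a
    # 2-D array z:
    #   z.vindex[[0, 1], [0, 1]]  # elementwise pairing -> elements (0, 0) and (1, 1)
    #   z.oindex[[0, 1], [0, 1]]  # outer product -> the 2x2 block rows {0,1} x cols {0,1}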
zarray = data.draw(simple_arrays(shapes=npst.array_shapes(max_dims=4, min_side=1))) nparray = zarray[:] indexer = data.draw( npst.integer_array_indices( shape=nparray.shape, result_shape=npst.array_shapes(min_side=1, max_dims=None) ) ) actual = zarray.vindex[indexer] assert_array_equal(nparray[indexer], actual) # FIXME! # when the indexer is such that a value gets overwritten multiple times, # I think the output depends on chunking. # new_data = data.draw(npst.arrays(shape=st.just(actual.shape), dtype=nparray.dtype)) # nparray[indexer] = new_data # zarray.vindex[indexer] = new_data # assert_array_equal(nparray, zarray[:]) @given(store=stores, meta=array_metadata()) # type: ignore[misc] async def test_roundtrip_array_metadata( store: Store, meta: ArrayV2Metadata | ArrayV3Metadata ) -> None: asdict = meta.to_buffer_dict(prototype=default_buffer_prototype()) for key, expected in asdict.items(): await store.set(f"0/{key}", expected) actual = await store.get(f"0/{key}", prototype=default_buffer_prototype()) assert actual == expected @given(store=stores, meta=array_metadata()) # type: ignore[misc] def test_array_metadata_meets_spec(store: Store, meta: ArrayV2Metadata | ArrayV3Metadata) -> None: # TODO: fill this out asdict = meta.to_dict() if isinstance(meta, ArrayV2Metadata): assert asdict["filters"] != () assert asdict["filters"] is None or isinstance(asdict["filters"], tuple) assert asdict["zarr_format"] == 2 elif isinstance(meta, ArrayV3Metadata): assert asdict["zarr_format"] == 3 else: raise NotImplementedError # @st.composite # def advanced_indices(draw, *, shape): # basic_idxr = draw( # basic_indices( # shape=shape, min_dims=len(shape), max_dims=len(shape), allow_ellipsis=False # ).filter(lambda x: isinstance(x, tuple)) # ) # int_idxr = draw( # npst.integer_array_indices(shape=shape, result_shape=npst.array_shapes(max_dims=1)) # ) # args = tuple( # st.sampled_from((l, r)) for l, r in zip_longest(basic_idxr, int_idxr, fillvalue=slice(None)) # ) # return draw(st.tuples(*args)) # @given(st.data()) # def test_roundtrip_object_array(data): # nparray = data.draw(np_arrays) # zarray = data.draw(arrays(arrays=st.just(nparray))) # assert_array_equal(nparray, zarray[:]) zarr-python-3.0.6/tests/test_store/000077500000000000000000000000001476711733500173705ustar00rootroot00000000000000zarr-python-3.0.6/tests/test_store/__init__.py000066400000000000000000000000001476711733500214670ustar00rootroot00000000000000zarr-python-3.0.6/tests/test_store/test_core.py000066400000000000000000000171021476711733500217320ustar00rootroot00000000000000import tempfile from pathlib import Path import pytest from _pytest.compat import LEGACY_PATH from zarr import Group from zarr.core.common import AccessModeLiteral, ZarrFormat from zarr.storage import FsspecStore, LocalStore, MemoryStore, StoreLike, StorePath from zarr.storage._common import contains_array, contains_group, make_store_path from zarr.storage._utils import _join_paths, _normalize_path_keys, _normalize_paths, normalize_path @pytest.mark.parametrize("path", ["foo", "foo/bar"]) @pytest.mark.parametrize("write_group", [True, False]) @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_contains_group( local_store, path: str, write_group: bool, zarr_format: ZarrFormat ) -> None: """ Test that the contains_group method correctly reports the existence of a group. 
""" root = Group.from_store(store=local_store, zarr_format=zarr_format) if write_group: root.create_group(path) store_path = StorePath(local_store, path=path) assert await contains_group(store_path, zarr_format=zarr_format) == write_group @pytest.mark.parametrize("path", ["foo", "foo/bar"]) @pytest.mark.parametrize("write_array", [True, False]) @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_contains_array( local_store, path: str, write_array: bool, zarr_format: ZarrFormat ) -> None: """ Test that the contains array method correctly reports the existence of an array. """ root = Group.from_store(store=local_store, zarr_format=zarr_format) if write_array: root.create_array(path, shape=(100,), chunks=(10,), dtype="i4") store_path = StorePath(local_store, path=path) assert await contains_array(store_path, zarr_format=zarr_format) == write_array @pytest.mark.parametrize("func", [contains_array, contains_group]) async def test_contains_invalid_format_raises(local_store, func: callable) -> None: """ Test contains_group and contains_array raise errors for invalid zarr_formats """ store_path = StorePath(local_store) with pytest.raises(ValueError): assert await func(store_path, zarr_format="3.0") @pytest.mark.parametrize("path", [None, "", "bar"]) async def test_make_store_path_none(path: str) -> None: """ Test that creating a store_path with None creates a memorystore """ store_path = await make_store_path(None, path=path) assert isinstance(store_path.store, MemoryStore) assert store_path.path == normalize_path(path) @pytest.mark.parametrize("path", [None, "", "bar"]) @pytest.mark.parametrize("store_type", [str, Path]) @pytest.mark.parametrize("mode", ["r", "w"]) async def test_make_store_path_local( tmpdir: LEGACY_PATH, store_type: type[str] | type[Path] | type[LocalStore], path: str, mode: AccessModeLiteral, ) -> None: """ Test the various ways of invoking make_store_path that create a LocalStore """ store_like = store_type(str(tmpdir)) store_path = await make_store_path(store_like, path=path, mode=mode) assert isinstance(store_path.store, LocalStore) assert Path(store_path.store.root) == Path(tmpdir) assert store_path.path == normalize_path(path) assert store_path.read_only == (mode == "r") @pytest.mark.parametrize("path", [None, "", "bar"]) @pytest.mark.parametrize("mode", ["r", "w"]) async def test_make_store_path_store_path( tmpdir: LEGACY_PATH, path: str, mode: AccessModeLiteral ) -> None: """ Test invoking make_store_path when the input is another store_path. In particular we want to ensure that a new path is handled correctly. """ ro = mode == "r" store_like = await StorePath.open(LocalStore(str(tmpdir), read_only=ro), path="root", mode=mode) store_path = await make_store_path(store_like, path=path, mode=mode) assert isinstance(store_path.store, LocalStore) assert Path(store_path.store.root) == Path(tmpdir) path_normalized = normalize_path(path) assert store_path.path == (store_like / path_normalized).path assert store_path.read_only == ro @pytest.mark.parametrize("modes", [(True, "w"), (False, "x")]) async def test_store_path_invalid_mode_raises(tmpdir: LEGACY_PATH, modes: tuple) -> None: """ Test that ValueErrors are raise for invalid mode. 
""" with pytest.raises(ValueError): await StorePath.open(LocalStore(str(tmpdir), read_only=modes[0]), path=None, mode=modes[1]) async def test_make_store_path_invalid() -> None: """ Test that invalid types raise TypeError """ with pytest.raises(TypeError): await make_store_path(1) # type: ignore[arg-type] async def test_make_store_path_fsspec(monkeypatch) -> None: pytest.importorskip("fsspec") store_path = await make_store_path("http://foo.com/bar") assert isinstance(store_path.store, FsspecStore) @pytest.mark.parametrize( "store_like", [ None, tempfile.TemporaryDirectory().name, Path(tempfile.TemporaryDirectory().name), StorePath(store=MemoryStore(store_dict={}), path="/"), MemoryStore(store_dict={}), {}, ], ) async def test_make_store_path_storage_options_raises(store_like: StoreLike) -> None: with pytest.raises(TypeError, match="storage_options"): await make_store_path(store_like, storage_options={"foo": "bar"}) async def test_unsupported() -> None: with pytest.raises(TypeError, match="Unsupported type for store_like: 'int'"): await make_store_path(1) # type: ignore[arg-type] @pytest.mark.parametrize( "path", [ "/foo/bar", "//foo/bar", "foo///bar", "foo/bar///", Path("foo/bar"), b"foo/bar", ], ) def test_normalize_path_valid(path: str | bytes | Path) -> None: assert normalize_path(path) == "foo/bar" def test_normalize_path_upath() -> None: upath = pytest.importorskip("upath") assert normalize_path(upath.UPath("foo/bar")) == "foo/bar" def test_normalize_path_none(): assert normalize_path(None) == "" @pytest.mark.parametrize("path", [".", ".."]) def test_normalize_path_invalid(path: str): with pytest.raises(ValueError): normalize_path(path) @pytest.mark.parametrize("paths", [("", "foo"), ("foo", "bar")]) def test_join_paths(paths: tuple[str, str]) -> None: """ Test that _join_paths joins paths in a way that is robust to an empty string """ observed = _join_paths(paths) if paths[0] == "": assert observed == paths[1] else: assert observed == "/".join(paths) class TestNormalizePaths: @staticmethod def test_valid() -> None: """ Test that path normalization works as expected """ paths = ["a", "b", "c", "d", "", "//a///b//"] assert _normalize_paths(paths) == tuple([normalize_path(p) for p in paths]) @staticmethod @pytest.mark.parametrize("paths", [("", "/"), ("///a", "a")]) def test_invalid(paths: tuple[str, str]) -> None: """ Test that name collisions after normalization raise a ``ValueError`` """ msg = ( f"After normalization, the value '{paths[1]}' collides with '{paths[0]}'. " f"Both '{paths[1]}' and '{paths[0]}' normalize to the same value: '{normalize_path(paths[0])}'. " f"You should use either '{paths[1]}' or '{paths[0]}', but not both." 
) with pytest.raises(ValueError, match=msg): _normalize_paths(paths) def test_normalize_path_keys(): """ Test that ``_normalize_path_keys`` just applies the normalize_path function to each key of its input """ data = {"a": 10, "//b": 10} assert _normalize_path_keys(data) == {normalize_path(k): v for k, v in data.items()} zarr-python-3.0.6/tests/test_store/test_fsspec.py000066400000000000000000000255711476711733500222760ustar00rootroot00000000000000from __future__ import annotations import json import os from typing import TYPE_CHECKING import pytest from packaging.version import parse as parse_version import zarr.api.asynchronous from zarr.abc.store import OffsetByteRequest from zarr.core.buffer import Buffer, cpu, default_buffer_prototype from zarr.core.sync import _collect_aiterator, sync from zarr.storage import FsspecStore from zarr.testing.store import StoreTests if TYPE_CHECKING: from collections.abc import Generator import botocore.client fsspec = pytest.importorskip("fsspec") s3fs = pytest.importorskip("s3fs") requests = pytest.importorskip("requests") moto_server = pytest.importorskip("moto.moto_server.threaded_moto_server") moto = pytest.importorskip("moto") botocore = pytest.importorskip("botocore") # ### amended from s3fs ### # test_bucket_name = "test" secure_bucket_name = "test-secure" port = 5555 endpoint_url = f"http://127.0.0.1:{port}/" @pytest.fixture(scope="module") def s3_base() -> Generator[None, None, None]: # writable local S3 system # This fixture is module-scoped, meaning that we can reuse the MotoServer across all tests server = moto_server.ThreadedMotoServer(ip_address="127.0.0.1", port=port) server.start() if "AWS_SECRET_ACCESS_KEY" not in os.environ: os.environ["AWS_SECRET_ACCESS_KEY"] = "foo" if "AWS_ACCESS_KEY_ID" not in os.environ: os.environ["AWS_ACCESS_KEY_ID"] = "foo" yield server.stop() def get_boto3_client() -> botocore.client.BaseClient: # NB: we use the sync botocore client for setup session = botocore.session.Session() return session.create_client("s3", endpoint_url=endpoint_url) @pytest.fixture(autouse=True) def s3(s3_base: None) -> Generator[s3fs.S3FileSystem, None, None]: """ Quoting Martin Durant: pytest-asyncio creates a new event loop for each async test. When an async-mode s3fs instance is made from async, it will be assigned to the loop from which it is made. That means that if you use s3fs again from a subsequent test, you will have the same identical instance, but be running on a different loop - which fails. For the rest: it's very convenient to clean up the state of the store between tests, make sure we start off blank each time. 
https://github.com/zarr-developers/zarr-python/pull/1785#discussion_r1634856207 """ client = get_boto3_client() client.create_bucket(Bucket=test_bucket_name, ACL="public-read") s3fs.S3FileSystem.clear_instance_cache() s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"endpoint_url": endpoint_url}) session = sync(s3.set_session()) s3.invalidate_cache() yield s3 requests.post(f"{endpoint_url}/moto-api/reset") client.close() sync(session.close()) # ### end from s3fs ### # async def test_basic() -> None: store = FsspecStore.from_url( f"s3://{test_bucket_name}/foo/spam/", storage_options={"endpoint_url": endpoint_url, "anon": False}, ) assert store.fs.asynchronous assert store.path == f"{test_bucket_name}/foo/spam" assert await _collect_aiterator(store.list()) == () assert not await store.exists("foo") data = b"hello" await store.set("foo", cpu.Buffer.from_bytes(data)) assert await store.exists("foo") assert (await store.get("foo", prototype=default_buffer_prototype())).to_bytes() == data out = await store.get_partial_values( prototype=default_buffer_prototype(), key_ranges=[("foo", OffsetByteRequest(1))] ) assert out[0].to_bytes() == data[1:] class TestFsspecStoreS3(StoreTests[FsspecStore, cpu.Buffer]): store_cls = FsspecStore buffer_cls = cpu.Buffer @pytest.fixture def store_kwargs(self, request) -> dict[str, str | bool]: try: from fsspec import url_to_fs except ImportError: # before fsspec==2024.3.1 from fsspec.core import url_to_fs fs, path = url_to_fs( f"s3://{test_bucket_name}", endpoint_url=endpoint_url, anon=False, asynchronous=True ) return {"fs": fs, "path": path} @pytest.fixture def store(self, store_kwargs: dict[str, str | bool]) -> FsspecStore: return self.store_cls(**store_kwargs) async def get(self, store: FsspecStore, key: str) -> Buffer: # make a new, synchronous instance of the filesystem because this test is run in sync code new_fs = fsspec.filesystem( "s3", endpoint_url=store.fs.endpoint_url, anon=store.fs.anon, asynchronous=False ) return self.buffer_cls.from_bytes(new_fs.cat(f"{store.path}/{key}")) async def set(self, store: FsspecStore, key: str, value: Buffer) -> None: # make a new, synchronous instance of the filesystem because this test is run in sync code new_fs = fsspec.filesystem( "s3", endpoint_url=store.fs.endpoint_url, anon=store.fs.anon, asynchronous=False ) new_fs.write_bytes(f"{store.path}/{key}", value.to_bytes()) def test_store_repr(self, store: FsspecStore) -> None: assert str(store) == "" def test_store_supports_writes(self, store: FsspecStore) -> None: assert store.supports_writes def test_store_supports_partial_writes(self, store: FsspecStore) -> None: assert not store.supports_partial_writes def test_store_supports_listing(self, store: FsspecStore) -> None: assert store.supports_listing async def test_fsspec_store_from_uri(self, store: FsspecStore) -> None: storage_options = { "endpoint_url": endpoint_url, "anon": False, } meta = {"attributes": {"key": "value"}, "zarr_format": 3, "node_type": "group"} await store.set( "zarr.json", self.buffer_cls.from_bytes(json.dumps(meta).encode()), ) group = await zarr.api.asynchronous.open_group( store=f"s3://{test_bucket_name}", storage_options=storage_options ) assert dict(group.attrs) == {"key": "value"} meta["attributes"]["key"] = "value-2" await store.set( "directory-2/zarr.json", self.buffer_cls.from_bytes(json.dumps(meta).encode()), ) group = await zarr.api.asynchronous.open_group( store=f"s3://{test_bucket_name}/directory-2", storage_options=storage_options ) assert dict(group.attrs) == {"key": "value-2"} 
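        # (Illustrative aside, not in the original file.) The storage_options
        # mapping is handed to fsspec unchanged, so the same dict configures any
        # fsspec-backed URL, e.g.:
        #   zarr.open_group(
        #       store=f"s3://{test_bucket_name}",
        #       storage_options={"endpoint_url": endpoint_url, "anon": False},
        #   )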
meta["attributes"]["key"] = "value-3" await store.set( "directory-3/zarr.json", self.buffer_cls.from_bytes(json.dumps(meta).encode()), ) group = await zarr.api.asynchronous.open_group( store=f"s3://{test_bucket_name}", path="directory-3", storage_options=storage_options ) assert dict(group.attrs) == {"key": "value-3"} @pytest.mark.skipif( parse_version(fsspec.__version__) < parse_version("2024.03.01"), reason="Prior bug in from_upath", ) def test_from_upath(self) -> None: upath = pytest.importorskip("upath") path = upath.UPath( f"s3://{test_bucket_name}/foo/bar/", endpoint_url=endpoint_url, anon=False, asynchronous=True, ) result = FsspecStore.from_upath(path) assert result.fs.endpoint_url == endpoint_url assert result.fs.asynchronous assert result.path == f"{test_bucket_name}/foo/bar" def test_init_raises_if_path_has_scheme(self, store_kwargs) -> None: # regression test for https://github.com/zarr-developers/zarr-python/issues/2342 store_kwargs["path"] = "s3://" + store_kwargs["path"] with pytest.raises( ValueError, match="path argument to FsspecStore must not include scheme .*" ): self.store_cls(**store_kwargs) def test_init_warns_if_fs_asynchronous_is_false(self) -> None: try: from fsspec import url_to_fs except ImportError: # before fsspec==2024.3.1 from fsspec.core import url_to_fs fs, path = url_to_fs( f"s3://{test_bucket_name}", endpoint_url=endpoint_url, anon=False, asynchronous=False ) store_kwargs = {"fs": fs, "path": path} with pytest.warns(UserWarning, match=r".* was not created with `asynchronous=True`.*"): self.store_cls(**store_kwargs) async def test_empty_nonexistent_path(self, store_kwargs) -> None: # regression test for https://github.com/zarr-developers/zarr-python/pull/2343 store_kwargs["path"] += "/abc" store = await self.store_cls.open(**store_kwargs) assert await store.is_empty("") async def test_delete_dir_unsupported_deletes(self, store: FsspecStore) -> None: store.supports_deletes = False with pytest.raises( NotImplementedError, match="This method is only available for stores that support deletes.", ): await store.delete_dir("test_prefix") @pytest.mark.skipif( parse_version(fsspec.__version__) < parse_version("2024.12.0"), reason="No AsyncFileSystemWrapper", ) def test_wrap_sync_filesystem(): """The local fs is not async so we should expect it to be wrapped automatically""" from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper store = FsspecStore.from_url("local://test/path") assert isinstance(store.fs, AsyncFileSystemWrapper) assert store.fs.async_impl @pytest.mark.skipif( parse_version(fsspec.__version__) < parse_version("2024.12.0"), reason="No AsyncFileSystemWrapper", ) def test_no_wrap_async_filesystem(): """An async fs should not be wrapped automatically; fsspec's https filesystem is such an fs""" from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper store = FsspecStore.from_url("https://test/path") assert not isinstance(store.fs, AsyncFileSystemWrapper) assert store.fs.async_impl @pytest.mark.skipif( parse_version(fsspec.__version__) < parse_version("2024.12.0"), reason="No AsyncFileSystemWrapper", ) async def test_delete_dir_wrapped_filesystem(tmpdir) -> None: from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper from fsspec.implementations.local import LocalFileSystem wrapped_fs = AsyncFileSystemWrapper(LocalFileSystem(auto_mkdir=True)) store = FsspecStore(wrapped_fs, read_only=False, path=f"{tmpdir}/test/path") assert isinstance(store.fs, AsyncFileSystemWrapper) assert store.fs.asynchronous await 
store.set("zarr.json", cpu.Buffer.from_bytes(b"root")) await store.set("foo-bar/zarr.json", cpu.Buffer.from_bytes(b"root")) await store.set("foo/zarr.json", cpu.Buffer.from_bytes(b"bar")) await store.set("foo/c/0", cpu.Buffer.from_bytes(b"chunk")) await store.delete_dir("foo") assert await store.exists("zarr.json") assert await store.exists("foo-bar/zarr.json") assert not await store.exists("foo/zarr.json") assert not await store.exists("foo/c/0") zarr-python-3.0.6/tests/test_store/test_local.py000066400000000000000000000051211476711733500220720ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING import pytest import zarr from zarr.core.buffer import Buffer, cpu from zarr.storage import LocalStore from zarr.testing.store import StoreTests from zarr.testing.utils import assert_bytes_equal if TYPE_CHECKING: import pathlib class TestLocalStore(StoreTests[LocalStore, cpu.Buffer]): store_cls = LocalStore buffer_cls = cpu.Buffer async def get(self, store: LocalStore, key: str) -> Buffer: return self.buffer_cls.from_bytes((store.root / key).read_bytes()) async def set(self, store: LocalStore, key: str, value: Buffer) -> None: parent = (store.root / key).parent if not parent.exists(): parent.mkdir(parents=True) (store.root / key).write_bytes(value.to_bytes()) @pytest.fixture def store_kwargs(self, tmpdir) -> dict[str, str]: return {"root": str(tmpdir)} def test_store_repr(self, store: LocalStore) -> None: assert str(store) == f"file://{store.root.as_posix()}" def test_store_supports_writes(self, store: LocalStore) -> None: assert store.supports_writes def test_store_supports_partial_writes(self, store: LocalStore) -> None: assert store.supports_partial_writes def test_store_supports_listing(self, store: LocalStore) -> None: assert store.supports_listing async def test_empty_with_empty_subdir(self, store: LocalStore) -> None: assert await store.is_empty("") (store.root / "foo/bar").mkdir(parents=True) assert await store.is_empty("") def test_creates_new_directory(self, tmp_path: pathlib.Path): target = tmp_path.joinpath("a", "b", "c") assert not target.exists() store = self.store_cls(root=target) zarr.group(store=store) def test_invalid_root_raises(self): """ Test that a TypeError is raised when a non-str/Path type is used for the `root` argument """ with pytest.raises( TypeError, match=r"'root' must be a string or Path instance. Got an instance of instead.", ): LocalStore(root=0) async def test_get_with_prototype_default(self, store: LocalStore): """ Ensure that data can be read via ``store.get`` if the prototype keyword argument is unspecified, i.e. set to ``None``. 
""" data_buf = self.buffer_cls.from_bytes(b"\x01\x02\x03\x04") key = "c/0" await self.set(store, key, data_buf) observed = await store.get(key, prototype=None) assert_bytes_equal(observed, data_buf) zarr-python-3.0.6/tests/test_store/test_logging.py000066400000000000000000000114461476711733500224350ustar00rootroot00000000000000from __future__ import annotations import logging from typing import TYPE_CHECKING import pytest import zarr from zarr.core.buffer import Buffer, cpu, default_buffer_prototype from zarr.storage import LocalStore, LoggingStore from zarr.testing.store import StoreTests if TYPE_CHECKING: from _pytest.compat import LEGACY_PATH from zarr.abc.store import Store class TestLoggingStore(StoreTests[LoggingStore, cpu.Buffer]): store_cls = LoggingStore buffer_cls = cpu.Buffer async def get(self, store: LoggingStore, key: str) -> Buffer: return self.buffer_cls.from_bytes((store._store.root / key).read_bytes()) async def set(self, store: LoggingStore, key: str, value: Buffer) -> None: parent = (store._store.root / key).parent if not parent.exists(): parent.mkdir(parents=True) (store._store.root / key).write_bytes(value.to_bytes()) @pytest.fixture def store_kwargs(self, tmpdir: LEGACY_PATH) -> dict[str, str]: return {"store": LocalStore(str(tmpdir)), "log_level": "DEBUG"} @pytest.fixture def open_kwargs(self, tmpdir) -> dict[str, str]: return {"store_cls": LocalStore, "root": str(tmpdir), "log_level": "DEBUG"} @pytest.fixture def store(self, store_kwargs: str | dict[str, Buffer] | None) -> LoggingStore: return self.store_cls(**store_kwargs) def test_store_supports_writes(self, store: LoggingStore) -> None: assert store.supports_writes def test_store_supports_partial_writes(self, store: LoggingStore) -> None: assert store.supports_partial_writes def test_store_supports_listing(self, store: LoggingStore) -> None: assert store.supports_listing def test_store_repr(self, store: LoggingStore) -> None: assert f"{store!r}" == f"LoggingStore(LocalStore, 'file://{store._store.root.as_posix()}')" def test_store_str(self, store: LoggingStore) -> None: assert str(store) == f"logging-file://{store._store.root.as_posix()}" async def test_default_handler(self, local_store, capsys) -> None: # Store and then remove existing handlers to enter default handler code path handlers = logging.getLogger().handlers[:] for h in handlers: logging.getLogger().removeHandler(h) # Test logs are sent to stdout wrapped = LoggingStore(store=local_store) buffer = default_buffer_prototype().buffer res = await wrapped.set("foo/bar/c/0", buffer.from_bytes(b"\x01\x02\x03\x04")) assert res is None captured = capsys.readouterr() assert len(captured) == 2 assert "Calling LocalStore.set" in captured.out assert "Finished LocalStore.set" in captured.out # Restore handlers for h in handlers: logging.getLogger().addHandler(h) def test_is_open_setter_raises(self, store: LoggingStore) -> None: "Test that a user cannot change `_is_open` without opening the underlying store." 
with pytest.raises( NotImplementedError, match="LoggingStore must be opened via the `_open` method" ): store._is_open = True @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) async def test_logging_store(store: Store, caplog) -> None: wrapped = LoggingStore(store=store, log_level="DEBUG") buffer = default_buffer_prototype().buffer caplog.clear() res = await wrapped.set("foo/bar/c/0", buffer.from_bytes(b"\x01\x02\x03\x04")) assert res is None assert len(caplog.record_tuples) == 2 for tup in caplog.record_tuples: assert str(store) in tup[0] assert f"Calling {type(store).__name__}.set" in caplog.record_tuples[0][2] assert f"Finished {type(store).__name__}.set" in caplog.record_tuples[1][2] caplog.clear() keys = [k async for k in wrapped.list()] assert keys == ["foo/bar/c/0"] assert len(caplog.record_tuples) == 2 for tup in caplog.record_tuples: assert str(store) in tup[0] assert f"Calling {type(store).__name__}.list" in caplog.record_tuples[0][2] assert f"Finished {type(store).__name__}.list" in caplog.record_tuples[1][2] @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=["store"]) async def test_logging_store_counter(store: Store) -> None: wrapped = LoggingStore(store=store, log_level="DEBUG") arr = zarr.create(shape=(10,), store=wrapped, overwrite=True) arr[:] = 1 assert wrapped.counter["set"] == 2 assert wrapped.counter["list"] == 0 assert wrapped.counter["list_dir"] == 0 assert wrapped.counter["list_prefix"] == 0 if store.supports_deletes: assert wrapped.counter["get"] == 0 # 1 if overwrite=False assert wrapped.counter["delete_dir"] == 1 else: assert wrapped.counter["get"] == 1 assert wrapped.counter["delete_dir"] == 0 zarr-python-3.0.6/tests/test_store/test_memory.py000066400000000000000000000100421476711733500223060ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING import numpy as np import pytest import zarr from zarr.core.buffer import Buffer, cpu, gpu from zarr.storage import GpuMemoryStore, MemoryStore from zarr.testing.store import StoreTests from zarr.testing.utils import gpu_test if TYPE_CHECKING: from zarr.core.common import ZarrFormat class TestMemoryStore(StoreTests[MemoryStore, cpu.Buffer]): store_cls = MemoryStore buffer_cls = cpu.Buffer async def set(self, store: MemoryStore, key: str, value: Buffer) -> None: store._store_dict[key] = value async def get(self, store: MemoryStore, key: str) -> Buffer: return store._store_dict[key] @pytest.fixture(params=[None, True]) def store_kwargs( self, request: pytest.FixtureRequest ) -> dict[str, str | dict[str, Buffer] | None]: kwargs = {"store_dict": None} if request.param is True: kwargs["store_dict"] = {} return kwargs @pytest.fixture def store(self, store_kwargs: str | dict[str, Buffer] | None) -> MemoryStore: return self.store_cls(**store_kwargs) def test_store_repr(self, store: MemoryStore) -> None: assert str(store) == f"memory://{id(store._store_dict)}" def test_store_supports_writes(self, store: MemoryStore) -> None: assert store.supports_writes def test_store_supports_listing(self, store: MemoryStore) -> None: assert store.supports_listing def test_store_supports_partial_writes(self, store: MemoryStore) -> None: assert store.supports_partial_writes def test_list_prefix(self, store: MemoryStore) -> None: assert True @pytest.mark.parametrize("dtype", ["uint8", "float32", "int64"]) @pytest.mark.parametrize("zarr_format", [2, 3]) async def test_deterministic_size( self, store: MemoryStore, dtype, zarr_format: ZarrFormat ) -> None: a 
= zarr.empty( store=store, shape=(3,), chunks=(1000,), dtype=dtype, zarr_format=zarr_format, overwrite=True, ) a[...] = 1 a.resize((1000,)) np.testing.assert_array_equal(a[:3], 1) np.testing.assert_array_equal(a[3:], 0) @gpu_test class TestGpuMemoryStore(StoreTests[GpuMemoryStore, gpu.Buffer]): store_cls = GpuMemoryStore buffer_cls = gpu.Buffer async def set(self, store: GpuMemoryStore, key: str, value: Buffer) -> None: store._store_dict[key] = value async def get(self, store: MemoryStore, key: str) -> Buffer: return store._store_dict[key] @pytest.fixture(params=[None, True]) def store_kwargs( self, request: pytest.FixtureRequest ) -> dict[str, str | dict[str, Buffer] | None]: kwargs = {"store_dict": None} if request.param is True: kwargs["store_dict"] = {} return kwargs @pytest.fixture def store(self, store_kwargs: str | dict[str, gpu.Buffer] | None) -> GpuMemoryStore: return self.store_cls(**store_kwargs) def test_store_repr(self, store: GpuMemoryStore) -> None: assert str(store) == f"gpumemory://{id(store._store_dict)}" def test_store_supports_writes(self, store: GpuMemoryStore) -> None: assert store.supports_writes def test_store_supports_listing(self, store: GpuMemoryStore) -> None: assert store.supports_listing def test_store_supports_partial_writes(self, store: GpuMemoryStore) -> None: assert store.supports_partial_writes def test_list_prefix(self, store: GpuMemoryStore) -> None: assert True def test_dict_reference(self, store: GpuMemoryStore) -> None: store_dict = {} result = GpuMemoryStore(store_dict=store_dict) assert result._store_dict is store_dict def test_from_dict(self): d = { "a": gpu.Buffer.from_bytes(b"aaaa"), "b": cpu.Buffer.from_bytes(b"bbbb"), } result = GpuMemoryStore.from_dict(d) for v in result._store_dict.values(): assert type(v) is gpu.Buffer zarr-python-3.0.6/tests/test_store/test_stateful.py000066400000000000000000000024431476711733500226330ustar00rootroot00000000000000# Stateful tests for arbitrary Zarr stores. import pytest from hypothesis.stateful import ( run_state_machine_as_test, ) from zarr.abc.store import Store from zarr.storage import LocalStore, ZipStore from zarr.testing.stateful import ZarrHierarchyStateMachine, ZarrStoreStateMachine pytestmark = pytest.mark.slow_hypothesis def test_zarr_hierarchy(sync_store: Store): def mk_test_instance_sync() -> ZarrHierarchyStateMachine: return ZarrHierarchyStateMachine(sync_store) if isinstance(sync_store, ZipStore): pytest.skip(reason="ZipStore does not support delete") run_state_machine_as_test(mk_test_instance_sync) def test_zarr_store(sync_store: Store) -> None: def mk_test_instance_sync() -> None: return ZarrStoreStateMachine(sync_store) if isinstance(sync_store, ZipStore): pytest.skip(reason="ZipStore does not support delete") if isinstance(sync_store, LocalStore): # This test uses arbitrary keys, which are passed to `set` and `delete`. # It assumes that `set` and `delete` are the only two operations that modify state. # But LocalStore, directories can hang around even after a key is delete-d. 
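        # (Illustrative aside, not in the original file.) Concretely: deleting key
        # "a/b/c" removes the file, but the now-empty directories "a/" and "a/b/"
        # can remain on disk, so listing-based invariants in the state machine
        # would see prefixes the model believes were deleted.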
pytest.skip(reason="Test isn't suitable for LocalStore.") run_state_machine_as_test(mk_test_instance_sync) zarr-python-3.0.6/tests/test_store/test_wrapper.py000066400000000000000000000074301476711733500224650ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING import pytest from zarr.core.buffer.cpu import Buffer, buffer_prototype from zarr.storage import LocalStore, WrapperStore from zarr.testing.store import StoreTests if TYPE_CHECKING: from _pytest.compat import LEGACY_PATH from zarr.abc.store import Store from zarr.core.buffer.core import BufferPrototype class TestWrapperStore(StoreTests[WrapperStore, Buffer]): store_cls = WrapperStore buffer_cls = Buffer async def get(self, store: WrapperStore, key: str) -> Buffer: return self.buffer_cls.from_bytes((store._store.root / key).read_bytes()) async def set(self, store: WrapperStore, key: str, value: Buffer) -> None: parent = (store._store.root / key).parent if not parent.exists(): parent.mkdir(parents=True) (store._store.root / key).write_bytes(value.to_bytes()) @pytest.fixture def store_kwargs(self, tmpdir: LEGACY_PATH) -> dict[str, str]: return {"store": LocalStore(str(tmpdir))} @pytest.fixture def open_kwargs(self, tmpdir) -> dict[str, str]: return {"store_cls": LocalStore, "root": str(tmpdir)} def test_store_supports_writes(self, store: WrapperStore) -> None: assert store.supports_writes def test_store_supports_partial_writes(self, store: WrapperStore) -> None: assert store.supports_partial_writes def test_store_supports_listing(self, store: WrapperStore) -> None: assert store.supports_listing def test_store_repr(self, store: WrapperStore) -> None: assert f"{store!r}" == f"WrapperStore(LocalStore, 'file://{store._store.root.as_posix()}')" def test_store_str(self, store: WrapperStore) -> None: assert str(store) == f"wrapping-file://{store._store.root.as_posix()}" def test_check_writeable(self, store: WrapperStore) -> None: """ Test _check_writeable() runs without errors. """ store._check_writable() def test_close(self, store: WrapperStore) -> None: "Test store can be closed" store.close() assert not store._is_open def test_is_open_setter_raises(self, store: WrapperStore) -> None: """ Test that a user cannot change `_is_open` without opening the underlying store. 
""" with pytest.raises( NotImplementedError, match="WrapperStore must be opened via the `_open` method" ): store._is_open = True @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=True) async def test_wrapped_set(store: Store, capsys: pytest.CaptureFixture[str]) -> None: # define a class that prints when it sets class NoisySetter(WrapperStore): async def set(self, key: str, value: Buffer) -> None: print(f"setting {key}") await super().set(key, value) key = "foo" value = Buffer.from_bytes(b"bar") store_wrapped = NoisySetter(store) await store_wrapped.set(key, value) captured = capsys.readouterr() assert f"setting {key}" in captured.out assert await store_wrapped.get(key, buffer_prototype) == value @pytest.mark.parametrize("store", ["local", "memory", "zip"], indirect=True) async def test_wrapped_get(store: Store, capsys: pytest.CaptureFixture[str]) -> None: # define a class that prints when it sets class NoisyGetter(WrapperStore): def get(self, key: str, prototype: BufferPrototype) -> None: print(f"getting {key}") return super().get(key, prototype=prototype) key = "foo" value = Buffer.from_bytes(b"bar") store_wrapped = NoisyGetter(store) await store_wrapped.set(key, value) assert await store_wrapped.get(key, buffer_prototype) == value captured = capsys.readouterr() assert f"getting {key}" in captured.out zarr-python-3.0.6/tests/test_store/test_zip.py000066400000000000000000000107241476711733500216070ustar00rootroot00000000000000from __future__ import annotations import os import shutil import tempfile import zipfile from typing import TYPE_CHECKING import numpy as np import pytest import zarr from zarr.core.buffer import Buffer, cpu, default_buffer_prototype from zarr.storage import ZipStore from zarr.testing.store import StoreTests if TYPE_CHECKING: from pathlib import Path from typing import Any class TestZipStore(StoreTests[ZipStore, cpu.Buffer]): store_cls = ZipStore buffer_cls = cpu.Buffer @pytest.fixture def store_kwargs(self, request) -> dict[str, str | bool]: fd, temp_path = tempfile.mkstemp() os.close(fd) os.unlink(temp_path) return {"path": temp_path, "mode": "w", "read_only": False} async def get(self, store: ZipStore, key: str) -> Buffer: return store._get(key, prototype=default_buffer_prototype()) async def set(self, store: ZipStore, key: str, value: Buffer) -> None: return store._set(key, value) def test_store_read_only(self, store: ZipStore, store_kwargs: dict[str, Any]) -> None: assert not store.read_only async def test_read_only_store_raises(self, store_kwargs: dict[str, Any]) -> None: # we need to create the zipfile in write mode before switching to read mode store = await self.store_cls.open(**store_kwargs) store.close() kwargs = {**store_kwargs, "mode": "a", "read_only": True} store = await self.store_cls.open(**kwargs) assert store._zmode == "a" assert store.read_only # set with pytest.raises(ValueError): await store.set("foo", cpu.Buffer.from_bytes(b"bar")) def test_store_repr(self, store: ZipStore) -> None: assert str(store) == f"zip://{store.path}" def test_store_supports_writes(self, store: ZipStore) -> None: assert store.supports_writes def test_store_supports_partial_writes(self, store: ZipStore) -> None: assert store.supports_partial_writes is False def test_store_supports_listing(self, store: ZipStore) -> None: assert store.supports_listing def test_api_integration(self, store: ZipStore) -> None: root = zarr.open_group(store=store, mode="a") data = np.arange(10000, dtype=np.uint16).reshape(100, 100) z = root.create_array( shape=data.shape, 
chunks=(10, 10), name="foo", dtype=np.uint16, fill_value=99 ) z[:] = data assert np.array_equal(data, z[:]) # you can overwrite existing chunks but zipfile will issue a warning with pytest.warns(UserWarning, match="Duplicate name: 'foo/c/0/0'"): z[0, 0] = 100 # TODO: assigning an entire chunk to fill value ends up deleting the chunk which is not supported # a work around will be needed here. with pytest.raises(NotImplementedError): z[0:10, 0:10] = 99 bar = root.create_group("bar", attributes={"hello": "world"}) assert "hello" in dict(bar.attrs) # keys cannot be deleted with pytest.raises(NotImplementedError): del root["bar"] store.close() @pytest.mark.parametrize("read_only", [True, False]) async def test_store_open_read_only( self, store_kwargs: dict[str, Any], read_only: bool ) -> None: if read_only == "r": # create an empty zipfile with zipfile.ZipFile(store_kwargs["path"], mode="w"): pass await super().test_store_open_read_only(store_kwargs, read_only) @pytest.mark.parametrize(("zip_mode", "read_only"), [("w", False), ("a", False), ("x", False)]) async def test_zip_open_mode_translation( self, store_kwargs: dict[str, Any], zip_mode: str, read_only: bool ) -> None: kws = {**store_kwargs, "mode": zip_mode} store = await self.store_cls.open(**kws) assert store.read_only == read_only def test_externally_zipped_store(self, tmp_path: Path) -> None: # See: https://github.com/zarr-developers/zarr-python/issues/2757 zarr_path = tmp_path / "foo.zarr" root = zarr.open_group(store=zarr_path, mode="w") root.require_group("foo") root["foo"]["bar"] = np.array([1]) shutil.make_archive(zarr_path, "zip", zarr_path) zip_path = tmp_path / "foo.zarr.zip" zipped = zarr.open_group(ZipStore(zip_path, mode="r"), mode="r") assert list(zipped.keys()) == list(root.keys()) assert list(zipped["foo"].keys()) == list(root["foo"].keys()) zarr-python-3.0.6/tests/test_strings.py000066400000000000000000000025011476711733500202750ustar00rootroot00000000000000"""Tests for the strings module.""" import numpy as np import pytest from zarr.core.strings import _NUMPY_SUPPORTS_VLEN_STRING, _STRING_DTYPE, cast_to_string_dtype def test_string_defaults() -> None: if _NUMPY_SUPPORTS_VLEN_STRING: assert _STRING_DTYPE == np.dtypes.StringDType() else: assert _STRING_DTYPE == np.dtypes.ObjectDType() def test_cast_to_string_dtype() -> None: d1 = np.array(["a", "b", "c"]) assert d1.dtype == np.dtype(" asyncio.AbstractEventLoop | None: if request.param is True: return _get_loop() else: return None @pytest.fixture def clean_state(): # use this fixture to make sure no existing threads/loops exist in zarr.core.sync cleanup_resources() yield cleanup_resources() def test_get_loop() -> None: # test that calling _get_loop() twice returns the same loop loop = _get_loop() loop2 = _get_loop() assert loop is loop2 def test_get_lock() -> None: # test that calling _get_lock() twice returns the same lock lock = _get_lock() lock2 = _get_lock() assert lock is lock2 def test_sync(sync_loop: asyncio.AbstractEventLoop | None) -> None: foo = AsyncMock(return_value="foo") assert sync(foo(), loop=sync_loop) == "foo" foo.assert_awaited_once() def test_sync_raises(sync_loop: asyncio.AbstractEventLoop | None) -> None: foo = AsyncMock(side_effect=ValueError("foo-bar")) with pytest.raises(ValueError, match="foo-bar"): sync(foo(), loop=sync_loop) foo.assert_awaited_once() def test_sync_timeout() -> None: duration = 0.02 async def foo() -> None: await asyncio.sleep(duration) with pytest.raises(asyncio.TimeoutError): sync(foo(), timeout=duration / 10) def 
zarr-python-3.0.6/tests/test_strings.py000066400000000000000000000025011476711733500202750ustar00rootroot00000000000000
"""Tests for the strings module."""

import numpy as np
import pytest

from zarr.core.strings import _NUMPY_SUPPORTS_VLEN_STRING, _STRING_DTYPE, cast_to_string_dtype


def test_string_defaults() -> None:
    if _NUMPY_SUPPORTS_VLEN_STRING:
        assert _STRING_DTYPE == np.dtypes.StringDType()
    else:
        assert _STRING_DTYPE == np.dtypes.ObjectDType()


def test_cast_to_string_dtype() -> None:
    d1 = np.array(["a", "b", "c"])
    assert d1.dtype == np.dtype("<U1")
    s1 = cast_to_string_dtype(d1)
    assert s1.dtype == _STRING_DTYPE

    with pytest.raises(ValueError, match="Cannot cast"):
        cast_to_string_dtype(np.arange(3))
zarr-python-3.0.6/tests/test_sync.py
import asyncio
from collections.abc import AsyncGenerator
from unittest.mock import AsyncMock, patch

import pytest

import zarr
from zarr.core.sync import (
    SyncError,
    SyncMixin,
    _get_executor,
    _get_lock,
    _get_loop,
    cleanup_resources,
    loop,
    sync,
)
from zarr.storage import MemoryStore


@pytest.fixture(params=[True, False])
def sync_loop(request: pytest.FixtureRequest) -> asyncio.AbstractEventLoop | None:
    if request.param is True:
        return _get_loop()
    else:
        return None


@pytest.fixture
def clean_state():
    # use this fixture to make sure no existing threads/loops exist in zarr.core.sync
    cleanup_resources()
    yield
    cleanup_resources()


def test_get_loop() -> None:
    # test that calling _get_loop() twice returns the same loop
    loop = _get_loop()
    loop2 = _get_loop()
    assert loop is loop2


def test_get_lock() -> None:
    # test that calling _get_lock() twice returns the same lock
    lock = _get_lock()
    lock2 = _get_lock()
    assert lock is lock2


def test_sync(sync_loop: asyncio.AbstractEventLoop | None) -> None:
    foo = AsyncMock(return_value="foo")
    assert sync(foo(), loop=sync_loop) == "foo"
    foo.assert_awaited_once()


def test_sync_raises(sync_loop: asyncio.AbstractEventLoop | None) -> None:
    foo = AsyncMock(side_effect=ValueError("foo-bar"))
    with pytest.raises(ValueError, match="foo-bar"):
        sync(foo(), loop=sync_loop)
    foo.assert_awaited_once()


def test_sync_timeout() -> None:
    duration = 0.02

    async def foo() -> None:
        await asyncio.sleep(duration)

    with pytest.raises(asyncio.TimeoutError):
        sync(foo(), timeout=duration / 10)


def test_sync_raises_if_no_coroutine(sync_loop: asyncio.AbstractEventLoop | None) -> None:
    def foo() -> str:
        return "foo"

    with pytest.raises(TypeError):
        sync(foo(), loop=sync_loop)  # type: ignore[arg-type]


@pytest.mark.filterwarnings("ignore:coroutine.*was never awaited")
def test_sync_raises_if_loop_is_closed() -> None:
    loop = _get_loop()

    foo = AsyncMock(return_value="foo")
    with patch.object(loop, "is_closed", return_value=True):
        with pytest.raises(RuntimeError):
            sync(foo(), loop=loop)
    foo.assert_not_awaited()


@pytest.mark.filterwarnings("ignore:coroutine.*was never awaited")
def test_sync_raises_if_calling_sync_from_within_a_running_loop(
    sync_loop: asyncio.AbstractEventLoop | None,
) -> None:
    def foo() -> str:
        # technically, this should be an async function but doing that
        # yields a warning because it is never awaited by the inner function
        return "foo"

    async def bar() -> str:
        return sync(foo(), loop=sync_loop)  # type: ignore[arg-type]

    with pytest.raises(SyncError):
        sync(bar(), loop=sync_loop)


@pytest.mark.filterwarnings("ignore:coroutine.*was never awaited")
def test_sync_raises_if_loop_is_invalid_type() -> None:
    foo = AsyncMock(return_value="foo")
    with pytest.raises(TypeError):
        sync(foo(), loop=1)  # type: ignore[arg-type]
    foo.assert_not_awaited()


def test_sync_mixin(sync_loop) -> None:
    class AsyncFoo:
        def __init__(self) -> None:
            pass

        async def foo(self) -> str:
            return "foo"

        async def bar(self) -> AsyncGenerator:
            for i in range(10):
                yield i

    class SyncFoo(SyncMixin):
        def __init__(self, async_foo: AsyncFoo) -> None:
            self._async_foo = async_foo

        def foo(self) -> str:
            return self._sync(self._async_foo.foo())

        def bar(self) -> list[int]:
            return self._sync_iter(self._async_foo.bar())

    async_foo = AsyncFoo()
    foo = SyncFoo(async_foo)
    assert foo.foo() == "foo"
    assert foo.bar() == list(range(10))


def test_open_positional_args_deprecate():
    store = MemoryStore()
    with pytest.warns(FutureWarning, match="pass"):
        zarr.open(store, "w", shape=(1,))


@pytest.mark.parametrize("workers", [None, 1, 2])
def test_threadpool_executor(clean_state, workers: int | None) -> None:
    with zarr.config.set({"threading.max_workers": workers}):
        _ = zarr.zeros(shape=(1,))  # trigger executor creation
        assert loop != [None]  # confirm loop was created
        if workers is None:
            # confirm no executor was created if no workers were specified
            # (this is the default behavior)
            assert loop[0]._default_executor is None
        else:
            # confirm executor was created and attached to loop as the default executor
            # note: python doesn't have a direct way to get the default executor so we
            # use the private attribute
            assert _get_executor() is loop[0]._default_executor
            assert _get_executor()._max_workers == workers


def test_cleanup_resources_idempotent() -> None:
    _get_executor()  # trigger resource creation (iothread, loop, thread-pool)
    cleanup_resources()
    cleanup_resources()
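# --- Illustrative sketch (not part of the original test suite) -------------
# The SyncError case above guards against deadlock: sync() submits the
# coroutine to zarr's private background loop and blocks the calling thread
# until it finishes, so calling sync() from code that is already running on
# an event loop could block that loop forever. A sketch of the safe pattern
# (the function names here are hypothetical):
async def _fetch() -> str:
    await asyncio.sleep(0)
    return "data"


def fetch_blocking() -> str:
    # fine: called from plain synchronous code, bridging happens exactly once
    return sync(_fetch())


async def _unsafe() -> str:
    # don't do this: sync() inside a running loop raises SyncError
    return sync(_fetch())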
D.create_array(name="x", shape=(0,), dtype="float64") result = repr(g.tree()) root = root_name or "" BOPEN = "\x1b[1m" BCLOSE = "\x1b[0m" expected = textwrap.dedent(f"""\ {BOPEN}/{root}{BCLOSE} ├── {BOPEN}A{BCLOSE} │ ├── {BOPEN}x{BCLOSE} (2,) float64 │ └── {BOPEN}y{BCLOSE} (0,) int8 └── {BOPEN}B{BCLOSE} ├── {BOPEN}C{BCLOSE} │ ├── {BOPEN}C{BCLOSE} │ │ └── {BOPEN}x{BCLOSE} (0,) float64 │ └── {BOPEN}x{BCLOSE} (0,) float64 └── {BOPEN}x{BCLOSE} (0,) float64 """) assert result == expected result = repr(g.tree(level=0)) expected = textwrap.dedent(f"""\ {BOPEN}/{root}{BCLOSE} ├── {BOPEN}A{BCLOSE} └── {BOPEN}B{BCLOSE} """) assert result == expected def test_expand_not_implemented() -> None: g = zarr.group() with pytest.raises(NotImplementedError): g.tree(expand=True) zarr-python-3.0.6/tests/test_v2.py000066400000000000000000000250221476711733500171360ustar00rootroot00000000000000import json from collections.abc import Iterator from typing import Any, Literal import numcodecs.vlen import numpy as np import pytest from numcodecs import Delta from numcodecs.blosc import Blosc from numcodecs.zstd import Zstd import zarr import zarr.core.buffer import zarr.storage from zarr import config from zarr.abc.store import Store from zarr.core.buffer.core import default_buffer_prototype from zarr.core.sync import sync from zarr.storage import MemoryStore, StorePath @pytest.fixture async def store() -> Iterator[StorePath]: return StorePath(await MemoryStore.open()) def test_simple(store: StorePath) -> None: data = np.arange(0, 256, dtype="uint16").reshape((16, 16)) a = zarr.create_array( store / "simple_v2", zarr_format=2, shape=data.shape, chunks=(16, 16), dtype=data.dtype, fill_value=0, ) a[:, :] = data assert np.array_equal(data, a[:, :]) @pytest.mark.parametrize("store", ["memory"], indirect=True) @pytest.mark.parametrize( ("dtype", "fill_value"), [ ("bool", False), ("int64", 0), ("float64", 0.0), ("|S1", b""), ("|U1", ""), ("object", ""), (str, ""), ], ) def test_implicit_fill_value(store: MemoryStore, dtype: str, fill_value: Any) -> None: arr = zarr.create(store=store, shape=(4,), fill_value=None, zarr_format=2, dtype=dtype) assert arr.metadata.fill_value is None assert arr.metadata.to_dict()["fill_value"] is None result = arr[:] if dtype is str: # special case numpy_dtype = np.dtype(object) else: numpy_dtype = np.dtype(dtype) expected = np.full(arr.shape, fill_value, dtype=numpy_dtype) np.testing.assert_array_equal(result, expected) def test_codec_pipeline() -> None: # https://github.com/zarr-developers/zarr-python/issues/2243 store = MemoryStore() array = zarr.create( store=store, shape=(1,), dtype="i4", zarr_format=2, filters=[Delta(dtype="i4").get_config()], compressor=Blosc().get_config(), ) array[:] = 1 result = array[:] expected = np.ones(1) np.testing.assert_array_equal(result, expected) @pytest.mark.parametrize( ("dtype", "expected_dtype", "fill_value", "fill_value_encoding"), [ ("|S", "|S0", b"X", "WA=="), ("|V", "|V0", b"X", "WA=="), ("|V10", "|V10", b"X", "WAAAAAAAAAAAAA=="), ], ) async def test_v2_encode_decode(dtype, expected_dtype, fill_value, fill_value_encoding) -> None: with config.set( { "array.v2_default_filters.bytes": [{"id": "vlen-bytes"}], "array.v2_default_compressor.bytes": None, } ): store = zarr.storage.MemoryStore() g = zarr.group(store=store, zarr_format=2) g.create_array( name="foo", shape=(3,), chunks=(3,), dtype=dtype, fill_value=fill_value, compressor=None ) result = await store.get("foo/.zarray", zarr.core.buffer.default_buffer_prototype()) assert result is not None 
zarr-python-3.0.6/tests/test_v2.py000066400000000000000000000250221476711733500171360ustar00rootroot00000000000000
import json
from collections.abc import Iterator
from typing import Any, Literal

import numcodecs.vlen
import numpy as np
import pytest
from numcodecs import Delta
from numcodecs.blosc import Blosc
from numcodecs.zstd import Zstd

import zarr
import zarr.core.buffer
import zarr.storage
from zarr import config
from zarr.abc.store import Store
from zarr.core.buffer.core import default_buffer_prototype
from zarr.core.sync import sync
from zarr.storage import MemoryStore, StorePath


@pytest.fixture
async def store() -> Iterator[StorePath]:
    return StorePath(await MemoryStore.open())


def test_simple(store: StorePath) -> None:
    data = np.arange(0, 256, dtype="uint16").reshape((16, 16))

    a = zarr.create_array(
        store / "simple_v2",
        zarr_format=2,
        shape=data.shape,
        chunks=(16, 16),
        dtype=data.dtype,
        fill_value=0,
    )

    a[:, :] = data
    assert np.array_equal(data, a[:, :])


@pytest.mark.parametrize("store", ["memory"], indirect=True)
@pytest.mark.parametrize(
    ("dtype", "fill_value"),
    [
        ("bool", False),
        ("int64", 0),
        ("float64", 0.0),
        ("|S1", b""),
        ("|U1", ""),
        ("object", ""),
        (str, ""),
    ],
)
def test_implicit_fill_value(store: MemoryStore, dtype: str, fill_value: Any) -> None:
    arr = zarr.create(store=store, shape=(4,), fill_value=None, zarr_format=2, dtype=dtype)
    assert arr.metadata.fill_value is None
    assert arr.metadata.to_dict()["fill_value"] is None
    result = arr[:]
    if dtype is str:
        # special case
        numpy_dtype = np.dtype(object)
    else:
        numpy_dtype = np.dtype(dtype)
    expected = np.full(arr.shape, fill_value, dtype=numpy_dtype)
    np.testing.assert_array_equal(result, expected)


def test_codec_pipeline() -> None:
    # https://github.com/zarr-developers/zarr-python/issues/2243
    store = MemoryStore()
    array = zarr.create(
        store=store,
        shape=(1,),
        dtype="i4",
        zarr_format=2,
        filters=[Delta(dtype="i4").get_config()],
        compressor=Blosc().get_config(),
    )
    array[:] = 1
    result = array[:]
    expected = np.ones(1)
    np.testing.assert_array_equal(result, expected)


@pytest.mark.parametrize(
    ("dtype", "expected_dtype", "fill_value", "fill_value_encoding"),
    [
        ("|S", "|S0", b"X", "WA=="),
        ("|V", "|V0", b"X", "WA=="),
        ("|V10", "|V10", b"X", "WAAAAAAAAAAAAA=="),
    ],
)
async def test_v2_encode_decode(dtype, expected_dtype, fill_value, fill_value_encoding) -> None:
    with config.set(
        {
            "array.v2_default_filters.bytes": [{"id": "vlen-bytes"}],
            "array.v2_default_compressor.bytes": None,
        }
    ):
        store = zarr.storage.MemoryStore()
        g = zarr.group(store=store, zarr_format=2)
        g.create_array(
            name="foo", shape=(3,), chunks=(3,), dtype=dtype, fill_value=fill_value, compressor=None
        )

        result = await store.get("foo/.zarray", zarr.core.buffer.default_buffer_prototype())
        assert result is not None

        serialized = json.loads(result.to_bytes())
        expected = {
            "chunks": [3],
            "compressor": None,
            "dtype": expected_dtype,
            "fill_value": fill_value_encoding,
            "filters": [{"id": "vlen-bytes"}] if dtype == "|S" else None,
            "order": "C",
            "shape": [3],
            "zarr_format": 2,
            "dimension_separator": ".",
        }
        assert serialized == expected

        data = zarr.open_array(store=store, path="foo")[:]
        expected = np.full((3,), b"X", dtype=dtype)
        np.testing.assert_equal(data, expected)


@pytest.mark.parametrize("dtype_value", [["|S", b"Y"], ["|U", "Y"], ["O", b"Y"]])
def test_v2_encode_decode_with_data(dtype_value):
    dtype, value = dtype_value
    with config.set(
        {
            "array.v2_default_filters": {
                "string": [{"id": "vlen-utf8"}],
                "bytes": [{"id": "vlen-bytes"}],
            },
        }
    ):
        expected = np.full((3,), value, dtype=dtype)
        a = zarr.create(
            shape=(3,),
            zarr_format=2,
            dtype=dtype,
        )
        a[:] = expected
        data = a[:]
        np.testing.assert_equal(data, expected)


@pytest.mark.parametrize("dtype", [str, "str"])
async def test_create_dtype_str(dtype: Any) -> None:
    arr = zarr.create(shape=3, dtype=dtype, zarr_format=2)
    assert arr.dtype.kind == "O"
    assert arr.metadata.to_dict()["dtype"] == "|O"
    assert arr.metadata.filters == (numcodecs.vlen.VLenBytes(),)
    arr[:] = [b"a", b"bb", b"ccc"]
    result = arr[:]
    np.testing.assert_array_equal(result, np.array([b"a", b"bb", b"ccc"], dtype="object"))


@pytest.mark.parametrize("filters", [[], [numcodecs.Delta(dtype="<i4")]])
@pytest.mark.parametrize("order", ["C", "F"])
def test_v2_filters_codecs(filters: Any, order: Literal["C", "F"]) -> None:
    array_fixture = [42]
    with config.set({"array.order": order}):
        arr = zarr.create(shape=1, dtype="<i4", zarr_format=2, filters=filters)
        arr[:] = array_fixture
        result = arr[:]
        np.testing.assert_array_equal(result, array_fixture)


@pytest.mark.parametrize("array_order", ["C", "F"])
@pytest.mark.parametrize("data_order", ["C", "F"])
@pytest.mark.parametrize("memory_order", ["C", "F"])
def test_v2_non_contiguous(
    array_order: Literal["C", "F"], data_order: Literal["C", "F"], memory_order: Literal["C", "F"]
) -> None:
    store = MemoryStore()
    arr = zarr.create_array(
        store,
        shape=(10, 8),
        chunks=(3, 3),
        fill_value=np.nan,
        dtype="float64",
        zarr_format=2,
        filters=None,
        compressors=None,
        overwrite=True,
        order=array_order,
        config={"order": memory_order},
    )

    # Non-contiguous write
    a = np.arange(arr.shape[0] * arr.shape[1]).reshape(arr.shape, order=data_order)
    arr[6:9, 3:6] = a[6:9, 3:6]  # The slice on the RHS is important
    np.testing.assert_array_equal(arr[6:9, 3:6], a[6:9, 3:6])
    np.testing.assert_array_equal(
        a[6:9, 3:6],
        np.frombuffer(
            sync(store.get("2.1", default_buffer_prototype())).to_bytes(), dtype="float64"
        ).reshape((3, 3), order=array_order),
    )
    if memory_order == "F":
        assert (arr[6:9, 3:6]).flags.f_contiguous
    else:
        assert (arr[6:9, 3:6]).flags.c_contiguous

    store = MemoryStore()
    arr = zarr.create_array(
        store,
        shape=(10, 8),
        chunks=(3, 3),
        fill_value=np.nan,
        dtype="float64",
        zarr_format=2,
        compressors=None,
        filters=None,
        overwrite=True,
        order=array_order,
        config={"order": memory_order},
    )

    # Contiguous write
    a = np.arange(9).reshape((3, 3), order=data_order)
    if data_order == "F":
        assert a.flags.f_contiguous
    else:
        assert a.flags.c_contiguous
    arr[6:9, 3:6] = a
    np.testing.assert_array_equal(arr[6:9, 3:6], a)


def test_default_compressor_deprecation_warning():
    with pytest.warns(DeprecationWarning, match="default_compressor is deprecated"):
        zarr.storage.default_compressor = "zarr.codecs.zstd.ZstdCodec()"
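# --- Illustrative sketch (not part of the original test suite) -------------
# The "WA==" strings asserted in test_v2_encode_decode above are how zarr v2
# metadata stores bytes-like fill values: base64 text in .zarray. A sketch of
# the decode step using only the stdlib (the helper name is hypothetical):
import base64


def _decode_v2_fill_value(encoded: str) -> bytes:
    """Decode a base64-encoded v2 bytes fill_value as stored in .zarray."""
    return base64.b64decode(encoded)


assert _decode_v2_fill_value("WA==") == b"X"  # the |S fill value b"X"
assert _decode_v2_fill_value("WAAAAAAAAAAAAA==") == b"X" + b"\x00" * 9  # |V10, zero-padded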
path="foo", store={}, zarr_format=2, dtype=dtype) assert arr.metadata.compressor.codec_id == expected_compressor if expected_filter is not None: assert arr.metadata.filters[0].codec_id == expected_filter @pytest.mark.parametrize("fill_value", [None, (b"", 0, 0.0)], ids=["no_fill", "fill"]) def test_structured_dtype_roundtrip(fill_value, tmp_path) -> None: a = np.array( [(b"aaa", 1, 4.2), (b"bbb", 2, 8.4), (b"ccc", 3, 12.6)], dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")], ) array_path = tmp_path / "data.zarr" za = zarr.create( shape=(3,), store=array_path, chunks=(2,), fill_value=fill_value, zarr_format=2, dtype=a.dtype, ) if fill_value is not None: assert (np.array([fill_value] * a.shape[0], dtype=a.dtype) == za[:]).all() za[...] = a za = zarr.open_array(store=array_path) assert (a == za[:]).all() @pytest.mark.parametrize("fill_value", [None, b"x"], ids=["no_fill", "fill"]) def test_other_dtype_roundtrip(fill_value, tmp_path) -> None: a = np.array([b"a\0\0", b"bb", b"ccc"], dtype="V7") array_path = tmp_path / "data.zarr" za = zarr.create( shape=(3,), store=array_path, chunks=(2,), fill_value=fill_value, zarr_format=2, dtype=a.dtype, ) if fill_value is not None: assert (np.array([fill_value] * a.shape[0], dtype=a.dtype) == za[:]).all() za[...] = a za = zarr.open_array(store=array_path) assert (a == za[:]).all() zarr-python-3.0.6/tests/test_zarr.py000066400000000000000000000003101476711733500175560ustar00rootroot00000000000000import zarr def test_exports() -> None: """ Ensure that everything in __all__ can be imported. """ from zarr import __all__ for export in __all__: getattr(zarr, export)