adios4dolfinx-0.9.3/.coveragerc

[run]
parallel = true
source = adios4dolfinx

[html]
directory = htmlcov

[xml]
output = coverage.xml

adios4dolfinx-0.9.3/.github/dependabot.yml

# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file

version: 2
updates:
  - package-ecosystem: "github-actions" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"

adios4dolfinx-0.9.3/.github/workflows/build_docs.yml

name: Build documentation

on:
  workflow_call:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  schedule:
    - cron: "0 8 * * *"

env:
  DEB_PYTHON_INSTALL_LAYOUT: deb_system
  DEFAULT_TAG: nightly
  ARTIFACT_NAME: docs
  PUBLISH_DIR: ./_build/html

jobs:
  get_image_tag:
    runs-on: ubuntu-latest
    outputs:
      image: ${{ steps.docker_tag.outputs.image }}
    steps:
      - id: docker_tag
        run: echo "image=${{ inputs.tag || env.DEFAULT_TAG }}" >> $GITHUB_OUTPUT

  build-docs:
    needs: get_image_tag
    runs-on: ubuntu-latest
    container: ghcr.io/fenics/dolfinx/dolfinx:${{ needs.get_image_tag.outputs.image }}
    steps:
      # This action sets the current path to the root of your github repo
      - uses: actions/checkout@v4

      - name: Update pip
        run: python3 -m pip install --break-system-packages --upgrade pip setuptools

      - name: Install dependencies
        run: python3 -m pip install --break-system-packages -e ".[docs]"

      - name: Build docs
        run: jupyter book build -W .
      - name: Upload documentation as artifact
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: ${{ env.ARTIFACT_NAME }}
          path: ${{ env.PUBLISH_DIR }}
          if-no-files-found: error

adios4dolfinx-0.9.3/.github/workflows/build_joss_paper.yml

name: Build JOSS paper

on:
  push:
    branches:
      - main

jobs:
  paper:
    runs-on: ubuntu-latest
    name: Paper Draft
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Build draft PDF
        uses: openjournals/openjournals-draft-action@master
        with:
          journal: joss
          paper-path: ./joss-paper/paper.md
      - name: Upload
        uses: actions/upload-artifact@v4
        with:
          name: paper
          path: ./joss-paper/paper.pdf

adios4dolfinx-0.9.3/.github/workflows/check_formatting.yml

name: Check formatting

on:
  workflow_call:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  pull_request:
    branches:
      - main
  schedule:
    - cron: "0 8 * * *"

env:
  DEB_PYTHON_INSTALL_LAYOUT: deb_system
  DEFAULT_TAG: nightly

jobs:
  get_image_tag:
    runs-on: ubuntu-latest
    outputs:
      image: ${{ steps.docker_tag.outputs.image }}
    steps:
      - id: docker_tag
        run: echo "image=${{ inputs.tag || env.DEFAULT_TAG }}" >> $GITHUB_OUTPUT

  build:
    needs: get_image_tag
    runs-on: ubuntu-22.04
    container: ghcr.io/fenics/dolfinx/dolfinx:${{ needs.get_image_tag.outputs.image }}
    steps:
      - uses: actions/checkout@v4

      - name: Update pip
        run: python3 -m pip install --break-system-packages --upgrade pip setuptools

      - name: Install code
        run: python3 -m pip install --break-system-packages .[dev]

      - name: Check code formatting with ruff
        run: |
          ruff check .
          ruff format --check .
      - name: Mypy check
        run: python3 -m mypy -v

adios4dolfinx-0.9.3/.github/workflows/create_legacy_checkpoint.yml

name: Generate adios4dolfinx legacy data

on:
  workflow_call:
    inputs:
      artifact_name:
        type: string
        required: true
        description: "Name of the artifact to be created"

jobs:
  create-adios-data:
    env:
      data_dir: "legacy_checkpoint"
      adios4dolfinx_version: "0.7.1"
    runs-on: "ubuntu-22.04"
    container: ghcr.io/fenics/dolfinx/dolfinx:v0.7.3
    steps:
      - uses: actions/checkout@v4

      - name: Install legacy version of adios4dolfinx
        run: python3 -m pip install --break-system-packages adios4dolfinx==${adios4dolfinx_version}

      - name: Create datasets
        run: python3 ./tests/create_legacy_checkpoint.py --output-dir=$data_dir

      - uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.artifact_name }}
          path: ./${{ env.data_dir }}

adios4dolfinx-0.9.3/.github/workflows/create_legacy_data.yml

name: Generate data from Legacy DOLFIN

on:
  workflow_call:
    inputs:
      artifact_name:
        type: string
        required: true
        description: "Name of the artifact to be created"

jobs:
  create-dolfin-data:
    env:
      data_dir: "legacy"
    runs-on: "ubuntu-22.04"
    container: ghcr.io/scientificcomputing/fenics:2024-02-19
    steps:
      - uses: actions/checkout@v4

      - name: Create datasets
        run: python3 ./tests/create_legacy_data.py --output-dir=$data_dir

      - uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.artifact_name }}
          path: ./${{ env.data_dir }}

adios4dolfinx-0.9.3/.github/workflows/deploy_pages.yml

name: Github Pages

on:
  push:
    branches: [release]
  pull_request:
    branches: [release]

permissions:
  contents: read
  pages: write
  id-token: write

# Allow one concurrent deployment
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  build-docs:
    uses: ./.github/workflows/build_docs.yml
    with:
      tag: "stable"

  run-coverage:
    uses: ./.github/workflows/test_package.yml
    with:
      tag: "stable"

  deploy:
    needs: [run-coverage, build-docs]
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    if: github.event_name == 'push'
    runs-on: ubuntu-latest
    steps:
      - name: Download docs artifact
        # docs artifact is uploaded by build-docs job
        uses: actions/download-artifact@v4
        with:
          name: docs
          path: "./public"

      - name: Download docs artifact
        # docs artifact is uploaded by build-docs job
        uses: actions/download-artifact@v4
        with:
          name: code-coverage-report
          path: "./public/code-coverage-report"

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: "./public"

      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Pages
        uses: actions/configure-pages@v5

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

adios4dolfinx-0.9.3/.github/workflows/pypi.yml

name: pypi

on:
  push:
    branches: [main]
    tags:
      - "v*"

jobs:
  dist:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Build SDist and wheel
        run: pipx run build

      - uses: actions/upload-artifact@v4
        with:
          path: dist/*

      - name: Check metadata
        run: pipx run twine check dist/*

  publish:
    needs: [dist]
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags')
    environment: pypi
    permissions:
      id-token: write
    steps:
      - uses: actions/download-artifact@v4
        with:
          name: artifact
          path: dist

      - uses: pypa/gh-action-pypi-publish@release/v1
adios4dolfinx-0.9.3/.github/workflows/test_package.yml

name: Test package

on:
  workflow_call:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag of DOLFINx docker image"
        default: "nightly"
        required: true
        type: string
  pull_request:
    branches:
      - main
  schedule:
    - cron: "0 8 * * *"

env:
  DEB_PYTHON_INSTALL_LAYOUT: deb_system
  DEFAULT_TAG: nightly

jobs:
  get_image_tag:
    runs-on: ubuntu-latest
    outputs:
      image: ${{ steps.docker_tag.outputs.image }}
    steps:
      - id: docker_tag
        run: echo "image=${{ inputs.tag || env.DEFAULT_TAG }}" >> $GITHUB_OUTPUT

  check-formatting:
    needs: get_image_tag
    uses: ./.github/workflows/check_formatting.yml
    with:
      tag: ${{ needs.get_image_tag.outputs.image }}

  create-datasets:
    uses: ./.github/workflows/create_legacy_data.yml
    with:
      artifact_name: "legacy_mpich"

  create-legacy-datasets:
    uses: ./.github/workflows/create_legacy_checkpoint.yml
    with:
      artifact_name: "legacy_checkpoint_mpich"

  test-code:
    runs-on: ubuntu-24.04
    needs: [create-datasets, create-legacy-datasets, check-formatting, get_image_tag]
    container: ghcr.io/fenics/dolfinx/dolfinx:${{ needs.get_image_tag.outputs.image }}
    steps:
      - uses: actions/checkout@v4

      - name: Update pip
        run: python3 -m pip install --break-system-packages --upgrade pip setuptools

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_mpich
          path: ./legacy

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_checkpoint_mpich
          path: ./legacy_checkpoint

      - name: Install package
        run: python3 -m pip install --break-system-packages .[test]

      - name: Run tests
        run: |
          coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/

      - name: Run tests in parallel
        run: |
          mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/

      - name: Combine coverage reports
        run: |
          coverage combine
          coverage report -m
          coverage html

      - name: Upload coverage report as artifact
        uses: actions/upload-artifact@v4
        with:
          name: code-coverage-report
          path: htmlcov
          if-no-files-found: error

adios4dolfinx-0.9.3/.github/workflows/test_package_openmpi.yml

name: Test package with openmpi

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  workflow_call:
  workflow_dispatch:
  schedule:
    - cron: "0 8 * * *"

jobs:
  create-datasets:
    uses: ./.github/workflows/create_legacy_data.yml
    with:
      artifact_name: "legacy_ompi"

  create-legacy-datasets:
    uses: ./.github/workflows/create_legacy_checkpoint.yml
    with:
      artifact_name: "legacy_checkpoint_ompi"

  test-code:
    runs-on: ubuntu-latest
    needs: [create-datasets, create-legacy-datasets]
    container: ghcr.io/fenics/test-env:current-openmpi
    env:
      DEB_PYTHON_INSTALL_LAYOUT: deb_system
      PETSC_ARCH: "linux-gnu-real64-32"
      OMPI_ALLOW_RUN_AS_ROOT: 1
      OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1
      PRTE_MCA_rmaps_default_mapping_policy: :oversubscribe
      working-directory: ./src
    strategy:
      matrix:
        adios2: ["default", "v2.10.2"]
    steps:
      - uses: actions/checkout@v4

      - name: Update pip
        run: python3 -m pip install --break-system-packages --upgrade pip setuptools

      - name: Install DOLFINx
        uses: jorgensd/actions/install-dolfinx@v0.4
        with:
          adios2: ${{ matrix.adios2 }}
          petsc_arch: ${{ env.PETSC_ARCH }}
          dolfinx: main
          basix: main
          ufl: main
          ffcx: main
          working-directory: ${{ env.working-directory }}

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_ompi
          path: ./legacy

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_checkpoint_ompi
          path: ./legacy_checkpoint

      - name: Install package
        run: python3 -m pip install --break-system-packages .[test]

      - name: Run tests
        run: |
          coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/

      - name: Run tests in parallel
        run: |
          mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests/

adios4dolfinx-0.9.3/.github/workflows/test_redhat.yml

name: Test package with redhat

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  workflow_call:
  workflow_dispatch:
  schedule:
    - cron: "0 8 * * *"

jobs:
  create-datasets:
    uses: ./.github/workflows/create_legacy_data.yml
    with:
      artifact_name: "legacy_ompi"

  create-legacy-datasets:
    uses: ./.github/workflows/create_legacy_checkpoint.yml
    with:
      artifact_name: "legacy_checkpoint_ompi"

  test-code:
    runs-on: "ubuntu-22.04"
    needs: [create-datasets, create-legacy-datasets]
    container: docker.io/fenicsproject/test-env:current-redhat
    env:
      DEB_PYTHON_INSTALL_LAYOUT: deb_system
      PETSC_ARCH: ""
      PETSC_DIR: "/usr/local/"
      PYTHONPATH: "/usr/local/lib/:${PYTHONPATH}"
      working-directory: ./src
    strategy:
      matrix:
        adios2: ["v2.10.2"]
    steps:
      - uses: actions/checkout@v4

      - name: Get pip flags based on version
        id: python-version
        shell: bash -el {0}
        run: |
          MODERN_PIP=$(python3 -c "import sys; t = sys.version_info >= (3, 11, 0); sys.stdout.write(str(t))")
          if [ ${MODERN_PIP} == "True" ]; then
            FLAGS="--break-system-packages"
          else
            FLAGS=""
            python3 -m pip install --upgrade pip
          fi
          echo "PYTHON_FLAGS=${FLAGS}" >> "$GITHUB_OUTPUT"

      - name: Update pip
        run: python3 -m pip install ${{ steps.python-version.outputs.PYTHON_FLAGS }} --upgrade pip setuptools

      - name: Install DOLFINx
        uses: jorgensd/actions/install-dolfinx@v0.4
        with:
          adios2: ${{ matrix.adios2 }}
          petsc_arch: ${{ env.PETSC_ARCH }}
          petsc_dir: ${{ env.PETSC_DIR }}
          dolfinx: main
          basix: main
          ufl: main
          ffcx: main
          working-directory: ${{ env.working-directory }}

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_ompi
          path: ./legacy

      - name: Download legacy data
        uses: actions/download-artifact@v4
        with:
          name: legacy_checkpoint_ompi
          path: ./legacy_checkpoint

      - name: Install package
        run: python3 -m pip install ${{ steps.python-version.outputs.PYTHON_FLAGS }} --check-build-dependencies .[test]

      - name: Run tests
        run: |
          coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests

      - name: Run tests in parallel
        run: |
          mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest -xvs ./tests

adios4dolfinx-0.9.3/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

_build/
output/
*.h5
.coverage.*
.coverage
*.h5
*.xdmf
*.bp/

adios4dolfinx-0.9.3/.pre-commit-config.yaml

# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: 'v0.9.6'
    hooks:
      # Run the linter.
      - id: ruff
        args: [ --fix ]
      # Run the formatter.
      - id: ruff-format

  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.15.0
    hooks:
      - id: mypy

adios4dolfinx-0.9.3/CITATION.cff

cff-version: "1.2.0"
authors:
  - family-names: Dokken
    given-names: Jørgen Schartum
    orcid: "https://orcid.org/0000-0001-6489-8858"
contact:
  - family-names: Dokken
    given-names: Jørgen Schartum
    orcid: "https://orcid.org/0000-0001-6489-8858"
doi: 10.5281/zenodo.11094985
message: If you use this software, please cite our article in the Journal of Open Source Software.
preferred-citation:
  authors:
    - family-names: Dokken
      given-names: Jørgen Schartum
      orcid: "https://orcid.org/0000-0001-6489-8858"
  date-published: 2024-04-30
  doi: 10.21105/joss.06451
  issn: 2475-9066
  issue: 96
  journal: Journal of Open Source Software
  publisher:
    name: Open Journals
  start: 6451
  title: "ADIOS4DOLFINx: A framework for checkpointing in FEniCS"
  type: article
  url: "https://joss.theoj.org/papers/10.21105/joss.06451"
  volume: 9
title: "ADIOS4DOLFINx: A framework for checkpointing in FEniCS"

adios4dolfinx-0.9.3/CODE_OF_CONDUCT.md

# Code of Conduct

### Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

### Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

### Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

### Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at dokken@simula.no. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
### Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

adios4dolfinx-0.9.3/CONTRIBUTING.md

# Contributor guidelines

When contributing to this repository, please first [create an issue](https://github.com/jorgensd/adios4dolfinx/issues/new/choose) containing information about the missing feature or the bug that you would like to fix. Here you can discuss the change you want to make with the maintainers of the repository.

Please note we have a code of conduct; please follow it in all your interactions with the project.

## New contributor guide

To get an overview of the project, read the [documentation](https://jorgensd.github.io/adios4dolfinx). Here are some resources to help you get started with open source contributions:

- [Finding ways to contribute to open source on GitHub](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github)
- [Set up Git](https://docs.github.com/en/get-started/quickstart/set-up-git)
- [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow)
- [Collaborating with pull requests](https://docs.github.com/en/github/collaborating-with-pull-requests)

## Pull Request Process

### Pull Request

- When you're finished with the changes, create a pull request, also known as a PR. It is also OK to create a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) from the very beginning. Once you are done you can click on the "Ready for review" button. You can also [request a review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from one of the maintainers.
- Don't forget to [link the PR to the issue that you opened](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue).
- Enable the checkbox to [allow maintainer edits](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork) so the branch can be updated for a merge. Once you submit your PR, a team member will review your proposal. We may ask questions or request additional information.
- We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch.
- As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations).
- If you run into any merge issues, check out this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues.
- Please make sure that all tests are passing, GitHub Pages renders nicely, and code coverage is not lower than before your contribution. You can see the different GitHub Actions workflows by clicking the "Actions" tab in the GitHub repository.
Note that for a pull request to be accepted, it has to pass all the tests on CI, which includes:

- `mypy`: type checking
- `ruff`: code formatting
- `pytest`: successful execution of all tests in the `tests` folder.

### Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

adios4dolfinx-0.9.3/LICENSE

Copyright 2023 Jørgen S. Dokken

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

adios4dolfinx-0.9.3/README.md

# ADIOS4DOLFINx - A framework for checkpointing in DOLFINx

![MIT](https://img.shields.io/github/license/jorgensd/adios4dolfinx)
[![DOI](https://joss.theoj.org/papers/10.21105/joss.06451/status.svg)](https://doi.org/10.21105/joss.06451)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/adios4dolfinx/badges/version.svg)](https://anaconda.org/conda-forge/adios4dolfinx)

ADIOS4DOLFINx is an extension for [DOLFINx](https://github.com/FEniCS/dolfinx/) to checkpoint meshes, meshtags and functions using [ADIOS 2](https://adios2.readthedocs.io/en/latest/).

The code uses the ADIOS2 Python-wrappers to write DOLFINx objects to file, supporting N-to-M (_recoverable_) and N-to-N (_snapshot_) checkpointing.

See: [Checkpointing in DOLFINx - FEniCS 23](https://jsdokken.com/checkpointing-presentation/#/) or the examples in the [Documentation](https://jsdokken.com/adios4dolfinx/) for more information.

For scalability, the code uses [MPI Neighbourhood collectives](https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node200.htm) for communication across processes.

## Statement of Need

As the usage of high performance computing clusters increases, more and more large-scale, long-running simulations are deployed. The need for storing intermediate solutions from such simulations is crucial, as the HPC system might crash, or the simulation might crash or exceed the allotted computational budget. Having a checkpoint of related variables, such as the solutions to partial differential equations (PDEs), is therefore essential.
The `adios4dolfinx` library extends the [DOLFINx](https://github.com/FEniCS/dolfinx/) computational framework for solving PDEs with checkpointing functionality, such that intermediate solutions and mesh information can be stored and re-used in another simulation.

## Installation

Compatibility with DOLFINx:

- ADIOS4DOLFINx v0.9.0 is compatible with DOLFINx v0.9.x
- ADIOS4DOLFINx v0.8.1 is compatible with DOLFINx v0.8.x
- ADIOS4DOLFINx v0.7.3 is compatible with DOLFINx v0.7.x

### Dependencies

The library depends on the Python-interface of [DOLFINx](https://github.com/FEniCS/dolfinx/) and an MPI-build of [ADIOS2](https://adios2.readthedocs.io/en/latest/setting_up/setting_up.html#as-package). Therefore `ADIOS2` should not be installed through PyPI/pip, but has to be installed through Conda, Spack or from source.

> [!IMPORTANT]
> ADIOS2<2.10.2 does not work properly with `numpy>=2.0.0`. Everyone is advised to use the newest version of ADIOS2.
> This is for instance available through `conda` or the `ghcr.io/fenics/dolfinx/dolfinx:nightly` Docker-image.

### Docker

An MPI build of ADIOS2 is installed in the official DOLFINx containers, and thus there are no additional dependencies required to install `adios4dolfinx` on top of DOLFINx in these images.

Create a Docker container, named for instance `dolfinx-checkpoint`. Use the `nightly` tag to get the main branch of DOLFINx, or `stable` to get the latest stable release

```bash
docker run -ti -v $(pwd):/root/shared -w /root/shared --name=dolfinx-checkpoint ghcr.io/fenics/dolfinx/dolfinx:nightly
```

For the latest version compatible with nightly (with the ability to run the test suite), use

```bash
python3 -m pip install adios4dolfinx[test]@git+https://github.com/jorgensd/adios4dolfinx@main
```

If you are using the `stable` image, you can install `adios4dolfinx` from [PYPI](https://pypi.org/project/adios4dolfinx/) with

```bash
python3 -m pip install adios4dolfinx[test]
```

This docker container can be opened with

```bash
docker container start -i dolfinx-checkpoint
```

at a later instance

### Conda

> [!NOTE]
> Conda supports the stable release of DOLFINx, and thus the appropriate version should be installed, see the section above for more details.

Following is a minimal recipe of how to install adios4dolfinx, given that you have conda installed on your system.

```bash
conda create -n dolfinx-checkpoint python=3.10
conda activate dolfinx-checkpoint
conda install -c conda-forge adios4dolfinx
```

> [!NOTE]
> Remember to download the appropriate version of `adios4dolfinx` from Github [adios4dolfinx: Releases](https://github.com/jorgensd/adios4dolfinx/releases)

To run the test suite, you should also install `ipyparallel`, `pytest` and `coverage`, which can all be installed with conda

```bash
conda install -c conda-forge ipyparallel pytest coverage
```

## Functionality

### DOLFINx

- Reading and writing meshes, using `adios4dolfinx.read/write_mesh`
- Reading and writing meshtags associated to meshes `adios4dolfinx.read/write_meshtags`
- Reading checkpoints for any element (serial and parallel, arbitrary number of functions and timesteps per file). Use `adios4dolfinx.read/write_function`.
- Writing standalone function checkpoints relating to "original meshes", i.e. meshes read from `XDMFFile`. Use `adios4dolfinx.write_function_on_input_mesh` for this.
- Store mesh partitioning and re-read the mesh with this information, avoiding calling SCOTCH, Kahip or Parmetis.

A minimal write/read round-trip using these functions is sketched below.
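The following sketch composes the functions listed above into a write phase and a read phase; the file name, function name and element choice are illustrative, and the two phases would typically run as separate programs (possibly on different numbers of processes):

```python
from pathlib import Path

from mpi4py import MPI
import dolfinx

import adios4dolfinx

filename = Path("checkpoint.bp")

# Write phase: the mesh must be stored before any functions
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 2))
u = dolfinx.fem.Function(V)
u.interpolate(lambda x: x[0] + x[1])
adios4dolfinx.write_mesh(filename, mesh)
adios4dolfinx.write_function(filename, u, time=0.0, name="u")

# Read phase: re-read the (re-partitioned) mesh, re-create the same
# function space on it, then load the function data
in_mesh = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD)
W = dolfinx.fem.functionspace(in_mesh, ("Lagrange", 2))
u_in = dolfinx.fem.Function(W)
adios4dolfinx.read_function(filename, u_in, time=0.0, name="u")
```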
> [!IMPORTANT]
> For checkpoints written with `write_function` to be valid, you first have to store the mesh with `write_mesh` to the checkpoint file.

> [!IMPORTANT]
> A checkpoint file supports multiple functions and multiple time steps, as long as the functions are associated with the same mesh

> [!IMPORTANT]
> Only one mesh per file is allowed

## Example Usage

The repository contains many documented examples of usage, in the `docs`-folder, including

- [Reading and writing mesh checkpoints](./docs/writing_mesh_checkpoint.py)
- [Storing mesh partitioning data](./docs/partitioned_mesh.py)
- [Writing mesh-tags to a checkpoint](./docs/meshtags.py)
- [Reading and writing function checkpoints](./docs/writing_functions_checkpoint.py)
- [Checkpoint on input mesh](./docs/original_checkpoint.py)

Further examples can be found at [ADIOS4DOLFINx examples](https://jsdokken.com/adios4dolfinx/)

### Backwards compatibility

> [!WARNING]
> If you are using v0.7.2, you are advised to upgrade to v0.7.3, as it contains some crucial fixes for OpenMPI.

### Legacy DOLFIN

Only checkpoints for `Lagrange` or `DG` functions are supported from legacy DOLFIN

- Reading meshes from the DOLFIN HDF5File-format
- Reading checkpoints from the DOLFIN HDF5File-format (one checkpoint per file only)
- Reading checkpoints from the DOLFIN XDMFFile-format (one checkpoint per file only, and only uses the `.h5` file)

See the [API](./docs/api) for more information.

## Testing

This library uses `pytest` for testing. To execute the tests, one should first install the library and its dependencies, as listed above. Then, one can execute all tests by calling

```bash
python3 -m pytest .
```

### Testing against data from legacy dolfin

Some tests check the capability of reading data created with the legacy version of DOLFIN. To create this dataset, start a docker container with legacy DOLFIN, for instance:

```bash
docker run -ti -v $(pwd):/root/shared -w /root/shared --rm ghcr.io/scientificcomputing/fenics:2024-02-19
```

Then, inside this container, call

```bash
python3 ./tests/create_legacy_data.py --output-dir=legacy
```

### Testing against data from older versions of ADIOS4DOLFINx

Some tests check the capability to read data generated by `adios4dolfinx<0.7.2`. To generate data for these tests use the following commands:

```bash
docker run -ti -v $(pwd):/root/shared -w /root/shared --rm ghcr.io/fenics/dolfinx/dolfinx:v0.7.3
```

Then, inside the container, call

```bash
python3 -m pip install adios4dolfinx==0.7.1
python3 ./tests/create_legacy_checkpoint.py --output-dir=legacy_checkpoint
```

## Long term plan

The long term plan is to get this library merged into DOLFINx (rewritten in C++ with appropriate Python-bindings).

adios4dolfinx-0.9.3/_config.yml

# Book settings
# Learn more at https://jupyterbook.org/customize/config.html
title: ADIOS2Wrappers
author: Jørgen S. Dokken
logo: "docs/logo.png"
copyright: "2023"
only_build_toc_files: true

# Force re-execution of notebooks on each build.
# See https://jupyterbook.org/content/execute.html
execute:
  execute_notebooks: cache

# Information about where the book exists on the web
repository:
  url: https://github.com/jorgensd/adios4dolfinx # Online location of your book
  branch: main

html:
  use_issues_button: true
  use_repository_button: true

parse:
  myst_enable_extensions:
    - amsmath
    - dollarmath
    - linkify

sphinx:
  extra_extensions:
    - 'sphinx.ext.autodoc'
    - 'sphinx.ext.napoleon'
    - 'sphinx.ext.viewcode'
  config:
    html_last_updated_fmt: "%b %d, %Y"
    nb_custom_formats:
      .py:
        - jupytext.reads
        - fmt: py

exclude_patterns: [".pytest_cache/*"]

adios4dolfinx-0.9.3/_toc.yml

format: jb-book
root: README
parts:
  - caption: Introduction to IPyParallel
    chapters:
      - file: "docs/ipyparallel_intro"
  - caption: Writing and reading mesh data
    chapters:
      - file: "docs/writing_mesh_checkpoint"
      - file: "docs/partitioned_mesh"
      - file: "docs/time_dependent_mesh"
      - file: "docs/meshtags"
  - caption: Writing and reading functions
    chapters:
      - file: "docs/writing_functions_checkpoint"
      - file: "docs/snapshot_checkpoint"
      - file: "docs/original_checkpoint"
  - caption: Python API
    chapters:
      - file: "docs/api"

adios4dolfinx-0.9.3/docs/api.rst

API Reference
=============

.. automodule:: adios4dolfinx
   :members:

adios4dolfinx-0.9.3/docs/ipyparallel_intro.py

# # Introduction to IPython parallel
# The following demos heavily rely on IPython-parallel to illustrate how checkpointing works when
# using multiple MPI processes.
# We illustrate what happens in parallel by launching three MPI processes
# using [ipyparallel](https://ipyparallel.readthedocs.io/en/latest/)

import logging

import ipyparallel as ipp


def hello_mpi():
    # We define all imports inside the function as they have to be launched on the remote engines
    from mpi4py import MPI

    print(f"Hello from rank {MPI.COMM_WORLD.rank}/{MPI.COMM_WORLD.size - 1}")


with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
    # We send the query to run the function `hello_mpi` on all engines
    query = cluster[:].apply_async(hello_mpi)
    # We wait for all engines to finish
    query.wait()
    # We check that all engines exited successfully
    assert query.successful(), query.error
    # We print the output from each engine
    print("".join(query.stdout))

adios4dolfinx-0.9.3/docs/logo.png

[Binary PNG image data (the project logo); not representable as text.]
adios4dolfinx-0.9.3/docs/meshtags.py

# # Writing MeshTags data to a checkpoint file
# In many scenarios, the mesh used in a checkpoint is not trivial, and subdomains and sub-entities
# have been tagged with appropriate markers.
# As the mesh gets redistributed when read
# (see [Writing Mesh Checkpoint](./writing_mesh_checkpoint)),
# we need to store any tags together with this new mesh.
# As an example we will use a unit-cube, where each entity has been tagged with a unique index.

import logging
from pathlib import Path

from mpi4py import MPI

import dolfinx
import ipyparallel as ipp
import numpy as np

import adios4dolfinx

assert MPI.COMM_WORLD.size == 1, "This example should only be run with 1 MPI process"

mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, nx=3, ny=4, nz=5)

# We start by computing the unique global index of each (owned) entity in the mesh
# as well as its corresponding midpoint

entity_midpoints = {}
meshtags = {}
for i in range(mesh.topology.dim + 1):
    mesh.topology.create_entities(i)
    e_map = mesh.topology.index_map(i)

    # Compute midpoints of entities
    entities = np.arange(e_map.size_local, dtype=np.int32)
    mesh.topology.create_connectivity(i, mesh.topology.dim)
    entity_midpoints[i] = dolfinx.mesh.compute_midpoints(mesh, i, entities)
    # Associate each local index with its global index
    values = np.arange(e_map.size_local, dtype=np.int32) + e_map.local_range[0]
    meshtags[i] = dolfinx.mesh.meshtags(mesh, i, entities, values)

# We use adios4dolfinx to write the mesh and meshtags to file.
# We associate each meshtag with a name
filename = Path("mesh_with_meshtags.bp")
adios4dolfinx.write_mesh(filename, mesh)
for i, tag in meshtags.items():
    adios4dolfinx.write_meshtags(filename, mesh, tag, meshtag_name=f"meshtags_{i}")

# Next we want to read the meshtags in on a different number of processes,
# and check that the midpoints of each entity are still correct


def verify_meshtags(filename: Path):
    # We assume that entity_midpoints have been sent to the engine
    from mpi4py import MPI

    import dolfinx
    import numpy as np

    import adios4dolfinx

    read_mesh = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD)
    prefix = f"{read_mesh.comm.rank + 1}/{read_mesh.comm.size}: "
    for i in range(read_mesh.topology.dim + 1):
        # Read mesh from file
        meshtags = adios4dolfinx.read_meshtags(filename, read_mesh, meshtag_name=f"meshtags_{i}")

        # Compute midpoints for all local entities on process
        read_mesh.topology.create_connectivity(i, read_mesh.topology.dim)
        midpoints = dolfinx.mesh.compute_midpoints(read_mesh, i, meshtags.indices)
        # Compare locally computed midpoint with reference data
        for global_pos, midpoint in zip(meshtags.values, midpoints):
            np.testing.assert_allclose(
                entity_midpoints[i][global_pos],
                midpoint,
                err_msg=f"{prefix}: Midpoint ({i, global_pos}) do not match",
            )
        print(f"{prefix} Matching of all entities of dimension {i} successful")


with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster:
    cluster[:].push({"entity_midpoints": entity_midpoints})
    query = cluster[:].apply_async(verify_meshtags, filename)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

adios4dolfinx-0.9.3/docs/original_checkpoint.py

# # Checkpoint on input mesh
# As we have discussed earlier, one can choose to store function data in a way that
# is N-to-M compatible by using `adios4dolfinx.write_checkpoint`.
# This stores the distributed mesh in its current (partitioned) ordering, and does
# not use the original input data ordering for the cells and connectivity.
# This means that you cannot use your original mesh (from `.xdmf` files) or mesh tags
# together with the checkpoint. The checkpoint has to store the mesh and associated
# mesh-tags.

# An optional way of storing an N-to-M checkpoint is to store the function data in the same
# ordering as the mesh. The write operation will be more expensive, as it requires data
# communication to ensure contiguous data being written to the checkpoint.
# The method is exposed as `adios4dolfinx.write_function_on_input_mesh`.
# Below we will demonstrate this method.
import logging
from pathlib import Path
from typing import Tuple

import ipyparallel as ipp


def locate_facets(x, tol=1.0e-12):
    return abs(x[0]) < tol


def create_xdmf_mesh(filename: Path):
    from mpi4py import MPI

    import dolfinx

    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)
    facets = dolfinx.mesh.locate_entities_boundary(mesh, mesh.topology.dim - 1, locate_facets)
    facet_tag = dolfinx.mesh.meshtags(mesh, mesh.topology.dim - 1, facets, 1)
    facet_tag.name = "FacetTag"
    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, filename.with_suffix(".xdmf"), "w") as xdmf:
        xdmf.write_mesh(mesh)
        xdmf.write_meshtags(facet_tag, mesh.geometry)
    print(f"{mesh.comm.rank + 1}/{mesh.comm.size} Mesh written to {filename.with_suffix('.xdmf')}")


mesh_file = Path("MyMesh.xdmf")
with ipp.Cluster(engines="mpi", n=4, log_level=logging.ERROR) as cluster:
    # Create a mesh and write to XDMFFile
    cluster[:].push({"locate_facets": locate_facets})
    query = cluster[:].apply_async(create_xdmf_mesh, mesh_file)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

# Next, we will create a function on the mesh and write it to a checkpoint.


def f(x):
    return (x[0] + x[1]) * (x[0] < 0.5), x[1], x[2] - x[1]


def write_function(
    mesh_filename: Path, function_filename: Path, element: Tuple[str, int, Tuple[int,]]
):
    from mpi4py import MPI

    import dolfinx

    import adios4dolfinx

    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_filename, "r") as xdmf:
        mesh = xdmf.read_mesh()
    V = dolfinx.fem.functionspace(mesh, element)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)
    adios4dolfinx.write_function_on_input_mesh(
        function_filename.with_suffix(".bp"),
        u,
        mode=adios4dolfinx.adios2_helpers.adios2.Mode.Write,
        time=0.0,
        name="Output",
    )
    print(
        f"{mesh.comm.rank + 1}/{mesh.comm.size} Function written to ",
        f"{function_filename.with_suffix('.bp')}",
    )


# Read in mesh and write function to file
element = ("DG", 4, (3,))
function_file = Path("MyFunction.bp")
with ipp.Cluster(engines="mpi", n=2, log_level=logging.ERROR) as cluster:
    cluster[:].push({"f": f})
    query = cluster[:].apply_async(write_function, mesh_file, function_file, element)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

# Finally, we will read in the mesh from file and the function from the checkpoint
# and compare it with the analytical solution.
def verify_checkpoint(
    mesh_filename: Path, function_filename: Path, element: Tuple[str, int, Tuple[int,]]
):
    from mpi4py import MPI

    import dolfinx
    import numpy as np

    import adios4dolfinx

    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_filename, "r") as xdmf:
        in_mesh = xdmf.read_mesh()
    V = dolfinx.fem.functionspace(in_mesh, element)
    u_in = dolfinx.fem.Function(V)
    adios4dolfinx.read_function(function_filename.with_suffix(".bp"), u_in, time=0.0, name="Output")

    # Compute exact interpolation
    u_ex = dolfinx.fem.Function(V)
    u_ex.interpolate(f)

    np.testing.assert_allclose(u_in.x.array, u_ex.x.array)
    print(
        "Successfully read checkpoint onto mesh on rank ",
        f"{in_mesh.comm.rank + 1}/{in_mesh.comm.size}",
    )


# Verify checkpoint by comparing to exact solution
with ipp.Cluster(engines="mpi", n=5, log_level=logging.ERROR) as cluster:
    cluster[:].push({"f": f})
    query = cluster[:].apply_async(verify_checkpoint, mesh_file, function_file, element)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

adios4dolfinx-0.9.3/docs/partitioned_mesh.py

# # Storing mesh partition
# The mesh data is re-ordered when reading in a mesh, as the mesh is partitioned.
# This means that when storing the mesh to disk from DOLFINx, the geometry and
# connectivity arrays are re-ordered.
# If we want to avoid re-partitioning the mesh every time we run a simulation
# (on a fixed number of processes), we can store the partitioning of the mesh
# in the checkpoint.

import logging
from pathlib import Path

import ipyparallel as ipp


def write_partitioned_mesh(filename: Path):
    import subprocess

    from mpi4py import MPI

    import dolfinx

    import adios4dolfinx

    # Create a simple unit square mesh
    mesh = dolfinx.mesh.create_unit_square(
        MPI.COMM_WORLD,
        10,
        10,
        cell_type=dolfinx.mesh.CellType.quadrilateral,
        ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
    )

    # Write mesh checkpoint
    adios4dolfinx.write_mesh(filename, mesh, engine="BP4", store_partition_info=True)
    # Inspect checkpoint on rank 0 with `bpls`
    if mesh.comm.rank == 0:
        output = subprocess.run(["bpls", "-a", "-l", filename], capture_output=True)
        print(output.stdout.decode("utf-8"))


# We inspect the partitioned mesh
mesh_file = Path("partitioned_mesh.bp")
n = 3

with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster:
    query = cluster[:].apply_async(write_partitioned_mesh, mesh_file)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

# # Reading a partitioned mesh
# If we try to read the mesh in on a different number of processes, we will get an error


def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
    from mpi4py import MPI

    import adios4dolfinx

    prefix = f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: "
    try:
        mesh = adios4dolfinx.read_mesh(
            filename, comm=MPI.COMM_WORLD, engine="BP4", read_from_partition=read_from_partition
        )
        print(f"{prefix} Mesh: {mesh.name} read successfully with {read_from_partition=}")
    except ValueError as e:
        print(f"{prefix} Caught exception: ", e)


with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster:
    # Read mesh from file with different number of processes
    query = cluster[:].apply_async(read_partitioned_mesh, mesh_file)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

# Read mesh from file with different number of processes (not using partitioning information).
with ipp.Cluster(engines="mpi", n=n + 1, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False) query.wait() assert query.successful(), query.error print("".join(query.stdout)) # Read mesh from file with same number of processes as was written, # re-using partitioning information. with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster: query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True) query.wait() assert query.successful(), query.error print("".join(query.stdout)) adios4dolfinx-0.9.3/docs/snapshot_checkpoint.py000066400000000000000000000045711477331607400216650ustar00rootroot00000000000000# # Snapshot checkpoint (non-persistent) # The checkpoint method described in [Writing function checkpoints](./writing_functions_checkpoint) # are *N-to-M*, meaning that you can write them out on N-processes and read them in on M processes. # # As discussed in that chapter, these checkpoints need to be associated with a mesh. # This is because the function is defined on a specific function space, which in turn is # defined on a specific mesh. # # However, there are certain scenarios where you simply want to store a checkpoint associated # with the current mesh, that should only be possible to use during this simulation. # An example use-case is when running an iterative solver, and wanting a fall-back mechanism that # does not require extra RAM. # In this example, we will demonstrate how to write a snapshot checkpoint to disk. # First we define a function `f` that we want to represent in the function space import logging from pathlib import Path import ipyparallel as ipp def f(x): import numpy as np return np.sin(x[0]) + 0.1 * x[1] # Next, we create a mesh and an appropriate function space and read and write from file def read_write_snapshot(filename: Path): from mpi4py import MPI import dolfinx import numpy as np import adios4dolfinx mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 3, 7, 4) V = dolfinx.fem.functionspace(mesh, ("Lagrange", 5)) u = dolfinx.fem.Function(V) u.interpolate(f) u.name = "Current_solution" # Next, we store the solution to file adios4dolfinx.snapshot_checkpoint(u, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Write) # Next, we create a new function and load the solution into it u_new = dolfinx.fem.Function(V) u_new.name = "Read_solution" adios4dolfinx.snapshot_checkpoint( u_new, filename, adios4dolfinx.adios2_helpers.adios2.Mode.Read ) # Next, we verify that the solution is correct np.testing.assert_allclose(u_new.x.array, u.x.array) print(f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: Successfully wrote and read snapshot") mesh_file = Path("snapshot.bp") with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster: cluster[:].push({"f": f}) query = cluster[:].apply_async( read_write_snapshot, mesh_file, ) query.wait() assert query.successful(), query.error print("".join(query.stdout)) adios4dolfinx-0.9.3/docs/time_dependent_mesh.py000066400000000000000000000060711477331607400216140ustar00rootroot00000000000000# # Time-dependent mesh checkpoints # As we have seen in the previous examples, we store information about the connectivity, # the coordinates of the mesh nodes, # as well as a reference element. Note that the only thing that can change for a mesh # during a simulation are the coordinate of the mesh nodes. # In the following example, we will demonstrate how to write a time-dependent mesh # checkpoint to disk. 
# First, we create a simple function to compute the volume of a mesh

import logging
from pathlib import Path

from mpi4py import MPI

import ipyparallel as ipp

import adios4dolfinx


def compute_volume(mesh, time_stamp):
    from mpi4py import MPI

    import dolfinx
    import ufl

    # Compute the volume of the mesh
    vol_form = dolfinx.fem.form(1 * ufl.dx(domain=mesh))
    vol_local = dolfinx.fem.assemble_scalar(vol_form)
    vol_glob = mesh.comm.allreduce(vol_local, op=MPI.SUM)
    if mesh.comm.rank == 0:
        print(f"{mesh.comm.rank + 1}/{mesh.comm.size} Time: {time_stamp} Mesh Volume: {vol_glob}")


def write_meshes(filename: Path):
    from mpi4py import MPI

    import dolfinx
    import numpy as np

    import adios4dolfinx

    # Create a unit cube
    mesh = dolfinx.mesh.create_unit_cube(
        MPI.COMM_WORLD,
        3,
        6,
        5,
        cell_type=dolfinx.mesh.CellType.hexahedron,
        ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
    )

    # Write mesh to file, associated with time stamp 1.5
    adios4dolfinx.write_mesh(filename, mesh, engine="BP4", time=1.5)
    compute_volume(mesh, 1.5)
    mesh.geometry.x[:, 0] += 0.1 * mesh.geometry.x[:, 0]
    mesh.geometry.x[:, 1] += 0.3 * mesh.geometry.x[:, 1] * np.sin(mesh.geometry.x[:, 2])
    compute_volume(mesh, 3.3)
    # Write mesh to file, associated with time stamp 3.3
    # Note that we set the mode to append, as we have already created the file
    # and we do not want to overwrite the existing data
    adios4dolfinx.write_mesh(
        filename, mesh, engine="BP4", time=3.3, mode=adios4dolfinx.adios2_helpers.adios2.Mode.Append
    )


# We write the sequence of meshes to file
mesh_file = Path("timedep_mesh.bp")
n = 3

with ipp.Cluster(engines="mpi", n=n, log_level=logging.ERROR) as cluster:
    # Write mesh to file
    cluster[:].push({"compute_volume": compute_volume})
    query = cluster[:].apply_async(write_meshes, mesh_file)
    query.wait()
    assert query.successful(), query.error
    print("".join(query.stdout))

# # Reading a time dependent mesh
# The only thing we need to do to read the mesh is to send in the associated time stamp.

second_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=3.3)
compute_volume(second_mesh, 3.3)

first_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=1.5)
compute_volume(first_mesh, 1.5)

# We observe that the volume of the mesh has changed, as we have perturbed the mesh
# between the two time stamps.
# We also note that we can read the meshes in on a different number of processes than
# we wrote them with and in a different order (as long as the time stamps are correct).

adios4dolfinx-0.9.3/docs/writing_functions_checkpoint.py

# # Writing a function checkpoint
# In the previous sections, we have gone into quite some detail as to how
# to store meshes with adios4dolfinx.
# This section will explain how to store functions, and how to read them back in.
# We start by creating a mesh import logging from pathlib import Path from mpi4py import MPI import dolfinx import ipyparallel as ipp import adios4dolfinx assert MPI.COMM_WORLD.size == 1, "This example should only be run with 1 MPI process" mesh = dolfinx.mesh.create_unit_square( MPI.COMM_WORLD, nx=10, ny=10, cell_type=dolfinx.cpp.mesh.CellType.quadrilateral ) # Next, we create a function, and interpolate a polynomial function into the function space el = "N1curl" degree = 3 V = dolfinx.fem.functionspace(mesh, (el, degree)) def f(x): return -(x[1] ** 2), x[0] - 2 * x[1] u = dolfinx.fem.Function(V) u.interpolate(f) # For the checkpointing, we start by storing the mesh to file filename = Path("function_checkpoint.bp") adios4dolfinx.write_mesh(filename, mesh) # Next, we store the function to file, and associate it with a name. # Note that we can also associate a time stamp with it, as done for meshes in # [Writing time-dependent mesh checkpoint](./time_dependent_mesh) adios4dolfinx.write_function(filename, u, time=0.3, name="my_curl_function") # Next, we want to read the function back in (using multiple MPI processes) # and check that the function is correct. def read_function(filename: Path, timestamp: float): from mpi4py import MPI import dolfinx import numpy as np import adios4dolfinx in_mesh = adios4dolfinx.read_mesh(filename, MPI.COMM_WORLD) W = dolfinx.fem.functionspace(in_mesh, (el, degree)) u_ref = dolfinx.fem.Function(W) u_ref.interpolate(f) u_in = dolfinx.fem.Function(W) adios4dolfinx.read_function(filename, u_in, time=timestamp, name="my_curl_function") np.testing.assert_allclose(u_ref.x.array, u_in.x.array, atol=1e-14) print( f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: ", f"Function read in correctly at time {timestamp}", ) with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster: cluster[:].push({"f": f, "el": el, "degree": degree}) query = cluster[:].apply_async(read_function, filename, 0.3) query.wait() assert query.successful(), query.error print("".join(query.stdout)) adios4dolfinx-0.9.3/docs/writing_mesh_checkpoint.py000066400000000000000000000112471477331607400225230ustar00rootroot00000000000000# # Writing a mesh checkpoint # # In this example, we will demonstrate how to write a mesh checkpoint to disk. # # We start by creating a simple unit-square mesh. import logging from pathlib import Path from mpi4py import MPI import dolfinx import ipyparallel as ipp mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10) # Note that when a mesh is created in DOLFINx, we send in an MPI communicator. # The communicator is used to partition (distribute) the mesh across the available processes. # This means that each process only has access to a subset of cells and nodes of the mesh. # We can inspect these with the following commands: def print_mesh_info(mesh: dolfinx.mesh.Mesh): cell_map = mesh.topology.index_map(mesh.topology.dim) node_map = mesh.geometry.index_map() print( f"Rank {mesh.comm.rank}: number of owned cells {cell_map.size_local}", f", number of ghosted cells {cell_map.num_ghosts}\n", f"Number of owned nodes {node_map.size_local}", f", number of ghosted nodes {node_map.num_ghosts}", ) print_mesh_info(mesh) # ## Create a distributed mesh # Next, we can use IPython parallel to inspect a partitioned mesh. # We create a convenience function for creating a mesh that shares cells on the boundary # between two processes if `ghosted=True`.
def create_distributed_mesh(ghosted: bool, N: int = 10): """ Create a distributed mesh with N x N cells. Share cells on process boundaries if ghosted is set to True """ from mpi4py import MPI import dolfinx ghost_mode = dolfinx.mesh.GhostMode.shared_facet if ghosted else dolfinx.mesh.GhostMode.none mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N, ghost_mode=ghost_mode) print(f"{ghost_mode=}") print_mesh_info(mesh) # Next, we start up a new cluster with three engines. # As we defined `print_mesh_info` locally on this process, we need to push it to all engines. with ipp.Cluster(engines="mpi", n=3, log_level=logging.ERROR) as cluster: # Push print_mesh_info to all engines cluster[:].push({"print_mesh_info": print_mesh_info}) # Create mesh with ghosted cells query_true = cluster[:].apply_async(create_distributed_mesh, True) query_true.wait() assert query_true.successful(), query_true.error print("".join(query_true.stdout)) # Create mesh without ghosted cells query_false = cluster[:].apply_async(create_distributed_mesh, False) query_false.wait() assert query_false.successful(), query_false.error print("".join(query_false.stdout)) # ## Writing a mesh checkpoint # The input data for a mesh are: # - A geometry: the set of points in R^D that are part of each cell # - A two-dimensional connectivity array: A list that indicates which nodes of the geometry # are part of each cell # - A reference element: Used for pushing data back and forth between the reference and # physical element, and for computing Jacobians # We now use adios4dolfinx to write a mesh to file. def write_mesh(filename: Path): import subprocess from mpi4py import MPI import dolfinx import adios4dolfinx # Create a simple unit square mesh mesh = dolfinx.mesh.create_unit_square( MPI.COMM_WORLD, 10, 10, cell_type=dolfinx.mesh.CellType.quadrilateral ) # Write mesh checkpoint adios4dolfinx.write_mesh(filename, mesh, engine="BP4") # Inspect checkpoint on rank 0 with `bpls` if mesh.comm.rank == 0: output = subprocess.run(["bpls", "-a", "-l", str(filename.absolute())], capture_output=True) print(output.stdout.decode("utf-8")) mesh_file = Path("mesh.bp") with ipp.Cluster(engines="mpi", n=2, log_level=logging.ERROR) as cluster: # Write mesh to file query = cluster[:].apply_async(write_mesh, mesh_file) query.wait() assert query.successful(), query.error print("".join(query.stdout)) # We observe that we have stored all the data needed to re-create the mesh in the file `mesh.bp`. # We can therefore read it (on any number of processes) with `adios4dolfinx.read_mesh` def read_mesh(filename: Path): from mpi4py import MPI import dolfinx import adios4dolfinx mesh = adios4dolfinx.read_mesh( filename, comm=MPI.COMM_WORLD, engine="BP4", ghost_mode=dolfinx.mesh.GhostMode.none ) print_mesh_info(mesh) # ## Reading mesh checkpoints (N-to-M) # We can now read the checkpoint on a different number of processes than we wrote it on.
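# Note that `read_mesh` also lets us choose the ghost mode at read time: the helper above # uses `dolfinx.mesh.GhostMode.none`, while `GhostMode.shared_facet` (the default) would # work equally well. As a commented sketch (assuming the `mesh_file` written above), the # same checkpoint could even be read on a single process: # # mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_SELF, engine="BP4")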
with ipp.Cluster(engines="mpi", n=4, log_level=logging.ERROR) as cluster: # Read mesh from file cluster[:].push({"print_mesh_info": print_mesh_info}) query = cluster[:].apply_async(read_mesh, mesh_file) query.wait() assert query.successful(), query.error print("".join(query.stdout)) adios4dolfinx-0.9.3/joss-paper/000077500000000000000000000000001477331607400163715ustar00rootroot00000000000000adios4dolfinx-0.9.3/joss-paper/README.md000066400000000000000000000002161477331607400176470ustar00rootroot00000000000000# How to generate the paper ```bash docker run --rm --volume $(pwd):/data --user $(id -u):$(id -g) --env JOURNAL=joss openjournals/inara ``` adios4dolfinx-0.9.3/joss-paper/joss-checklist.md000066400000000000000000000062601477331607400216440ustar00rootroot00000000000000### Conflict of interest - [x] I confirm that I have read the [JOSS conflict of interest policy](https://joss.readthedocs.io/en/latest/submitting.html#conflict-of-interest-policy-for-authors) and that: I have no COIs with reviewing this work or that any perceived COIs have been waived by JOSS for the purpose of this review. ### Code of Conduct - [x] I confirm that I read and will adhere to the [JOSS code of conduct](https://joss.theoj.org/about#code_of_conduct). ### General checks - [x] **Repository:** Is the source code for this software available at the repository url? - [x] **License:** Does the repository contain a plain-text LICENSE file with the contents of an [OSI approved](https://opensource.org/licenses/alphabetical) software license? - [x] **Contribution and authorship:** Has the submitting author made major contributions to the software? Does the full list of paper authors seem appropriate and complete? ### Functionality - [x] **Installation:** Does installation proceed as outlined in the documentation? - [x] **Functionality:** Have the functional claims of the software been confirmed? - [ ] **Performance:** If there are any performance claims of the software, have they been confirmed? (If there are no claims, please check off this item.) ### Documentation - [x] **A statement of need:** Do the authors clearly state what problems the software is designed to solve and who the target audience is? - [x] **Installation instructions:** Is there a clearly-stated list of dependencies? Ideally these should be handled with an automated package management solution. - [x] **Example usage:** Do the authors include examples of how to use the software (ideally to solve real-world analysis problems). - [x] **Functionality documentation:** Is the core functionality of the software documented to a satisfactory level (e.g., API method documentation)? - [x] **Automated tests:** Are there automated tests or manual steps described so that the functionality of the software can be verified? - [x] **Community guidelines:** Are there clear guidelines for third parties wishing to 1) Contribute to the software 2) Report issues or problems with the software 3) Seek support ### Software paper - [x] **Summary:** Has a clear description of the high-level functionality and purpose of the software for a diverse, non-specialist audience been provided? - [x] **A statement of need:** Does the paper have a section titled 'Statement of need' that clearly states what problems the software is designed to solve, who the target audience is, and its relation to other work? - [x] **State of the field:** Do the authors describe how this software compares to other commonly-used packages?
- [x] **Quality of writing:** Is the paper well written (i.e., it does not require editing for structure, language, or writing quality)? - [x] **References:** Is the list of references complete, and is everything cited appropriately that should be cited (e.g., papers, datasets, software)? Do references in the text use the proper [citation syntax]( https://rmarkdown.rstudio.com/authoring_bibliographies_and_citations.html#citation_syntax)? adios4dolfinx-0.9.3/joss-paper/paper.bib000066400000000000000000000065341477331607400201660ustar00rootroot00000000000000@unpublished{Baratta:2023, author = {Baratta, Igor A. and Dean, Joseph P. and Dokken, Jørgen S. and Habera, Michal and Hale, Jack and Richardson, Chris N. and Rognes, Marie E. and Scroggs, Matthew W. and Sime, Nathan and Wells, Garth N.}, title = {DOLFINx: The next generation FEniCS problem solving environment}, language = {English}, year = {2023}, doi = {10.5281/zenodo.10447666} } @article{Godoy:2020, title = {ADIOS 2: The Adaptable Input Output System. A framework for high-performance data management}, journal = {SoftwareX}, volume = {12}, pages = {100561}, year = {2020}, issn = {2352-7110}, doi = {10.1016/j.softx.2020.100561}, author = {William F. Godoy and Norbert Podhorszki and Ruonan Wang and Chuck Atkins and Greg Eisenhauer and Junmin Gu and Philip Davis and Jong Choi and Kai Germaschewski and Kevin Huck and Axel Huebl and Mark Kim and James Kress and Tahsin Kurc and Qing Liu and Jeremy Logan and Kshitij Mehta and George Ostrouchov and Manish Parashar and Franz Poeschel and David Pugmire and Eric Suchyta and Keichi Takahashi and Nick Thompson and Seiji Tsutsumi and Lipeng Wan and Matthew Wolf and Kesheng Wu and Scott Klasky} } @conference{Habera:2018, author = {Habera, Michal and Zilian, Andreas and Hale, Jack and Richardson, Chris N. and Blechta, Jan and Dave, Demarle}, year = {2018}, title = {{XDMF and ParaView: checkpointing format}}, booktitle = {{FEniCS Conference 2018: Book of Abstracts}}, url = {https://hdl.handle.net/10993/35848} } @misc{Ham:2024, title = {Efficient N-to-M Checkpointing Algorithm for Finite Element Simulations}, author = {David A. Ham and Vaclav Hapla and Matthew G. Knepley and Lawrence Mitchell and Koki Sagiyama}, year = {2024}, eprint = {2401.05868}, archiveprefix = {arXiv}, doi = {10.48550/arXiv.2401.05868} } @article{Rathgeber:2016, author = {Rathgeber, Florian and Ham, David A. and Mitchell, Lawrence and Lange, Michael and Luporini, Fabio and Mcrae, Andrew T. T. and Bercea, Gheorghe-Teodor and Markall, Graham R. and Kelly, Paul H. J.}, title = {Firedrake: Automating the Finite Element Method by Composing Abstractions}, year = {2016}, issue_date = {September 2017}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {43}, number = {3}, issn = {0098-3500}, doi = {10.1145/2998441}, journal = {ACM Transactions on Mathematical Software}, month = {dec}, articleno = {24}, numpages = {27} } @article{Scroggs:2022, author = {Scroggs, Matthew W. and Dokken, J\o{}rgen S. and Richardson, Chris N. 
and Wells, Garth N.}, title = {Construction of Arbitrary Order Finite Element Degree-of-Freedom Maps on Polygonal and Polyhedral Cell Meshes}, year = {2022}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {48}, number = {2}, issn = {0098-3500}, doi = {10.1145/3524456}, journal = {ACM Transactions on Mathematical Software}, month = {may}, articleno = {18}, numpages = {23} } @misc{MPI-Forum:2012, author = {MPI-Forum}, year = {2012}, title = {{MPI: A Message-Passing Interface Standard. Version 3.0}}, url = {https://www.mpi-forum.org/docs/mpi-3.0/mpi30-report.pdf} }adios4dolfinx-0.9.3/joss-paper/paper.md000066400000000000000000000154711477331607400200300ustar00rootroot00000000000000--- title: "ADIOS4DOLFINx: A framework for checkpointing in FEniCS" tags: - Python - finite element simulations - checkpointing authors: - name: Jørgen Schartum Dokken orcid: 0000-0001-6489-8858 corresponding: true affiliation: 1 affiliations: - name: Simula Research Laboratory, Oslo, Norway index: 1 date: 30 April 2024 bibliography: paper.bib --- # Summary We introduce ADIOS4DOLFINx, a checkpointing framework for the latest version of the FEniCS project, known as DOLFINx. DOLFINx is a general framework for solving partial differential equations using the finite element method. The inputs to simulations using the finite element method are the computational domain (mesh), mesh markers, initial conditions, and boundary conditions. To be able to restart a simulation at any point, one must be able to read in all of the aforementioned variables. The adios4dolfinx package implements all of these operations, using the Message Passing Interface (MPI) for communication across multiple processes and ADIOS2 for writing/reading data to/from file. In particular, the functionality of adios4dolfinx includes *N-to-M* checkpointing, which means that one can store the result of a simulation that was generated with N processes and read it into a program running on M processes. # Statement of need The ability to start, stop, and resume simulations is becoming increasingly important with the growing use of supercomputers for solving scientific and engineering problems. A rising number of large-scale problems are deployed on high-performance, distributed-memory computing systems and users tend to run more demanding simulations. These are often non-linear and time-dependent, which typically amounts to thousands of CPU hours. As intermediate results might uncover bugs and unphysical solutions, the ability to run parts of the simulation, inspect the result, and then resume the simulation becomes a key factor in enabling efficient development. If a problem is discovered early on, the simulation can be terminated, saving the developer time, money, and energy usage. ADIOS4DOLFINx enables users of the FEniCS project [@Baratta:2023] to store solutions during simulation, and read them in at their convenience to resume simulations at a later stage. Several checkpointing methods are implemented, including *N-to-M* checkpointing, which means saving data from a program executed with N processes, and loading it back in on M processes. Functionality for *N-to-M* checkpointing was implemented for the old version of DOLFIN by @Habera:2018. However, this functionality is not present in the newest version of the FEniCS Project [@Baratta:2023]. The storage principles in ADIOS4DOLFINx are based on the ideas present in that implementation.
However, the implementation for non-Lagrangian finite element spaces differs vastly, due to the usage of dof-permutations [@Scroggs:2022]. Additionally, all global MPI calls in the old implementation have been reimplemented with scalable MPI communication using the MPI-3 Neighborhood Collectives [@MPI-Forum:2012]. The framework introduces several new methods for storing partitioning information for *N-to-N* checkpointing with arbitrary ghosting, as well as very lightweight snapshot checkpoints. A similar framework for *N-to-M* checkpointing was implemented by @Ham:2024 for the finite element framework Firedrake [@Rathgeber:2016]. This framework differs from the one used in ADIOS4DOLFINx in several ways due to the different internal structures of DOLFINx and Firedrake. # Functionality The software is written as a Python extension to DOLFINx, which can be installed using the Python package installer `pip`, either directly from the GitHub repository or as [ADIOS4DOLFINx](https://pypi.org/project/adios4dolfinx/) from the Python Package Index (PyPI). The following features are supported: - Snapshot checkpointing - *N-to-M* checkpointing with mesh storage - *N-to-M* checkpointing without mesh storage - *N-to-N* checkpointing storing partitioning information A *snapshot checkpoint* is a checkpoint that is only valid during the run of a simulation. It is lightweight (only storing the local portion of the global dof array to file), and is stored using the *Local Array* feature in ADIOS2 [@Godoy:2020], which stores data local to each MPI process. This feature is intended for use cases where many solutions have to be aggregated at the end of a simulation for some post-processing step, or as a fall-back mechanism when restarting a diverging iterative solver. An *N-to-M* checkpoint is a checkpoint that can be written with N processes and read back in with M processes. Two versions of this checkpoint are supported: one where storage of the mesh is required and one without mesh storage. The reasoning for such a split is that when a mesh is read into DOLFINx and passed to an appropriate partitioner, the ordering of the mesh nodes (coordinates) and connectivity (cells) is changed. Writing these back into *global arrays* requires MPI communication to ensure contiguous writing of data. The *N-to-M* checkpoint with mesh storage exclusively writes contiguous chunks of data owned by the current process to an ADIOS2 *Global Array* that can be read in with a different number of processes at a later stage. This operation requires no MPI communication. In many cases, the input mesh might stem from an external mesh generator and is stored together with mesh entity markers in an external file, for instance an XDMF file. To avoid duplication of this mesh data, a standalone file that can be associated with the XDMF file for a later restart can be created. This method requires some MPI neighborhood collective calls to move data from the process that currently owns it to the relevant process, which stores it as a *Global Array* in contiguous chunks. Both *N-to-M* checkpoint routines use the same API to read in checkpoints at a later stage. In certain scenarios, mesh partitioning might be time-consuming, for instance when a developer runs the same problem over and over again with the same number of processes. As DOLFINx supports custom partitioning [@Baratta:2023], we use this feature to read in partition data from a previous run.
As opposed to the checkpoints in the old version of DOLFIN, these checkpoints handle any ghosting, be it a custom ghosting provided by the user or the shared-facet ghost mode provided by DOLFINx. # Examples A large variety of examples covering all the functions in ADIOS4DOLFINx is available at [https://jorgensd.github.io/adios4dolfinx](https://jorgensd.github.io/adios4dolfinx). # Acknowledgements We acknowledge the valuable feedback on the documentation and manuscript by Thomas M. Surowiec and Halvor Herlyng, and the packaging support by Min Ragan-Kelley. Additionally, we acknowledge the scientific discussion regarding feature development and code contributions by Francesco Ballarin, Henrik N. Finsberg, and Nathan Sime. # References adios4dolfinx-0.9.3/pyproject.toml000066400000000000000000000027031477331607400172240ustar00rootroot00000000000000[build-system] # Require setuptools version due to https://github.com/pypa/setuptools/issues/2938 requires = ["setuptools>=61.0.0", "wheel"] [project] name = "adios4dolfinx" version = "0.9.3" description = "Checkpointing functionality for DOLFINx meshes/functions with ADIOS2" authors = [{ name = "Jørgen S. Dokken", email = "dokken@simula.no" }] license = { file = "LICENSE" } readme = "README.md" dependencies = ["fenics-dolfinx>=0.9.0", "packaging"] [project.optional-dependencies] test = ["pytest", "coverage", "ipyparallel"] dev = ["pdbpp", "ipython", "mypy", "ruff"] docs = ["jupyter-book", "ipyparallel", "ipywidgets", "jupytext"] all = ["adios4dolfinx[test,dev,docs]"] [tool.pytest.ini_options] addopts = ["--import-mode=importlib"] testpaths = ["tests"] [tool.mypy] ignore_missing_imports = true # Folders to exclude exclude = ["docs/", "build/"] # Folder to check with mypy files = ["src", "tests"] [tool.ruff] src = ["src", "tests", "docs"] line-length = 100 indent-width = 4 [tool.ruff.lint] select = [ # Pyflakes "F", # Pycodestyle "E", "W", # isort "I001", ] [tool.ruff.lint.isort] known-first-party = ["adios4dolfinx"] known-third-party = [ "basix", "dolfinx", "ffcx", "ufl", "gmsh", "numpy", "pytest", ] section-order = [ "future", "standard-library", "mpi", "third-party", "first-party", "local-folder", ] [tool.ruff.lint.isort.sections] "mpi" = ["mpi4py", "petsc4py"] adios4dolfinx-0.9.3/src/000077500000000000000000000000001477331607400150755ustar00rootroot00000000000000adios4dolfinx-0.9.3/src/adios4dolfinx/000077500000000000000000000000001477331607400176445ustar00rootroot00000000000000adios4dolfinx-0.9.3/src/adios4dolfinx/__init__.py000066400000000000000000000022401477331607400217530ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT """Top-level package for ADIOS2Wrappers.""" from importlib.metadata import metadata from .checkpointing import ( read_attributes, read_function, read_mesh, read_meshtags, read_timestamps, write_attributes, write_function, write_mesh, write_meshtags, ) from .legacy_readers import read_function_from_legacy_h5, read_mesh_from_legacy_h5 from .original_checkpoint import write_function_on_input_mesh, write_mesh_input_order from .snapshot import snapshot_checkpoint meta = metadata("adios4dolfinx") __version__ = meta["Version"] __author__ = meta.get("Author", "") __license__ = meta["License"] __email__ = meta["Author-email"] __program_name__ = meta["Name"] __all__ = [ "write_meshtags", "read_meshtags", "read_mesh", "write_mesh", "read_function_from_legacy_h5", "read_mesh_from_legacy_h5", "write_function", "read_function", "snapshot_checkpoint",
"write_function_on_input_mesh", "write_mesh_input_order", "write_attributes", "read_attributes", "read_timestamps", ] adios4dolfinx-0.9.3/src/adios4dolfinx/adios2_helpers.py000066400000000000000000000235451477331607400231320ustar00rootroot00000000000000from __future__ import annotations from contextlib import contextmanager from pathlib import Path from typing import NamedTuple, Union from mpi4py import MPI import adios2 import dolfinx.cpp.graph import dolfinx.graph import numpy as np import numpy.typing as npt from .utils import compute_local_range, valid_function_types def resolve_adios_scope(adios2): scope = adios2.bindings if hasattr(adios2, "bindings") else adios2 if not scope.is_built_with_mpi: raise ImportError("ADIOS2 must be built with MPI support") return scope adios2 = resolve_adios_scope(adios2) """ Helpers reading/writing data with ADIOS2 """ __all__ = ["read_array", "read_adjacency_list", "read_cell_perms", "adios_to_numpy_dtype"] adios_to_numpy_dtype = { "float": np.float32, "double": np.float64, "float complex": np.complex64, "double complex": np.complex128, "uint32_t": np.uint32, } class AdiosFile(NamedTuple): io: adios2.IO file: adios2.Engine @contextmanager def ADIOSFile( adios: adios2.ADIOS, filename: Union[Path, str], engine: str, mode: adios2.Mode, io_name: str, ): io = adios.DeclareIO(io_name) io.SetEngine(engine) file = io.Open(str(filename), mode) try: yield AdiosFile(io=io, file=file) finally: file.Close() adios.RemoveIO(io_name) def read_cell_perms( adios: adios2.ADIOS, comm: MPI.Intracomm, filename: Union[Path, str], variable: str, num_cells_global: np.int64, engine: str, ) -> npt.NDArray[np.uint32]: """ Read cell permutation from file with given communicator, Split in continuous chunks based on number of cells in the mesh (global). Args: adios: The ADIOS instance comm: The MPI communicator used to read the data filename: Path to input file variable: Name of cell-permutation variable num_cells_global: Number of cells in the mesh (global) engine: Type of ADIOS engine to use for reading data Returns: Cell-permutations local to the process .. note:: No MPI communication is done during this call """ # Open ADIOS engine io_name = f"{variable=}_reader" with ADIOSFile( adios=adios, engine=engine, filename=filename, mode=adios2.Mode.Read, io_name=io_name, ) as adios_file: # Find step that has cell permutation for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if variable in adios_file.io.AvailableVariables().keys(): break adios_file.file.EndStep() if variable not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Variable {variable} not found in '{filename}'") # Get variable and get global shape perm_var = adios_file.io.InquireVariable(variable) shape = perm_var.Shape() assert len(shape) == 1 # Get local selection local_cell_range = compute_local_range(comm, num_cells_global) perm_var.SetSelection([[local_cell_range[0]], [local_cell_range[1] - local_cell_range[0]]]) in_perm = np.empty( local_cell_range[1] - local_cell_range[0], dtype=adios_to_numpy_dtype[perm_var.Type()], ) adios_file.file.Get(perm_var, in_perm, adios2.Mode.Sync) adios_file.file.EndStep() return in_perm def read_adjacency_list( adios: adios2.ADIOS, comm: MPI.Intracomm, filename: Union[Path, str], dofmap: str, dofmap_offsets: str, num_cells_global: np.int64, engine: str, ) -> Union[dolfinx.cpp.graph.AdjacencyList_int64, dolfinx.cpp.graph.AdjacencyList_int32]: """ Read an adjacency-list from an ADIOS file with given communicator. 
The adjacency list is split into a flat array (data) and a corresponding offsets array. Args: adios: The ADIOS instance comm: The MPI communicator used to read the data filename: Path to input file dofmap: Name of variable containing dofmap dofmap_offsets: Name of variable containing dofmap_offsets num_cells_global: Number of cells in the mesh (global) engine: Type of ADIOS engine to use for reading data Returns: The local part of dofmap from input dofs .. note:: No MPI communication is done during this call """ local_cell_range = compute_local_range(comm, num_cells_global) # Open ADIOS engine io_name = f"{dofmap=}_reader" with ADIOSFile( adios=adios, engine=engine, filename=filename, mode=adios2.Mode.Read, io_name=io_name, ) as adios_file: # First find step with dofmap offsets, to be able to read # in a full row of the dofmap for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if dofmap_offsets in adios_file.io.AvailableVariables().keys(): break adios_file.file.EndStep() if dofmap_offsets not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Dof offsets not found at '{dofmap_offsets}' in {filename}") # Get global shape of dofmap-offset, and read in data with an overlap d_offsets = adios_file.io.InquireVariable(dofmap_offsets) shape = d_offsets.Shape() assert len(shape) == 1 # As the offsets are one longer than the number of cells, we need to read in with an overlap d_offsets.SetSelection( [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]] ) in_offsets = np.empty( local_cell_range[1] + 1 - local_cell_range[0], dtype=d_offsets.Type().strip("_t"), ) adios_file.file.Get(d_offsets, in_offsets, adios2.Mode.Sync) # Assuming the dofmap is saved in the same step # Get the relevant part of the dofmap if dofmap not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Dofmap not found at {dofmap} in {filename}") cell_dofs = adios_file.io.InquireVariable(dofmap) cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) adios_file.file.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) in_offsets -= in_offsets[0] adios_file.file.EndStep() # Return local dofmap return dolfinx.graph.adjacencylist(in_dofmap, in_offsets.astype(np.int32)) def read_array( adios: adios2.ADIOS, filename: Union[Path, str], array_name: str, engine: str, comm: MPI.Intracomm, time: float = 0.0, time_name: str = "", legacy: bool = False, ) -> tuple[npt.NDArray[valid_function_types], int]: """ Read an array from file, returning the local part of the array and its global starting position. Args: adios: The ADIOS instance filename: Path to file to read array from array_name: Name of array in file engine: Name of engine to use to read file comm: MPI communicator used for reading the data time: Time stamp to read the array at time_name: Name of time variable for modern checkpoints legacy: If True ignore time_name and read the first available step Returns: Local part of array and its global starting position """ with ADIOSFile( adios=adios, engine=engine, filename=filename, mode=adios2.Mode.Read, io_name="ArrayReader", ) as adios_file: # For legacy checkpoints, find the first available step containing the array if legacy: for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if array_name in adios_file.io.AvailableVariables().keys(): break adios_file.file.EndStep() if array_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"No array found at {array_name}") else: for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if
time_name in adios_file.io.AvailableVariables().keys(): arr = adios_file.io.InquireVariable(time_name) time_shape = arr.Shape() arr.SetSelection([[0], [time_shape[0]]]) times = np.empty(time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()]) adios_file.file.Get(arr, times, adios2.Mode.Sync) if times[0] == time: break if i == adios_file.file.Steps() - 1: raise KeyError( f"No data associated with {time_name}={time} found in {filename}" ) adios_file.file.EndStep() if time_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"No data associated with {time_name}={time} found in {filename}") if array_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"No array found at {time=} for {array_name}") arr = adios_file.io.InquireVariable(array_name) arr_shape = arr.Shape() assert len(arr_shape) >= 1 # TODO: Should we always pick the first element? arr_range = compute_local_range(comm, arr_shape[0]) if len(arr_shape) == 1: arr.SetSelection([[arr_range[0]], [arr_range[1] - arr_range[0]]]) vals = np.empty(arr_range[1] - arr_range[0], dtype=adios_to_numpy_dtype[arr.Type()]) else: arr.SetSelection([[arr_range[0], 0], [arr_range[1] - arr_range[0], arr_shape[1]]]) vals = np.empty( (arr_range[1] - arr_range[0], arr_shape[1]), dtype=adios_to_numpy_dtype[arr.Type()], ) assert arr_shape[1] == 1 adios_file.file.Get(arr, vals, adios2.Mode.Sync) adios_file.file.EndStep() return vals.reshape(-1), arr_range[0] adios4dolfinx-0.9.3/src/adios4dolfinx/checkpointing.py000066400000000000000000000754011477331607400230520ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT from __future__ import annotations import typing from pathlib import Path from mpi4py import MPI import adios2 import basix import dolfinx import numpy as np import numpy.typing as npt import ufl from packaging.version import Version from .adios2_helpers import ( ADIOSFile, adios_to_numpy_dtype, read_adjacency_list, read_array, read_cell_perms, resolve_adios_scope, ) from .comm_helpers import ( send_and_recv_cell_perm, send_dofmap_and_recv_values, send_dofs_and_recv_values, ) from .structures import FunctionData, MeshData from .utils import ( compute_dofmap_pos, compute_local_range, index_owner, unroll_dofmap, unroll_insert_position, ) from .writers import write_function as _internal_function_writer from .writers import write_mesh as _internal_mesh_writer adios2 = resolve_adios_scope(adios2) __all__ = [ "read_mesh_data", "read_mesh", "write_function", "read_function", "write_mesh", "read_meshtags", "write_meshtags", "read_attributes", "write_attributes", ] def check_file_exists(filename: typing.Union[Path, str]): """Check if file exists.""" if not Path(filename).exists(): raise FileNotFoundError(f"{filename} not found") def write_attributes( filename: typing.Union[Path, str], comm: MPI.Intracomm, name: str, attributes: dict[str, np.ndarray], engine: str = "BP4", ): """Write attributes to file using ADIOS2.
Args: filename: Path to file to write to comm: MPI communicator used in storage name: Name of the attributes attributes: Dictionary of attributes to write to file engine: ADIOS2 engine to use """ adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Append, engine=engine, io_name="AttributesWriter", ) as adios_file: adios_file.file.BeginStep() for k, v in attributes.items(): adios_file.io.DefineAttribute(f"{name}_{k}", v) adios_file.file.PerformPuts() adios_file.file.EndStep() def read_attributes( filename: typing.Union[Path, str], comm: MPI.Intracomm, name: str, engine: str = "BP4", ) -> dict[str, np.ndarray]: """Read attributes from file using ADIOS2. Args: filename: Path to file to read from comm: MPI communicator used in storage name: Name of the attributes engine: ADIOS2 engine to use Returns: The attributes """ check_file_exists(filename) adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="AttributesReader", ) as adios_file: adios_file.file.BeginStep() attributes = {} for k in adios_file.io.AvailableAttributes().keys(): if k.startswith(f"{name}_"): a = adios_file.io.InquireAttribute(k) attributes[k[len(name) + 1 :]] = a.Data() adios_file.file.EndStep() return attributes def read_timestamps( filename: typing.Union[Path, str], comm: MPI.Intracomm, function_name: str, engine="BP4" ) -> npt.NDArray[np.float64]: """ Read time-stamps from a checkpoint file. Args: filename: Path to file comm: MPI communicator function_name: Name of the function to read time-stamps for engine: ADIOS2 engine Returns: The time-stamps """ check_file_exists(filename) adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="TimestepReader", ) as adios_file: time_name = f"{function_name}_time" time_stamps = [] for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if time_name in adios_file.io.AvailableVariables().keys(): arr = adios_file.io.InquireVariable(time_name) time_shape = arr.Shape() arr.SetSelection([[0], [time_shape[0]]]) times = np.empty( time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()], ) adios_file.file.Get(arr, times, adios2.Mode.Sync) time_stamps.append(times[0]) adios_file.file.EndStep() return np.array(time_stamps) def write_meshtags( filename: typing.Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtags: dolfinx.mesh.MeshTags, engine: str = "BP4", meshtag_name: typing.Optional[str] = None, ): """ Write meshtags associated with input mesh to file. .. note:: For this checkpoint to work, the mesh must be written to file using :func:`write_mesh` before calling this function. Args: filename: Path to save meshtags (with file-extension) mesh: The mesh associated with the meshtags meshtags: The meshtags to write to file engine: Adios2 Engine meshtag_name: Name of the meshtag. If None, `meshtags.name` is used.
""" tag_entities = meshtags.indices dim = meshtags.dim num_tag_entities_local = mesh.topology.index_map(dim).size_local local_tag_entities = tag_entities[tag_entities < num_tag_entities_local] local_values = meshtags.values[: len(local_tag_entities)] num_saved_tag_entities = len(local_tag_entities) local_start = mesh.comm.exscan(num_saved_tag_entities, op=MPI.SUM) local_start = local_start if mesh.comm.rank != 0 else 0 global_num_tag_entities = mesh.comm.allreduce(num_saved_tag_entities, op=MPI.SUM) dof_layout = mesh.geometry.cmap.create_dof_layout() num_dofs_per_entity = dof_layout.num_entity_closure_dofs(dim) entities_to_geometry = dolfinx.cpp.mesh.entities_to_geometry( mesh._cpp_object, dim, local_tag_entities, False ) indices = mesh.geometry.index_map().local_to_global(entities_to_geometry.reshape(-1)) name = meshtag_name or meshtags.name adios = adios2.ADIOS(mesh.comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Append, engine=engine, io_name="MeshTagWriter", ) as adios_file: adios_file.file.BeginStep() # Write meshtag topology topology_var = adios_file.io.DefineVariable( name + "_topology", indices, shape=[global_num_tag_entities, num_dofs_per_entity], start=[local_start, 0], count=[num_saved_tag_entities, num_dofs_per_entity], ) adios_file.file.Put(topology_var, indices, adios2.Mode.Sync) # Write meshtag topology values_var = adios_file.io.DefineVariable( name + "_values", local_values, shape=[global_num_tag_entities], start=[local_start], count=[num_saved_tag_entities], ) adios_file.file.Put(values_var, local_values, adios2.Mode.Sync) # Write meshtag dim adios_file.io.DefineAttribute(name + "_dim", np.array([meshtags.dim], dtype=np.uint8)) adios_file.file.PerformPuts() adios_file.file.EndStep() def read_meshtags( filename: typing.Union[Path, str], mesh: dolfinx.mesh.Mesh, meshtag_name: str, engine: str = "BP4", ) -> dolfinx.mesh.MeshTags: """ Read meshtags from file and return a :class:`dolfinx.mesh.MeshTags` object. 
Args: filename: Path to meshtags file (with file-extension) mesh: The mesh associated with the meshtags meshtag_name: The name of the meshtag to read engine: Adios2 Engine Returns: The meshtags """ check_file_exists(filename) adios = adios2.ADIOS(mesh.comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="MeshTagsReader", ) as adios_file: # Get meshtag dimension dim_attr_name = f"{meshtag_name}_dim" step = 0 for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if dim_attr_name in adios_file.io.AvailableAttributes().keys(): step = i break adios_file.file.EndStep() if dim_attr_name not in adios_file.io.AvailableAttributes().keys(): raise KeyError(f"{dim_attr_name} not found in {filename}") m_dim = adios_file.io.InquireAttribute(dim_attr_name) dim = int(m_dim.Data()[0]) # Get meshtag entities topology_name = f"{meshtag_name}_topology" for i in range(step, adios_file.file.Steps()): if i > step: adios_file.file.BeginStep() if topology_name in adios_file.io.AvailableVariables().keys(): break adios_file.file.EndStep() if topology_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"{topology_name} not found in {filename}") topology = adios_file.io.InquireVariable(topology_name) top_shape = topology.Shape() topology_range = compute_local_range(mesh.comm, top_shape[0]) topology.SetSelection( [ [topology_range[0], 0], [topology_range[1] - topology_range[0], top_shape[1]], ] ) mesh_entities = np.empty( (topology_range[1] - topology_range[0], top_shape[1]), dtype=np.int64 ) adios_file.file.Get(topology, mesh_entities, adios2.Mode.Deferred) # Get meshtag values values_name = f"{meshtag_name}_values" if values_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"{values_name} not found") values = adios_file.io.InquireVariable(values_name) val_shape = values.Shape() assert val_shape[0] == top_shape[0] values.SetSelection([[topology_range[0]], [topology_range[1] - topology_range[0]]]) tag_values = np.empty((topology_range[1] - topology_range[0]), dtype=np.int32) adios_file.file.Get(values, tag_values, adios2.Mode.Deferred) adios_file.file.PerformGets() adios_file.file.EndStep() local_entities, local_values = dolfinx.io.distribute_entity_data( mesh, int(dim), mesh_entities.astype(np.int32), tag_values ) mesh.topology.create_connectivity(dim, 0) mesh.topology.create_connectivity(dim, mesh.topology.dim) adj = dolfinx.graph.adjacencylist(local_entities) local_values = np.array(local_values, dtype=np.int32) mt = dolfinx.mesh.meshtags_from_entities(mesh, int(dim), adj, local_values) mt.name = meshtag_name return mt def read_function( filename: typing.Union[Path, str], u: dolfinx.fem.Function, engine: str = "BP4", time: float = 0.0, legacy: bool = False, name: typing.Optional[str] = None, ): """ Read checkpoint from file and fill it into `u`.
Args: filename: Path to checkpoint u: Function to fill engine: ADIOS engine type used for reading time: Time-stamp associated with checkpoint legacy: Set to True if the checkpoint was written prior to time-dependent writing name: If not provided, `u.name` is used to search through the input file for the function """ check_file_exists(filename) mesh = u.function_space.mesh comm = mesh.comm adios = adios2.ADIOS(comm) if name is None: name = u.name # Check that file contains the function to read if not legacy: with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="FunctionReader", ) as adios_file: variables = set( sorted( map( lambda x: x.split("_time")[0], filter(lambda x: x.endswith("_time"), adios_file.io.AvailableVariables()), ) ) ) if name not in variables: raise KeyError(f"{name} not found in {filename}. Did you mean one of {variables}?") # ----------------------Step 1--------------------------------- # Compute index of input cells and get cell permutation num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local input_cells = mesh.topology.original_cell_index[:num_owned_cells] mesh.topology.create_entity_permutations() cell_perm = mesh.topology.get_cell_permutation_info()[:num_owned_cells] # 1.1 Compute mesh->input communicator num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global owners = index_owner(mesh.comm, input_cells, num_cells_global) # -------------------Step 2------------------------------------ # Send and receive global cell index and cell perm inc_cells, inc_perms = send_and_recv_cell_perm(input_cells, cell_perm, owners, mesh.comm) # -------------------Step 3----------------------------------- # Read dofmap from file and compute dof owners if legacy: dofmap_path = "Dofmap" xdofmap_path = "XDofmap" else: dofmap_path = f"{name}_dofmap" xdofmap_path = f"{name}_XDofmap" input_dofmap = read_adjacency_list( adios, comm, filename, dofmap_path, xdofmap_path, num_cells_global, engine ) # Compute owner of dofs in dofmap num_dofs_global = ( u.function_space.dofmap.index_map.size_global * u.function_space.dofmap.index_map_bs ) dof_owner = index_owner(comm, input_dofmap.array, num_dofs_global) # --------------------Step 4----------------------------------- # Read array from file and communicate them to input dofmap process if legacy: array_path = "Values" else: array_path = f"{name}_values" time_name = f"{name}_time" input_array, starting_pos = read_array( adios, filename, array_path, engine, comm, time, time_name, legacy=legacy ) recv_array = send_dofs_and_recv_values( input_dofmap.array, dof_owner, comm, input_array, starting_pos ) # -------------------Step 5-------------------------------------- # Invert permutation of input data based on input perm # Then apply current permutation to the local data element = u.function_space.element if element.needs_dof_transformations: bs = u.function_space.dofmap.bs # Read input cell permutations on dofmap process local_input_range = compute_local_range(comm, num_cells_global) input_local_cell_index = inc_cells - local_input_range[0] input_perms = read_cell_perms( adios, comm, filename, "CellPermutations", num_cells_global, engine ) # Start by sorting data array by cell permutation num_dofs_per_cell = input_dofmap.offsets[1:] - input_dofmap.offsets[:-1] assert np.allclose(num_dofs_per_cell, num_dofs_per_cell[0]) # Sort dofmap by input local cell index input_perms_sorted = input_perms[input_local_cell_index] unrolled_dofmap_position =
unroll_insert_position( input_local_cell_index, num_dofs_per_cell[0] ) dofmap_sorted_by_input = recv_array[unrolled_dofmap_position] # First invert input data to reference element then transform to current mesh element.Tt_apply(dofmap_sorted_by_input, input_perms_sorted, bs) element.Tt_inv_apply(dofmap_sorted_by_input, inc_perms, bs) # Compute invert permutation inverted_perm = np.empty_like(unrolled_dofmap_position) inverted_perm[unrolled_dofmap_position] = np.arange( len(unrolled_dofmap_position), dtype=inverted_perm.dtype ) recv_array = dofmap_sorted_by_input[inverted_perm] # ------------------Step 6---------------------------------------- # For each dof owned by a process, find the local position in the dofmap. V = u.function_space local_cells, dof_pos = compute_dofmap_pos(V) input_cells = V.mesh.topology.original_cell_index[local_cells] num_cells_global = V.mesh.topology.index_map(V.mesh.topology.dim).size_global owners = index_owner(V.mesh.comm, input_cells, num_cells_global) unique_owners, owner_count = np.unique(owners, return_counts=True) # FIXME: In C++ use NBX to find neighbourhood sub_comm = V.mesh.comm.Create_dist_graph( [V.mesh.comm.rank], [len(unique_owners)], unique_owners, reorder=False ) source, dest, _ = sub_comm.Get_dist_neighbors() sub_comm.Free() owned_values = send_dofmap_and_recv_values( comm, np.asarray(source, dtype=np.int32), np.asarray(dest, dtype=np.int32), owners, owner_count.astype(np.int32), input_cells, dof_pos, num_cells_global, recv_array, input_dofmap.offsets, ) u.x.array[: len(owned_values)] = owned_values u.x.scatter_forward() def read_mesh_data( filename: typing.Union[Path, str], comm: MPI.Intracomm, engine: str = "BP4", ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet, time: float = 0.0, legacy: bool = False, read_from_partition: bool = False, ) -> tuple[np.ndarray, np.ndarray, ufl.Mesh, typing.Callable]: """ Read an ADIOS2 mesh data for use with DOLFINx. Args: filename: Path to input file comm: The MPI communicator to distribute the mesh over engine: ADIOS engine to use for reading (BP4, BP5 or HDF5) ghost_mode: Ghost mode to use for mesh. If `read_from_partition` is set to `True`, this option is ignored.
time: Time stamp associated with mesh legacy: Set to True if the checkpoint was made prior to the time-dependent mesh writer read_from_partition: Read mesh with partition from file Returns: The mesh topology, geometry, UFL domain and partition function """ check_file_exists(filename) adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="MeshReader", ) as adios_file: # Get time-independent mesh variables (mesh topology and cell type info) first adios_file.file.BeginStep() # Get mesh topology (distributed) if "Topology" not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Mesh topology not found at Topology in {filename}") topology = adios_file.io.InquireVariable("Topology") shape = topology.Shape() local_range = compute_local_range(comm, shape[0]) topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) mesh_topology = np.empty((local_range[1] - local_range[0], shape[1]), dtype=np.int64) adios_file.file.Get(topology, mesh_topology, adios2.Mode.Deferred) # Check validity of partitioning information if read_from_partition: if "PartitionProcesses" not in adios_file.io.AvailableAttributes().keys(): raise KeyError(f"Partitioning information not found in {filename}") par_num_procs = adios_file.io.InquireAttribute("PartitionProcesses") num_procs = par_num_procs.Data()[0] if num_procs != comm.size: raise ValueError(f"Number of processes in file ({num_procs})!=({comm.size=})") # Get mesh cell type if "CellType" not in adios_file.io.AvailableAttributes().keys(): raise KeyError(f"Mesh cell type not found at CellType in {filename}") celltype = adios_file.io.InquireAttribute("CellType") cell_type = celltype.DataString()[0] # Get basix info if "LagrangeVariant" not in adios_file.io.AvailableAttributes().keys(): raise KeyError(f"Mesh LagrangeVariant not found in {filename}") lvar = adios_file.io.InquireAttribute("LagrangeVariant").Data()[0] if "Degree" not in adios_file.io.AvailableAttributes().keys(): raise KeyError(f"Mesh degree not found in {filename}") degree = adios_file.io.InquireAttribute("Degree").Data()[0] if not legacy: time_name = "MeshTime" for i in range(adios_file.file.Steps()): if i > 0: adios_file.file.BeginStep() if time_name in adios_file.io.AvailableVariables().keys(): arr = adios_file.io.InquireVariable(time_name) time_shape = arr.Shape() arr.SetSelection([[0], [time_shape[0]]]) times = np.empty(time_shape[0], dtype=adios_to_numpy_dtype[arr.Type()]) adios_file.file.Get(arr, times, adios2.Mode.Sync) if times[0] == time: break if i == adios_file.file.Steps() - 1: raise KeyError( f"No data associated with {time_name}={time} found in {filename}" ) adios_file.file.EndStep() if time_name not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"No data associated with {time_name}={time} found in {filename}") # Get mesh geometry if "Points" not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Mesh coordinates not found at Points in {filename}") geometry = adios_file.io.InquireVariable("Points") x_shape = geometry.Shape() geometry_range = compute_local_range(comm, x_shape[0]) geometry.SetSelection( [ [geometry_range[0], 0], [geometry_range[1] - geometry_range[0], x_shape[1]], ] ) mesh_geometry = np.empty( (geometry_range[1] - geometry_range[0], x_shape[1]), dtype=adios_to_numpy_dtype[geometry.Type()], ) adios_file.file.Get(geometry, mesh_geometry, adios2.Mode.Deferred) adios_file.file.PerformGets() adios_file.file.EndStep() # Create DOLFINx mesh element =
basix.ufl.element( basix.ElementFamily.P, cell_type, degree, basix.LagrangeVariant(int(lvar)), shape=(mesh_geometry.shape[1],), dtype=mesh_geometry.dtype, ) domain = ufl.Mesh(element) if read_from_partition: partition_graph = read_adjacency_list( adios, comm, filename, "PartitioningData", "PartitioningOffset", shape[0], engine ) def partitioner(comm: MPI.Intracomm, n, m, topo): assert len(topo[0]) % (len(partition_graph.offsets) - 1) == 0 if Version(dolfinx.__version__) > Version("0.9.0"): return partition_graph._cpp_object else: return partition_graph else: partitioner = dolfinx.cpp.mesh.create_cell_partitioner(ghost_mode) return mesh_topology, mesh_geometry, domain, partitioner def read_mesh( filename: typing.Union[Path, str], comm: MPI.Intracomm, engine: str = "BP4", ghost_mode: dolfinx.mesh.GhostMode = dolfinx.mesh.GhostMode.shared_facet, time: float = 0.0, legacy: bool = False, read_from_partition: bool = False, ) -> dolfinx.mesh.Mesh: """ Read an ADIOS2 mesh into DOLFINx. Args: filename: Path to input file comm: The MPI communicator to distribute the mesh over engine: ADIOS engine to use for reading (BP4, BP5 or HDF5) ghost_mode: Ghost mode to use for mesh. If `read_from_partition` is set to `True`, this option is ignored. time: Time stamp associated with mesh legacy: Set to True if the checkpoint was made prior to the time-dependent mesh writer read_from_partition: Read mesh with partition from file Returns: The distributed mesh """ check_file_exists(filename) return dolfinx.mesh.create_mesh( comm, *read_mesh_data( filename, comm, engine=engine, ghost_mode=ghost_mode, time=time, legacy=legacy, read_from_partition=read_from_partition, ), ) def write_mesh( filename: Path, mesh: dolfinx.mesh.Mesh, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Write, time: float = 0.0, store_partition_info: bool = False, ): """ Write a mesh to specified ADIOS2 format, see: https://adios2.readthedocs.io/en/stable/engines/engines.html for possible formats.
Args: filename: Path to save mesh (with file-extension) mesh: The mesh to write to file engine: Adios2 Engine mode: ADIOS2 mode (write or append) time: Time stamp associated with mesh store_partition_info: Store mesh partitioning (including ghosting) to file """ num_xdofs_local = mesh.geometry.index_map().size_local num_xdofs_global = mesh.geometry.index_map().size_global geometry_range = mesh.geometry.index_map().local_range gdim = mesh.geometry.dim # Convert local connectivity to global connectivity g_imap = mesh.geometry.index_map() g_dmap = mesh.geometry.dofmap num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global cell_range = mesh.topology.index_map(mesh.topology.dim).local_range cmap = mesh.geometry.cmap geom_layout = cmap.create_dof_layout() num_dofs_per_cell = geom_layout.num_entity_closure_dofs(mesh.topology.dim) dofs_out = np.zeros((num_cells_local, num_dofs_per_cell), dtype=np.int64) assert g_dmap.shape[1] == num_dofs_per_cell dofs_out[:, :] = np.asarray( g_imap.local_to_global(g_dmap[:num_cells_local, :].reshape(-1)) ).reshape(dofs_out.shape) if store_partition_info: partition_processes = mesh.comm.size # Get partitioning if Version(dolfinx.__version__) > Version("0.9.0"): consensus_tag = 1202 cell_map = mesh.topology.index_map(mesh.topology.dim).index_to_dest_ranks(consensus_tag) else: cell_map = mesh.topology.index_map(mesh.topology.dim).index_to_dest_ranks() num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local cell_offsets = cell_map.offsets[: num_cells_local + 1] if cell_offsets[-1] == 0: cell_array = np.empty(0, dtype=np.int32) else: cell_array = cell_map.array[: cell_offsets[-1]] # Compute adjacency with current process as first entry ownership_array = np.full(num_cells_local + cell_offsets[-1], -1, dtype=np.int32) ownership_offset = cell_offsets + np.arange(len(cell_offsets), dtype=np.int32) ownership_array[ownership_offset[:-1]] = mesh.comm.rank insert_position = np.flatnonzero(ownership_array == -1) ownership_array[insert_position] = cell_array partition_map = dolfinx.common.IndexMap(mesh.comm, ownership_array.size) ownership_offset += partition_map.local_range[0] partition_range = partition_map.local_range partition_global = partition_map.size_global else: partition_processes = None ownership_array = None ownership_offset = None partition_range = None partition_global = None mesh_data = MeshData( local_geometry=mesh.geometry.x[:num_xdofs_local, :gdim].copy(), local_geometry_pos=geometry_range, num_nodes_global=num_xdofs_global, local_topology=dofs_out, local_topology_pos=cell_range, num_cells_global=num_cells_global, cell_type=mesh.topology.cell_name(), degree=mesh.geometry.cmap.degree, lagrange_variant=mesh.geometry.cmap.variant, store_partition=store_partition_info, partition_processes=partition_processes, ownership_array=ownership_array, ownership_offset=ownership_offset, partition_range=partition_range, partition_global=partition_global, ) _internal_mesh_writer( filename, mesh.comm, mesh_data, engine, mode=mode, time=time, io_name="MeshWriter", ) def write_function( filename: typing.Union[Path, str], u: dolfinx.fem.Function, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, name: typing.Optional[str] = None, ): """ Write function checkpoint to file. Args: u: Function to write to file filename: Path to write to engine: ADIOS2 engine mode: Write or append. time: Time-stamp for simulation name: Name of function to write. If None, `u.name` is used.
""" dofmap = u.function_space.dofmap values = u.x.array mesh = u.function_space.mesh comm = mesh.comm mesh.topology.create_entity_permutations() cell_perm = mesh.topology.get_cell_permutation_info() num_cells_local = mesh.topology.index_map(mesh.topology.dim).size_local local_cell_range = mesh.topology.index_map(mesh.topology.dim).local_range num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global # Convert local dofmap into global_dofmap dmap = dofmap.list num_dofs_per_cell = dmap.shape[1] dofmap_bs = dofmap.bs num_dofs_local_dmap = num_cells_local * num_dofs_per_cell * dofmap_bs index_map_bs = dofmap.index_map_bs # Unroll dofmap for block size unrolled_dofmap = unroll_dofmap(dofmap.list[:num_cells_local, :], dofmap_bs) dmap_loc = (unrolled_dofmap // index_map_bs).reshape(-1) dmap_rem = (unrolled_dofmap % index_map_bs).reshape(-1) # Convert imap index to global index imap_global = dofmap.index_map.local_to_global(dmap_loc) dofmap_global = imap_global * index_map_bs + dmap_rem dofmap_imap = dolfinx.common.IndexMap(mesh.comm, num_dofs_local_dmap) # Compute dofmap offsets local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64) local_dofmap_offsets[:] *= num_dofs_per_cell * dofmap_bs local_dofmap_offsets += dofmap_imap.local_range[0] num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs local_dof_range = np.asarray(dofmap.index_map.local_range) * dofmap.index_map_bs num_dofs_local = local_dof_range[1] - local_dof_range[0] # Create internal data structure for function data to write to file function_data = FunctionData( cell_permutations=cell_perm[:num_cells_local].copy(), local_cell_range=local_cell_range, num_cells_global=num_cells_global, dofmap_array=dofmap_global, dofmap_offsets=local_dofmap_offsets, dofmap_range=dofmap_imap.local_range, global_dofs_in_dofmap=dofmap_imap.size_global, values=values[:num_dofs_local].copy(), dof_range=local_dof_range, num_dofs_global=num_dofs_global, name=name or u.name, ) # Write to file fname = Path(filename) _internal_function_writer(fname, comm, function_data, engine, mode, time, "FunctionWriter") adios4dolfinx-0.9.3/src/adios4dolfinx/comm_helpers.py000066400000000000000000000230201477331607400226700ustar00rootroot00000000000000from __future__ import annotations from mpi4py import MPI import numpy as np import numpy.typing as npt from .utils import compute_insert_position, compute_local_range, valid_function_types __all__ = [ "send_dofmap_and_recv_values", "send_and_recv_cell_perm", "send_dofs_and_recv_values", "numpy_to_mpi", ] """ Helpers for sending and receiving values for checkpointing """ numpy_to_mpi = { np.float64: MPI.DOUBLE, np.float32: MPI.FLOAT, np.complex64: MPI.COMPLEX, np.complex128: MPI.DOUBLE_COMPLEX, } def send_dofmap_and_recv_values( comm: MPI.Intracomm, source_ranks: npt.NDArray[np.int32], dest_ranks: npt.NDArray[np.int32], output_owners: npt.NDArray[np.int32], dest_size: npt.NDArray[np.int32], input_cells: npt.NDArray[np.int64], dofmap_pos: npt.NDArray[np.int32], num_cells_global: np.int64, values: npt.NDArray[valid_function_types], dofmap_offsets: npt.NDArray[np.int32], ) -> npt.NDArray[valid_function_types]: """ Given a set of positions in input dofmap, give the global input index of this dofmap entry in input file. 
Args: comm: The MPI communicator to create the Neighbourhood-communicator from source_ranks: Ranks that will send dofmap indices to current process dest_ranks: Ranks that will receive dofmap indices from current process output_owners: The owners of each dofmap entry on this process. The unique set of these entries should be the same as the dest_ranks. dest_size: The number of entries sent to each owner input_cells: A cell associated with the degree of freedom sent (global index). dofmap_pos: The local position in the dofmap. I.e. `dof = dofmap.links(input_cells)[dofmap_pos]` num_cells_global: Number of global cells values: Values currently held by this process. These are ordered (num_cells_local, num_dofs_per_cell), flattened row-major. dofmap_offsets: Local dofmap offsets to access the correct `values`. Returns: Values corresponding to the dofs owned by this process. """ insert_position = compute_insert_position(output_owners, dest_ranks, dest_size) # Pack the cells and dofmap position for all dofs this process is distributing out_cells = np.zeros(len(output_owners), dtype=np.int64) out_cells[insert_position] = input_cells out_pos = np.zeros(len(output_owners), dtype=np.int32) out_pos[insert_position] = dofmap_pos # Compute map from the data index sent to each process and the local # number on the current process proc_to_dof = np.zeros_like(input_cells, dtype=np.int32) proc_to_dof[insert_position] = np.arange(len(input_cells), dtype=np.int32) del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros(len(source_ranks), dtype=np.int32) mesh_to_data_comm = comm.Create_dist_graph_adjacent( source_ranks.tolist(), dest_ranks.tolist(), reorder=False ) mesh_to_data_comm.Neighbor_alltoall(dest_size, recv_size) # Prepare data-structures for receiving total_incoming = sum(recv_size) inc_cells = np.zeros(total_incoming, dtype=np.int64) inc_pos = np.zeros(total_incoming, dtype=np.intc) # Compute incoming offset inc_offsets = np.zeros(len(recv_size) + 1, dtype=np.intc) inc_offsets[1:] = np.cumsum(recv_size) # Send data s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) s_msg = [out_pos, dest_size, MPI.INT32_T] r_msg = [inc_pos, recv_size, MPI.INT32_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data_comm.Free() local_input_range = compute_local_range(comm, num_cells_global) values_to_distribute = np.zeros_like(inc_pos, dtype=values.dtype) # Map values based on input cells and dofmap local_cells = inc_cells - local_input_range[0] values_to_distribute = values[dofmap_offsets[local_cells] + inc_pos] # Send input dofs back to owning process data_to_mesh_comm = comm.Create_dist_graph_adjacent( dest_ranks.tolist(), source_ranks.tolist(), reorder=False ) incoming_global_dofs = np.zeros(sum(dest_size), dtype=values.dtype) s_msg = [values_to_distribute, recv_size, numpy_to_mpi[values.dtype.type]] r_msg = [incoming_global_dofs, dest_size, numpy_to_mpi[values.dtype.type]] data_to_mesh_comm.Neighbor_alltoallv(s_msg, r_msg) # Sort incoming global dofs as they were inputted assert len(incoming_global_dofs) == len(input_cells) sorted_global_dofs = np.zeros_like(incoming_global_dofs, dtype=values.dtype) sorted_global_dofs[proc_to_dof] = incoming_global_dofs data_to_mesh_comm.Free() return sorted_global_dofs def send_and_recv_cell_perm( cells: npt.NDArray[np.int64], perms: npt.NDArray[np.uint32], cell_owners: npt.NDArray[np.int32], comm: MPI.Intracomm, ) -> 
tuple[npt.NDArray[np.int64], npt.NDArray[np.uint32]]: """ Send global cell index and permutation to corresponding entry in `dest_ranks`. Args: cells: The global input index of the cell perms: The corresponding cell permutation of the cell cell_owners: The rank to send the i-th entry of cells and perms to comm: Rank of comm to generate neighbourhood communicator from """ dest_ranks, dest_size = np.unique(cell_owners, return_counts=True) dest_size = dest_size.astype(np.int32) mesh_to_data = comm.Create_dist_graph( [comm.rank], [len(dest_ranks)], dest_ranks.tolist(), reorder=False ) source, dest, _ = mesh_to_data.Get_dist_neighbors() assert np.allclose(dest, dest_ranks) insert_position = compute_insert_position(cell_owners, dest_ranks, dest_size) # Pack cells and permutations for sending out_cells = np.zeros_like(cells, dtype=np.int64) out_perm = np.zeros_like(perms, dtype=np.uint32) out_cells[insert_position] = cells out_perm[insert_position] = perms del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros_like(source, dtype=np.int32) mesh_to_data.Neighbor_alltoall(dest_size, recv_size) # Prepare data-structures for receiving total_incoming = sum(recv_size) inc_cells = np.zeros(total_incoming, dtype=np.int64) inc_perm = np.zeros(total_incoming, dtype=np.uint32) # Compute incoming offset inc_offsets = np.zeros(len(recv_size) + 1, dtype=np.intc) inc_offsets[1:] = np.cumsum(recv_size) # Send data s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data.Neighbor_alltoallv(s_msg, r_msg) s_msg = [out_perm, dest_size, MPI.UINT32_T] r_msg = [inc_perm, recv_size, MPI.UINT32_T] mesh_to_data.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data.Free() return inc_cells, inc_perm def send_dofs_and_recv_values( input_dofmap: npt.NDArray[np.int64], dofmap_owners: npt.NDArray[np.int32], comm: MPI.Intracomm, input_array: npt.NDArray[valid_function_types], array_start: int, ): """ Send a set of dofs (global index) to the process holding the DOF values to retrieve them. Args: input_dofmap: List of dofs (global index) that this process wants values for dofmap_owners: The process currently holding the values this process want to get. comm: MPI communicator input_array: Values for dofs array_start: The global starting index of `input_array`. 
""" dest_ranks, dest_size = np.unique(dofmap_owners, return_counts=True) dest_size = dest_size.astype(np.int32) dofmap_to_values = comm.Create_dist_graph( [comm.rank], [len(dest_ranks)], dest_ranks.tolist(), reorder=False ) source, dest, _ = dofmap_to_values.Get_dist_neighbors() assert np.allclose(dest_ranks, dest) # Compute amount of data to send to each process insert_position = compute_insert_position(dofmap_owners, dest_ranks, dest_size) # Pack dofs for sending out_dofs = np.zeros(len(dofmap_owners), dtype=np.int64) out_dofs[insert_position] = input_dofmap # Compute map from the data index sent to each process and the local number on # the current process proc_to_local = np.zeros_like(input_dofmap, dtype=np.int32) proc_to_local[insert_position] = np.arange(len(input_dofmap), dtype=np.int32) del insert_position # Send sizes to create data structures for receiving from NeighAlltoAllv recv_size = np.zeros_like(source, dtype=np.int32) dofmap_to_values.Neighbor_alltoall(dest_size, recv_size) # Send input dofs to processes holding input array inc_dofs = np.zeros(sum(recv_size), dtype=np.int64) s_msg = [out_dofs, dest_size, MPI.INT64_T] r_msg = [inc_dofs, recv_size, MPI.INT64_T] dofmap_to_values.Neighbor_alltoallv(s_msg, r_msg) dofmap_to_values.Free() # Send back appropriate input values sending_values = input_array[inc_dofs - array_start] values_to_dofmap = comm.Create_dist_graph_adjacent(dest, source, reorder=False) inc_values = np.zeros_like(out_dofs, dtype=input_array.dtype) s_msg_rev = [sending_values, recv_size, numpy_to_mpi[input_array.dtype.type]] r_msg_rev = [inc_values, dest_size, numpy_to_mpi[input_array.dtype.type]] values_to_dofmap.Neighbor_alltoallv(s_msg_rev, r_msg_rev) values_to_dofmap.Free() # Sort inputs according to local dof number (input process) values = np.empty_like(inc_values, dtype=input_array.dtype) values[proc_to_local] = inc_values return values adios4dolfinx-0.9.3/src/adios4dolfinx/legacy_readers.py000066400000000000000000000353311477331607400231740ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT from __future__ import annotations import pathlib import typing from mpi4py import MPI import adios2 import basix import dolfinx import numpy as np import numpy.typing as npt import ufl from .adios2_helpers import ( ADIOSFile, adios_to_numpy_dtype, read_array, resolve_adios_scope, ) from .comm_helpers import send_dofs_and_recv_values from .utils import ( compute_dofmap_pos, compute_insert_position, compute_local_range, index_owner, ) adios2 = resolve_adios_scope(adios2) __all__ = [ "read_mesh_from_legacy_h5", "read_function_from_legacy_h5", ] def read_dofmap_legacy( comm: MPI.Intracomm, filename: pathlib.Path, dofmap: str, dofmap_offsets: str, num_cells_global: np.int64, engine: str, cells: npt.NDArray[np.int64], dof_pos: npt.NDArray[np.int32], bs: int, ) -> npt.NDArray[np.int64]: """ Read dofmap with given communicator, split in continuous chunks based on number of cells in the mesh (global). Args: comm: MPI communicator filename: Path to input file dofmap: Variable name for dofmap num_cells_global: Number of cells in the global mesh engine: ADIOS2 engine type cells: Cells (global index) that contain a degree of freedom dof_pos: Each entry `dof_pos[i]` corresponds to the local position in the `input_dofmap.links(cells[i])[dof_pos[i]]` Returns: The global dof index in the input data for each dof described by the (cells[i], dof_pos[i]) tuples. .. 
note:: No MPI communication is done during this call """ local_cell_range = compute_local_range(comm, num_cells_global) # Open ADIOS engine adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, engine=engine, io_name="DofmapReader", ) as adios_file: for i in range(adios_file.file.Steps()): adios_file.file.BeginStep() if dofmap_offsets in adios_file.io.AvailableVariables().keys(): break adios_file.file.EndStep() d_offsets = adios_file.io.InquireVariable(dofmap_offsets) shape = d_offsets.Shape() # As the offsets are one longer than the number of cells, we need to read in with an overlap if len(shape) == 1: d_offsets.SetSelection( [[local_cell_range[0]], [local_cell_range[1] + 1 - local_cell_range[0]]] ) in_offsets = np.empty( local_cell_range[1] + 1 - local_cell_range[0], dtype=d_offsets.Type().strip("_t"), ) else: d_offsets.SetSelection( [ [local_cell_range[0], 0], [local_cell_range[1] + 1 - local_cell_range[0], shape[1]], ] ) in_offsets = np.empty( (local_cell_range[1] + 1 - local_cell_range[0], shape[1]), dtype=d_offsets.Type().strip("_t"), ) in_offsets = in_offsets.squeeze() adios_file.file.Get(d_offsets, in_offsets, adios2.Mode.Sync) # Get the relevant part of the dofmap if dofmap not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Dof offsets not found at {dofmap}") cell_dofs = adios_file.io.InquireVariable(dofmap) if len(shape) == 1: cell_dofs.SetSelection([[in_offsets[0]], [in_offsets[-1] - in_offsets[0]]]) in_dofmap = np.empty(in_offsets[-1] - in_offsets[0], dtype=cell_dofs.Type().strip("_t")) else: cell_dofs.SetSelection([[in_offsets[0], 0], [in_offsets[-1] - in_offsets[0], shape[1]]]) in_dofmap = np.empty( (in_offsets[-1] - in_offsets[0], shape[1]), dtype=cell_dofs.Type().strip("_t"), ) assert shape[1] == 1 adios_file.file.Get(cell_dofs, in_dofmap, adios2.Mode.Sync) in_dofmap = in_dofmap.reshape(-1).astype(np.int64) # Map xxxyyyzzz to xyzxyz mapped_dofmap = np.empty_like(in_dofmap) for i in range(len(in_offsets) - 1): pos_begin, pos_end = ( in_offsets[i] - in_offsets[0], in_offsets[i + 1] - in_offsets[0], ) dofs_i = in_dofmap[pos_begin:pos_end] assert (pos_end - pos_begin) % bs == 0 num_dofs_local = int((pos_end - pos_begin) // bs) for k in range(bs): for j in range(num_dofs_local): mapped_dofmap[int(pos_begin + j * bs + k)] = dofs_i[int(num_dofs_local * k + j)] # Extract dofmap data global_dofs = np.zeros_like(cells, dtype=np.int64) input_cell_positions = cells - local_cell_range[0] read_pos = (in_offsets[input_cell_positions] + dof_pos - in_offsets[0]).astype(np.int32) global_dofs = mapped_dofmap[read_pos] del input_cell_positions, read_pos adios_file.file.EndStep() return global_dofs def send_cells_and_receive_dofmap_index( filename: pathlib.Path, comm: MPI.Intracomm, source_ranks: npt.NDArray[np.int32], dest_ranks: npt.NDArray[np.int32], dest_size: npt.NDArray[np.int32], output_owners: npt.NDArray[np.int32], input_cells: npt.NDArray[np.int64], dofmap_pos: npt.NDArray[np.int32], num_cells_global: np.int64, dofmap_path: str, xdofmap_path: str, engine: str, bs: int, ) -> npt.NDArray[np.int64]: """ Given a set of positions in input dofmap, give the global input index of this dofmap entry in input file. 
""" recv_size = np.zeros(len(source_ranks), dtype=np.int32) mesh_to_data_comm = comm.Create_dist_graph_adjacent( source_ranks.tolist(), dest_ranks.tolist(), reorder=False ) # Send sizes to create data structures for receiving from NeighAlltoAllv mesh_to_data_comm.Neighbor_alltoall(dest_size, recv_size) # Sort output for sending and fill send data out_cells = np.zeros(len(output_owners), dtype=np.int64) out_pos = np.zeros(len(output_owners), dtype=np.int32) proc_to_dof = np.zeros_like(input_cells, dtype=np.int32) insertion_array = compute_insert_position(output_owners, dest_ranks, dest_size) out_cells[insertion_array] = input_cells out_pos[insertion_array] = dofmap_pos proc_to_dof[insertion_array] = np.arange(len(input_cells), dtype=np.int32) del insertion_array # Prepare data-structures for receiving total_incoming = sum(recv_size) inc_cells = np.zeros(total_incoming, dtype=np.int64) inc_pos = np.zeros(total_incoming, dtype=np.intc) # Send data s_msg = [out_cells, dest_size, MPI.INT64_T] r_msg = [inc_cells, recv_size, MPI.INT64_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) s_msg = [out_pos, dest_size, MPI.INT32_T] r_msg = [inc_pos, recv_size, MPI.INT32_T] mesh_to_data_comm.Neighbor_alltoallv(s_msg, r_msg) mesh_to_data_comm.Free() # Read dofmap from file input_dofs = read_dofmap_legacy( comm, filename, dofmap_path, xdofmap_path, num_cells_global, engine, inc_cells, inc_pos, bs, ) # Send input dofs back to owning process data_to_mesh_comm = comm.Create_dist_graph_adjacent( dest_ranks.tolist(), source_ranks.tolist(), reorder=False ) incoming_global_dofs = np.zeros(sum(dest_size), dtype=np.int64) s_msg = [input_dofs, recv_size, MPI.INT64_T] r_msg = [incoming_global_dofs, dest_size, MPI.INT64_T] data_to_mesh_comm.Neighbor_alltoallv(s_msg, r_msg) # Sort incoming global dofs as they were inputted sorted_global_dofs = np.zeros_like(incoming_global_dofs, dtype=np.int64) assert len(incoming_global_dofs) == len(input_cells) sorted_global_dofs[proc_to_dof] = incoming_global_dofs data_to_mesh_comm.Free() return sorted_global_dofs def read_mesh_geometry(io: adios2.ADIOS, infile: adios2.Engine, group: str): for geometry_key in [f"{group}/geometry", f"{group}/coordinates"]: if geometry_key in io.AvailableVariables().keys(): break else: raise KeyError(f"Mesh coordintes not found at '{group}/coordinates'") geometry = io.InquireVariable(geometry_key) shape = geometry.Shape() local_range = compute_local_range(MPI.COMM_WORLD, shape[0]) geometry.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) mesh_geometry = np.empty( (local_range[1] - local_range[0], shape[1]), dtype=adios_to_numpy_dtype[geometry.Type()], ) infile.Get(geometry, mesh_geometry, adios2.Mode.Sync) return mesh_geometry def read_mesh_from_legacy_h5( filename: pathlib.Path, comm: MPI.Intracomm, group: str, cell_type: str = "tetrahedron", ) -> dolfinx.mesh.Mesh: """ Read mesh from `h5`-file generated by legacy DOLFIN `HDF5File.write` or `XDMF.write_checkpoint`. Args: comm: MPI communicator to distribute mesh over filename: Path to `h5` or `xdmf` file group: Name of mesh in `h5`-file cell_type: What type of cell type, by default tetrahedron. 
""" # Make sure we use the HDF5File and check that the file is present filename = pathlib.Path(filename).with_suffix(".h5") if not filename.is_file(): raise FileNotFoundError(f"File {filename} does not exist") # Create ADIOS2 reader adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=adios2.Mode.Read, io_name="Mesh reader", engine="HDF5", ) as adios_file: # Get mesh topology (distributed) if f"{group}/topology" not in adios_file.io.AvailableVariables().keys(): raise KeyError(f"Mesh topology not found at '{group}/topology'") topology = adios_file.io.InquireVariable(f"{group}/topology") shape = topology.Shape() local_range = compute_local_range(MPI.COMM_WORLD, shape[0]) topology.SetSelection([[local_range[0], 0], [local_range[1] - local_range[0], shape[1]]]) mesh_topology = np.empty( (local_range[1] - local_range[0], shape[1]), dtype=topology.Type().strip("_t"), ) adios_file.file.Get(topology, mesh_topology, adios2.Mode.Sync) # Get mesh cell type if f"{group}/topology/celltype" in adios_file.io.AvailableAttributes().keys(): celltype = adios_file.io.InquireAttribute(f"{group}/topology/celltype") cell_type = celltype.DataString()[0] # Get mesh geometry mesh_geometry = read_mesh_geometry(io=adios_file.io, infile=adios_file.file, group=group) # Create DOLFINx mesh element = basix.ufl.element( basix.ElementFamily.P, cell_type, 1, basix.LagrangeVariant.equispaced, shape=(mesh_geometry.shape[1],), ) domain = ufl.Mesh(element) return dolfinx.mesh.create_mesh(MPI.COMM_WORLD, mesh_topology, mesh_geometry, domain) def read_function_from_legacy_h5( filename: pathlib.Path, comm: MPI.Intracomm, u: dolfinx.fem.Function, group: str = "mesh", step: typing.Optional[int] = None, ): """ Read function from a `h5`-file generated by legacy DOLFIN `HDF5File.write` or `XDMF.write_checkpoint`. Args: comm : MPI communicator to distribute mesh over filename : Path to `h5` or `xdmf` file u : The function used to stored the read values group : Group within the `h5` file where the function is stored, by default "mesh" step : The time step used when saving the checkpoint. 
If not provided it will assume that the function is saved as a regular function (i.e with `HDF5File.write`) """ # Make sure we use the HDF5File and check that the file is present filename = pathlib.Path(filename).with_suffix(".h5") if not filename.is_file(): raise FileNotFoundError(f"File {filename} does not exist") V = u.function_space mesh = u.function_space.mesh if u.function_space.element.needs_dof_transformations: raise RuntimeError( "Function-spaces requiring dof permutations are not compatible with legacy data" ) # ----------------------Step 1--------------------------------- # Compute index of input cells, and position in input dofmap local_cells, dof_pos = compute_dofmap_pos(u.function_space) input_cells = mesh.topology.original_cell_index[local_cells] # Compute mesh->input communicator # 1.1 Compute mesh->input communicator num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global owners = index_owner(mesh.comm, input_cells, num_cells_global) unique_owners, owner_count = np.unique(owners, return_counts=True) # FIXME: In C++ use NBX to find neighbourhood _tmp_comm = mesh.comm.Create_dist_graph( [mesh.comm.rank], [len(unique_owners)], unique_owners, reorder=False ) source, dest, _ = _tmp_comm.Get_dist_neighbors() _tmp_comm.Free() # Strip out any / group = group.strip("/") if step is not None: group = f"{group}/{group}_{step}" vector_group = "vector" else: vector_group = "vector_0" # ----------------------Step 2-------------------------------- # Get global dofmap indices from input process bs = V.dofmap.bs num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global dofmap_indices = send_cells_and_receive_dofmap_index( filename, comm, np.asarray(source, dtype=np.int32), np.asarray(dest, dtype=np.int32), owner_count.astype(np.int32), owners, input_cells, dof_pos, num_cells_global, f"/{group}/cell_dofs", f"/{group}/x_cell_dofs", "HDF5", bs, ) # ----------------------Step 3--------------------------------- # Compute owner of global dof on distributed mesh num_dof_global = V.dofmap.index_map_bs * V.dofmap.index_map.size_global dof_owner = index_owner(comm=mesh.comm, indices=dofmap_indices, N=num_dof_global) # Create MPI neigh comm to owner. 
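    # A toy illustration (comment only, not part of the original source) of the
    # neighbourhood pattern used by send_dofs_and_recv_values below, assuming two ranks:
    #   dof_owner = [1, 1, 0]                                   (on rank 0)
    #   np.unique(dof_owner, return_counts=True) -> dest_ranks = [0, 1], dest_size = [1, 2]
    # i.e. one value is requested from rank 0 itself and two from rank 1; the replies
    # arrive in the same per-destination blocks and are unpacked via the
    # compute_insert_position map.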
# NOTE: USE NBX in C++ # Read input data adios = adios2.ADIOS(comm) local_array, starting_pos = read_array( adios, filename, f"/{group}/{vector_group}", "HDF5", comm, legacy=True ) # Send global dof indices to correct input process, and receive value of given dof local_values = send_dofs_and_recv_values( dofmap_indices, dof_owner, comm, local_array, starting_pos ) # ----------------------Step 4--------------------------------- # Populate local part of array and scatter forward u.x.array[: len(local_values)] = local_values u.x.scatter_forward() adios4dolfinx-0.9.3/src/adios4dolfinx/original_checkpoint.py000066400000000000000000000377241477331607400242460ustar00rootroot00000000000000# Copyright (C) 2024 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT from __future__ import annotations import typing from pathlib import Path from mpi4py import MPI import adios2 import dolfinx import numpy as np from .adios2_helpers import resolve_adios_scope from .comm_helpers import numpy_to_mpi from .structures import FunctionData, MeshData from .utils import ( compute_insert_position, compute_local_range, index_owner, unroll_dofmap, unroll_insert_position, ) from .writers import write_function, write_mesh adios2 = resolve_adios_scope(adios2) __all__ = ["write_function_on_input_mesh", "write_mesh_input_order"] def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData: """ Store data locally on output process """ # 1. Send cell indices owned by current process to the process which owned its input # Get the input cell index for cells owned by this process num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local original_cell_index = mesh.topology.original_cell_index[:num_owned_cells] # Compute owner of cells on this process based on the original cell index num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global) local_cell_range = compute_local_range(mesh.comm, num_cells_global) # Compute outgoing edges from current process to outputting process # Computes the number of cells sent to each process at the same time cell_destinations, send_cells_per_proc = np.unique(output_cell_owner, return_counts=True) cell_to_output_comm = mesh.comm.Create_dist_graph( [mesh.comm.rank], [len(cell_destinations)], cell_destinations.tolist(), reorder=False, ) cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors() assert np.allclose(cell_dests, cell_destinations) # Compute number of recieving cells recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32) if len(send_cells_per_proc) == 0: send_cells_per_proc = np.zeros(1, dtype=np.int32) if len(recv_cells_per_proc) == 0: recv_cells_per_proc = np.zeros(1, dtype=np.int32) send_cells_per_proc = send_cells_per_proc.astype(np.int32) cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc) assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0] # Pack and send cell indices (used for mapping topology dofmap later) cell_insert_position = compute_insert_position( output_cell_owner, cell_destinations, send_cells_per_proc ) send_cells = np.empty_like(cell_insert_position, dtype=np.int64) send_cells[cell_insert_position] = original_cell_index recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64) send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T] recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T] 
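    # Note on the message format (added comment, semantics as understood from mpi4py):
    # each message passed to Neighbor_alltoallv below is a buffer specification
    # [array, counts_per_neighbour, MPI datatype]. Block i of the send array
    # (send_cells_per_proc[i] entries) is delivered to the i-th out-neighbour of
    # cell_to_output_comm, and recv_cells is filled in in-neighbour order.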
    cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg)
    del send_cells_msg, recv_cells_msg, send_cells

    # Map received cells to the local index
    local_cell_index = recv_cells - local_cell_range[0]

    # 2. Create dofmap based on original geometry indices and re-order in the same order as
    # original cell indices on output process

    # Get original node index for all nodes (including ghosts) and convert dofmap to these indices
    original_node_index = mesh.geometry.input_global_indices
    _, num_nodes_per_cell = mesh.geometry.dofmap.shape
    local_geometry_dofmap = mesh.geometry.dofmap[:num_owned_cells, :]
    global_geometry_dofmap = original_node_index[local_geometry_dofmap.reshape(-1)]

    # Unroll insert position for geometry dofmap
    dofmap_insert_position = unroll_insert_position(cell_insert_position, num_nodes_per_cell)

    # Create and communicate connectivity in original geometry indices
    send_geometry_dofmap = np.empty_like(dofmap_insert_position, dtype=np.int64)
    send_geometry_dofmap[dofmap_insert_position] = global_geometry_dofmap
    del global_geometry_dofmap
    send_sizes_dofmap = send_cells_per_proc * num_nodes_per_cell
    recv_sizes_dofmap = recv_cells_per_proc * num_nodes_per_cell
    recv_geometry_dofmap = np.empty(recv_sizes_dofmap.sum(), dtype=np.int64)
    send_geometry_dofmap_msg = [send_geometry_dofmap, send_sizes_dofmap, MPI.INT64_T]
    recv_geometry_dofmap_msg = [recv_geometry_dofmap, recv_sizes_dofmap, MPI.INT64_T]
    cell_to_output_comm.Neighbor_alltoallv(send_geometry_dofmap_msg, recv_geometry_dofmap_msg)
    del send_geometry_dofmap_msg, recv_geometry_dofmap_msg

    # Reshape dofmap and sort by original cell index
    recv_dofmap = recv_geometry_dofmap.reshape(-1, num_nodes_per_cell)
    sorted_recv_dofmap = np.empty_like(recv_dofmap)
    sorted_recv_dofmap[local_cell_index] = recv_dofmap
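    # Worked toy example (comment only, added for illustration) of the unrolling
    # used above: with cell_insert_position = [2, 0, 1] and num_nodes_per_cell = 2,
    # unroll_insert_position returns [4, 5, 0, 1, 2, 3], i.e. each cell slot is
    # expanded into a contiguous block of node slots in the send buffer.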
    # 3. Move geometry coordinates to input process

    # Compute outgoing edges from current process and create neighbourhood communicator
    # Also create number of outgoing cells at the same time
    num_owned_nodes = mesh.geometry.index_map().size_local
    num_nodes_global = mesh.geometry.index_map().size_global
    output_node_owner = index_owner(
        mesh.comm, original_node_index[:num_owned_nodes], num_nodes_global
    )
    node_destinations, send_nodes_per_proc = np.unique(output_node_owner, return_counts=True)
    send_nodes_per_proc = send_nodes_per_proc.astype(np.int32)
    geometry_to_owner_comm = mesh.comm.Create_dist_graph(
        [mesh.comm.rank],
        [len(node_destinations)],
        node_destinations.tolist(),
        reorder=False,
    )
    node_sources, node_dests, _ = geometry_to_owner_comm.Get_dist_neighbors()
    assert np.allclose(node_dests, node_destinations)

    # Compute send node insert positions
    send_nodes_position = compute_insert_position(
        output_node_owner, node_destinations, send_nodes_per_proc
    )
    unrolled_nodes_position = unroll_insert_position(send_nodes_position, 3)
    send_coordinates = np.empty_like(unrolled_nodes_position, dtype=mesh.geometry.x.dtype)
    send_coordinates[unrolled_nodes_position] = mesh.geometry.x[:num_owned_nodes, :].reshape(-1)

    # Send and receive geometry sizes
    send_coordinate_sizes = (send_nodes_per_proc * 3).astype(np.int32)
    recv_coordinate_sizes = np.zeros_like(node_sources, dtype=np.int32)
    geometry_to_owner_comm.Neighbor_alltoall(send_coordinate_sizes, recv_coordinate_sizes)

    # Send node coordinates
    recv_coordinates = np.empty(recv_coordinate_sizes.sum(), dtype=mesh.geometry.x.dtype)
    mpi_type = numpy_to_mpi[recv_coordinates.dtype.type]
    send_coord_msg = [send_coordinates, send_coordinate_sizes, mpi_type]
    recv_coord_msg = [recv_coordinates, recv_coordinate_sizes, mpi_type]
    geometry_to_owner_comm.Neighbor_alltoallv(send_coord_msg, recv_coord_msg)
    del send_coord_msg, recv_coord_msg

    # Send node ordering for reordering the coordinates on output process
    send_nodes = np.empty(num_owned_nodes, dtype=np.int64)
    send_nodes[send_nodes_position] = original_node_index[:num_owned_nodes]
    recv_indices = np.empty(recv_coordinate_sizes.sum() // 3, dtype=np.int64)
    send_nodes_msg = [send_nodes, send_nodes_per_proc, MPI.INT64_T]
    recv_nodes_msg = [recv_indices, recv_coordinate_sizes // 3, MPI.INT64_T]
    geometry_to_owner_comm.Neighbor_alltoallv(send_nodes_msg, recv_nodes_msg)

    # Compute local ordering of received nodes
    local_node_range = compute_local_range(mesh.comm, num_nodes_global)
    recv_indices -= local_node_range[0]

    # Sort geometry based on input index and strip to gdim
    gdim = mesh.geometry.dim
    recv_nodes = recv_coordinates.reshape(-1, 3)
    geometry = np.empty_like(recv_nodes)
    geometry[recv_indices, :] = recv_nodes
    geometry = geometry[:, :gdim].copy()
    assert local_node_range[1] - local_node_range[0] == geometry.shape[0]
    cmap = mesh.geometry.cmap
    cell_to_output_comm.Free()
    geometry_to_owner_comm.Free()
    # NOTE: Could in theory store partitioning information, but would not work nicely
    # as one would need to read this data rather than the xdmffile.
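    # Toy example (comment only, added for illustration) of the block distribution
    # used throughout: with mesh.comm.size = 2 and num_nodes_global = 5,
    # compute_local_range returns [0, 3] on rank 0 and [3, 5] on rank 1 (the first
    # N % size ranks receive one extra entry), which is exactly the range asserted
    # against geometry.shape[0] above.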
return MeshData( local_geometry=geometry, local_geometry_pos=local_node_range, num_nodes_global=num_nodes_global, local_topology=sorted_recv_dofmap, local_topology_pos=local_cell_range, num_cells_global=num_cells_global, cell_type=mesh.topology.cell_name(), degree=cmap.degree, lagrange_variant=cmap.variant, store_partition=False, partition_processes=None, ownership_array=None, ownership_offset=None, partition_range=None, partition_global=None, ) def create_function_data_on_original_mesh( u: dolfinx.fem.Function, name: typing.Optional[str] = None ) -> FunctionData: """ Create data object to save with ADIOS2 """ mesh = u.function_space.mesh # Compute what cells owned by current process should be sent to what output process # FIXME: Cache this num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local original_cell_index = mesh.topology.original_cell_index[:num_owned_cells] # Compute owner of cells on this process based on the original cell index num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global) local_cell_range = compute_local_range(mesh.comm, num_cells_global) # Compute outgoing edges from current process to outputting process # Computes the number of cells sent to each process at the same time cell_destinations, send_cells_per_proc = np.unique(output_cell_owner, return_counts=True) send_cells_per_proc = send_cells_per_proc.astype(np.int32) cell_to_output_comm = mesh.comm.Create_dist_graph( [mesh.comm.rank], [len(cell_destinations)], cell_destinations.tolist(), reorder=False, ) cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors() assert np.allclose(cell_dests, cell_destinations) # Compute number of recieving cells recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32) send_cells_per_proc = send_cells_per_proc.astype(np.int32) cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc) assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0] # Pack and send cell indices (used for mapping topology dofmap later) cell_insert_position = compute_insert_position( output_cell_owner, cell_destinations, send_cells_per_proc ) send_cells = np.empty_like(cell_insert_position, dtype=np.int64) send_cells[cell_insert_position] = original_cell_index recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64) send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T] recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T] cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg) del send_cells_msg, recv_cells_msg # Map received cells to the local index local_cell_index = recv_cells - local_cell_range[0] # Pack and send cell permutation info mesh.topology.create_entity_permutations() cell_permutation_info = mesh.topology.get_cell_permutation_info()[:num_owned_cells] send_perm = np.empty_like(send_cells, dtype=np.uint32) send_perm[cell_insert_position] = cell_permutation_info recv_perm = np.empty_like(recv_cells, dtype=np.uint32) send_perm_msg = [send_perm, send_cells_per_proc, MPI.UINT32_T] recv_perm_msg = [recv_perm, recv_cells_per_proc, MPI.UINT32_T] cell_to_output_comm.Neighbor_alltoallv(send_perm_msg, recv_perm_msg) cell_permutation_info = np.empty_like(recv_perm) cell_permutation_info[local_cell_index] = recv_perm # 2. 
Extract function data (array is the same, keeping global indices from DOLFINx) # Dofmap is moved by the original cell index similar to the mesh geometry dofmap dofmap = u.function_space.dofmap dmap = dofmap.list num_dofs_per_cell = dmap.shape[1] dofmap_bs = dofmap.bs index_map_bs = dofmap.index_map_bs # Unroll dofmap for block size unrolled_dofmap = unroll_dofmap(dofmap.list[:num_owned_cells, :], dofmap_bs) dmap_loc = (unrolled_dofmap // index_map_bs).reshape(-1) dmap_rem = (unrolled_dofmap % index_map_bs).reshape(-1) # Convert imap index to global index imap_global = dofmap.index_map.local_to_global(dmap_loc) dofmap_global = (imap_global * index_map_bs + dmap_rem).reshape(unrolled_dofmap.shape) num_dofs_per_cell = dofmap_global.shape[1] dofmap_insert_position = unroll_insert_position(cell_insert_position, num_dofs_per_cell) # Create and send array for global dofmap send_function_dofmap = np.empty(len(dofmap_insert_position), dtype=np.int64) send_function_dofmap[dofmap_insert_position] = dofmap_global.reshape(-1) send_sizes_dofmap = send_cells_per_proc * num_dofs_per_cell recv_size_dofmap = recv_cells_per_proc * num_dofs_per_cell recv_function_dofmap = np.empty(recv_size_dofmap.sum(), dtype=np.int64) cell_to_output_comm.Neighbor_alltoallv( [send_function_dofmap, send_sizes_dofmap, MPI.INT64_T], [recv_function_dofmap, recv_size_dofmap, MPI.INT64_T], ) shaped_dofmap = recv_function_dofmap.reshape( local_cell_range[1] - local_cell_range[0], num_dofs_per_cell ).copy() final_dofmap = np.empty_like(shaped_dofmap) final_dofmap[local_cell_index] = shaped_dofmap final_dofmap = final_dofmap.reshape(-1) # Get offsets of dofmap num_cells_local = local_cell_range[1] - local_cell_range[0] num_dofs_local_dmap = num_cells_local * num_dofs_per_cell dofmap_imap = dolfinx.common.IndexMap(mesh.comm, num_dofs_local_dmap) local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64) local_dofmap_offsets[:] *= num_dofs_per_cell local_dofmap_offsets[:] += dofmap_imap.local_range[0] num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs local_range = np.asarray(dofmap.index_map.local_range, dtype=np.int64) * dofmap.index_map_bs func_name = name if name is not None else u.name cell_to_output_comm.Free() return FunctionData( cell_permutations=cell_permutation_info, local_cell_range=local_cell_range, num_cells_global=num_cells_global, dofmap_array=final_dofmap, dofmap_offsets=local_dofmap_offsets, values=u.x.array[:num_dofs_local].copy(), dof_range=local_range, num_dofs_global=num_dofs_global, dofmap_range=dofmap_imap.local_range, global_dofs_in_dofmap=dofmap_imap.size_global, name=func_name, ) def write_function_on_input_mesh( filename: typing.Union[Path, str], u: dolfinx.fem.Function, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, name: typing.Optional[str] = None, ): """ Write function checkpoint (to be read with the input mesh). Parameters: u: The function to checkpoint filename: The filename to write to engine: The ADIOS2 engine to use mode: The ADIOS2 mode to use (write or append) time: Time-stamp associated with function at current write step name: Name of function. If None, the name of the function is used. 
""" mesh = u.function_space.mesh function_data = create_function_data_on_original_mesh(u, name) fname = Path(filename) write_function( fname, mesh.comm, function_data, engine, mode, time, io_name="OriginalFunctionWriter", ) def write_mesh_input_order( filename: typing.Union[Path, str], mesh: dolfinx.mesh.Mesh, engine: str = "BP4" ): """ Write mesh to checkpoint file in original input ordering """ mesh_data = create_original_mesh_data(mesh) fname = Path(filename) write_mesh(fname, mesh.comm, mesh_data, engine, io_name="OriginalMeshWriter") adios4dolfinx-0.9.3/src/adios4dolfinx/py.typed000066400000000000000000000000001477331607400213310ustar00rootroot00000000000000adios4dolfinx-0.9.3/src/adios4dolfinx/snapshot.py000066400000000000000000000035211477331607400220560ustar00rootroot00000000000000# Copyright (C) 2024 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT from pathlib import Path import adios2 import dolfinx from .adios2_helpers import ADIOSFile, resolve_adios_scope adios2 = resolve_adios_scope(adios2) __all__ = [ "snapshot_checkpoint", ] def snapshot_checkpoint(uh: dolfinx.fem.Function, file: Path, mode: adios2.Mode): """Read or write a snapshot checkpoint This checkpoint is only meant to be used on the same mesh during the same simulation. :param uh: The function to write data from or read to :param file: The file to write to or read from :param mode: Either read or write """ if mode not in [adios2.Mode.Write, adios2.Mode.Read]: raise ValueError("Got invalid mode {mode}") # Create ADIOS IO adios = adios2.ADIOS(uh.function_space.mesh.comm) with ADIOSFile( adios=adios, filename=file, mode=mode, io_name="SnapshotCheckPoint", engine="BP4", ) as adios_file: if mode == adios2.Mode.Write: dofmap = uh.function_space.dofmap num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs local_dofs = uh.x.array[:num_dofs_local].copy() # Write to file adios_file.file.BeginStep() dofs = adios_file.io.DefineVariable("dofs", local_dofs, count=[num_dofs_local]) adios_file.file.Put(dofs, local_dofs, adios2.Mode.Sync) adios_file.file.EndStep() else: adios_file.file.BeginStep() in_variable = adios_file.io.InquireVariable("dofs") in_variable.SetBlockSelection(uh.function_space.mesh.comm.rank) adios_file.file.Get(in_variable, uh.x.array, adios2.Mode.Sync) adios_file.file.EndStep() uh.x.scatter_forward() adios4dolfinx-0.9.3/src/adios4dolfinx/structures.py000066400000000000000000000042411477331607400224420ustar00rootroot00000000000000# Copyright (C) 2024 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT from __future__ import annotations import typing from dataclasses import dataclass import numpy as np import numpy.typing as npt """Internal library classes for storing mesh and function data""" __all__ = ["MeshData", "FunctionData"] @dataclass class MeshData: # 2 dimensional array of node coordinates local_geometry: npt.NDArray[np.floating] local_geometry_pos: tuple[int, int] # Insert range on current process for geometry nodes num_nodes_global: int # Number of nodes in global geometry array local_topology: npt.NDArray[np.int64] # 2 dimensional connecitivty array for mesh topology # Insert range on current process for topology local_topology_pos: tuple[int, int] num_cells_global: int # NUmber of cells in global topology cell_type: str degree: int lagrange_variant: int # Partitioning_information store_partition: bool partition_processes: typing.Optional[int] # Number of processes in partition ownership_array: 
typing.Optional[npt.NDArray[np.int32]] # Ownership array for cells ownership_offset: typing.Optional[npt.NDArray[np.int32]] # Ownership offset for cells partition_range: typing.Optional[ tuple[int, int] ] # Local insert position for partitioning information partition_global: typing.Optional[int] @dataclass class FunctionData: cell_permutations: npt.NDArray[np.uint32] # Cell permutations for dofmap local_cell_range: tuple[int, int] # Range of cells on current process num_cells_global: int # Number of cells in global topology dofmap_array: npt.NDArray[np.int64] # Local function dofmap (using global indices) dofmap_offsets: npt.NDArray[np.int64] # Global dofmap offsets dofmap_range: tuple[int, int] # Range of dofmap on current process global_dofs_in_dofmap: int # Number of entries in global dofmap values: npt.NDArray[np.floating] # Local function values dof_range: tuple[int, int] # Range of local function values num_dofs_global: int # Number of global function values name: str # Name of function adios4dolfinx-0.9.3/src/adios4dolfinx/utils.py000066400000000000000000000137441477331607400213670ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT """ Vectorized numpy operations used internally in adios4dolfinx """ from __future__ import annotations import typing from mpi4py import MPI import dolfinx import numpy as np import numpy.typing as npt from packaging.version import Version __all__ = [ "compute_local_range", "index_owner", "compute_dofmap_pos", "unroll_dofmap", "compute_insert_position", "unroll_insert_position", ] valid_function_types = typing.Union[np.float32, np.float64, np.complex64, np.complex128] valid_real_types = typing.Union[np.float32, np.float64] def element_signature(V): if Version(dolfinx.__version__) > Version("0.9.0"): return V.element.signature else: return V.element.signature() def compute_insert_position( data_owner: npt.NDArray[np.int32], destination_ranks: npt.NDArray[np.int32], out_size: npt.NDArray[np.int32], ) -> npt.NDArray[np.int32]: """ Giving a list of ranks, compute the local insert position for each rank in a list sorted by destination ranks. This function is used for packing data from a given process to its destination processes. Example: .. highlight:: python .. code-block:: python data_owner = [0, 1, 1, 0, 2, 3] destination_ranks = [2,0,3,1] out_size = [1, 2, 1, 2] insert_position = compute_insert_position(data_owner, destination_ranks, out_size) Insert position is then ``[1, 4, 5, 2, 0, 3]`` """ process_pos_indicator = data_owner.reshape(-1, 1) == destination_ranks # Compute offsets for insertion based on input size send_offsets = np.zeros(len(out_size) + 1, dtype=np.intc) send_offsets[1:] = np.cumsum(out_size) assert send_offsets[-1] == len(data_owner) # Compute local insert index on each process proc_row, proc_col = np.nonzero(process_pos_indicator) cum_pos = np.cumsum(process_pos_indicator, axis=0) insert_position = cum_pos[proc_row, proc_col] - 1 # Add process offset for each local index insert_position += send_offsets[proc_col] return insert_position def unroll_insert_position( insert_position: npt.NDArray[np.int32], block_size: int ) -> npt.NDArray[np.int32]: """ Unroll insert position by a block size Example: .. highlight:: python .. 
code-block:: python insert_position = [1, 4, 5, 2, 0, 3] unrolled_ip = unroll_insert_position(insert_position, 3) where ``unrolled_ip = [3, 4 ,5, 12, 13, 14, 15, 16, 17, 6, 7, 8, 0, 1, 2, 9, 10, 11]`` """ unrolled_ip = np.repeat(insert_position, block_size) * block_size unrolled_ip += np.tile(np.arange(block_size), len(insert_position)) return unrolled_ip def compute_local_range(comm: MPI.Intracomm, N: np.int64): """ Divide a set of `N` objects into `M` partitions, where `M` is the size of the MPI communicator `comm`. NOTE: If N is not divisible by the number of ranks, the first `r` processes gets an extra value Returns the local range of values """ rank = comm.rank size = comm.size n = N // size r = N % size # First r processes has one extra value if rank < r: return [rank * (n + 1), (rank + 1) * (n + 1)] else: return [rank * n + r, (rank + 1) * n + r] def index_owner( comm: MPI.Intracomm, indices: npt.NDArray[np.int64], N: np.int64 ) -> npt.NDArray[np.int32]: """ Find which rank (local to comm) which owns an `index`, given that data of size `N` has been split equally among the ranks. NOTE: If `N` is not divisible by the number of ranks, the first `r` processes gets an extra value. """ size = comm.size assert (indices < N).all() n = N // size r = N % size owner = np.empty_like(indices, dtype=np.int32) inc_remainder = indices < (n + 1) * r owner[inc_remainder] = indices[inc_remainder] // (n + 1) owner[~inc_remainder] = r + (indices[~inc_remainder] - r * (n + 1)) // n return owner def unroll_dofmap(dofs: npt.NDArray[np.int32], bs: int) -> npt.NDArray[np.int32]: """ Given a two-dimensional dofmap of size `(num_cells, num_dofs_per_cell)` Expand the dofmap by its block size such that the resulting array is of size `(num_cells, bs*num_dofs_per_cell)` """ num_cells, num_dofs_per_cell = dofs.shape unrolled_dofmap = np.repeat(dofs, bs).reshape(num_cells, num_dofs_per_cell * bs) * bs unrolled_dofmap += np.tile(np.arange(bs), num_dofs_per_cell) return unrolled_dofmap def compute_dofmap_pos( V: dolfinx.fem.FunctionSpace, ) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: """ Compute a map from each owned dof in the dofmap to a single cell owned by the process, and the relative position of the dof. 
    :param V: The function space
    :returns: The tuple (`cells`, `dof_pos`) where each array is the size of the
        number of owned dofs (unrolled for block size)
    """
    dofs = V.dofmap.list
    mesh = V.mesh
    num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local
    dofmap_bs = V.dofmap.bs
    num_owned_dofs = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    local_cell = np.empty(
        num_owned_dofs, dtype=np.int32
    )  # Local cell index for each dof owned by process
    dof_pos = np.empty(num_owned_dofs, dtype=np.int32)  # Position in dofmap for said dof
    unrolled_dofmap = unroll_dofmap(dofs[:num_owned_cells, :], dofmap_bs)
    markers = unrolled_dofmap < num_owned_dofs
    local_indices = np.broadcast_to(np.arange(markers.shape[1]), markers.shape)
    cell_indicator = np.broadcast_to(
        np.arange(num_owned_cells, dtype=np.int32).reshape(-1, 1),
        (num_owned_cells, markers.shape[1]),
    )
    indicator = unrolled_dofmap[markers].reshape(-1)
    local_cell[indicator] = cell_indicator[markers].reshape(-1)
    dof_pos[indicator] = local_indices[markers].reshape(-1)
    return local_cell, dof_pos
adios4dolfinx-0.9.3/src/adios4dolfinx/writers.py
# Copyright (C) 2024 Jørgen Schartum Dokken
#
# This file is part of adios4dolfinx
#
# SPDX-License-Identifier: MIT

import warnings
from pathlib import Path

from mpi4py import MPI

import adios2
import numpy as np

from .adios2_helpers import ADIOSFile, resolve_adios_scope
from .structures import FunctionData, MeshData

adios2 = resolve_adios_scope(adios2)


def write_mesh(
    filename: Path,
    comm: MPI.Intracomm,
    mesh: MeshData,
    engine: str = "BP4",
    mode: adios2.Mode = adios2.Mode.Write,
    time: float = 0.0,
    io_name: str = "MeshWriter",
):
    """
    Write a mesh to file using ADIOS2

    Parameters:
        comm: MPI communicator used in storage
        mesh: Internal data structure for the mesh data to save to file
        filename: Path to file to write to
        engine: ADIOS2 engine to use
        mode: ADIOS2 mode to use (write or append)
        time: Time stamp associated with the mesh
        io_name: Internal name used for the ADIOS IO object
    """
    gdim = mesh.local_geometry.shape[1]
    adios = adios2.ADIOS(comm)
    with ADIOSFile(
        adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name
    ) as adios_file:
        adios_file.file.BeginStep()

        # Write geometry
        pointvar = adios_file.io.DefineVariable(
            "Points",
            mesh.local_geometry,
            shape=[mesh.num_nodes_global, gdim],
            start=[mesh.local_geometry_pos[0], 0],
            count=[mesh.local_geometry_pos[1] - mesh.local_geometry_pos[0], gdim],
        )
        adios_file.file.Put(pointvar, mesh.local_geometry, adios2.Mode.Sync)

        if mode == adios2.Mode.Write:
            adios_file.io.DefineAttribute("CellType", mesh.cell_type)
            adios_file.io.DefineAttribute("Degree", np.array([mesh.degree], dtype=np.int32))
            adios_file.io.DefineAttribute(
                "LagrangeVariant", np.array([mesh.lagrange_variant], dtype=np.int32)
            )

            # Write topology (only on first write as topology is constant)
            num_dofs_per_cell = mesh.local_topology.shape[1]
            dvar = adios_file.io.DefineVariable(
                "Topology",
                mesh.local_topology,
                shape=[mesh.num_cells_global, num_dofs_per_cell],
                start=[mesh.local_topology_pos[0], 0],
                count=[
                    mesh.local_topology_pos[1] - mesh.local_topology_pos[0],
                    num_dofs_per_cell,
                ],
            )
            adios_file.file.Put(dvar, mesh.local_topology)

            # Add partitioning data
            if mesh.store_partition:
                assert mesh.partition_range is not None
                par_data = adios_file.io.DefineVariable(
                    "PartitioningData",
                    mesh.ownership_array,
                    shape=[mesh.partition_global],
                    start=[mesh.partition_range[0]],
                    count=[
                        mesh.partition_range[1] - mesh.partition_range[0],
                    ],
                )
                adios_file.file.Put(par_data,
mesh.ownership_array) assert mesh.ownership_offset is not None par_offset = adios_file.io.DefineVariable( "PartitioningOffset", mesh.ownership_offset, shape=[mesh.num_cells_global + 1], start=[mesh.local_topology_pos[0]], count=[mesh.local_topology_pos[1] - mesh.local_topology_pos[0] + 1], ) adios_file.file.Put(par_offset, mesh.ownership_offset) assert mesh.partition_processes is not None adios_file.io.DefineAttribute( "PartitionProcesses", np.array([mesh.partition_processes], dtype=np.int32) ) if mode == adios2.Mode.Append and mesh.store_partition: warnings.warn("Partitioning data is not written in append mode") # Add time step to file t_arr = np.array([time], dtype=np.float64) time_var = adios_file.io.DefineVariable( "MeshTime", t_arr, shape=[1], start=[0], count=[1 if comm.rank == 0 else 0], ) adios_file.file.Put(time_var, t_arr) adios_file.file.PerformPuts() adios_file.file.EndStep() def write_function( filename: Path, comm: MPI.Intracomm, u: FunctionData, engine: str = "BP4", mode: adios2.Mode = adios2.Mode.Append, time: float = 0.0, io_name: str = "FunctionWriter", ): """ Write a function to file using ADIOS2 Parameters: comm: MPI communicator used in storage u: Internal data structure for the function data to save to file filename: Path to file to write to engine: ADIOS2 engine to use mode: ADIOS2 mode to use (write or append) time: Time stamp associated with function io_name: Internal name used for the ADIOS IO object """ adios = adios2.ADIOS(comm) with ADIOSFile( adios=adios, filename=filename, mode=mode, engine=engine, io_name=io_name ) as adios_file: adios_file.file.BeginStep() # Add mesh permutations pvar = adios_file.io.DefineVariable( "CellPermutations", u.cell_permutations, shape=[u.num_cells_global], start=[u.local_cell_range[0]], count=[u.local_cell_range[1] - u.local_cell_range[0]], ) adios_file.file.Put(pvar, u.cell_permutations) dofmap_var = adios_file.io.DefineVariable( f"{u.name}_dofmap", u.dofmap_array, shape=[u.global_dofs_in_dofmap], start=[u.dofmap_range[0]], count=[u.dofmap_range[1] - u.dofmap_range[0]], ) adios_file.file.Put(dofmap_var, u.dofmap_array) xdofmap_var = adios_file.io.DefineVariable( f"{u.name}_XDofmap", u.dofmap_offsets, shape=[u.num_cells_global + 1], start=[u.local_cell_range[0]], count=[u.local_cell_range[1] - u.local_cell_range[0] + 1], ) adios_file.file.Put(xdofmap_var, u.dofmap_offsets) val_var = adios_file.io.DefineVariable( f"{u.name}_values", u.values, shape=[u.num_dofs_global], start=[u.dof_range[0]], count=[u.dof_range[1] - u.dof_range[0]], ) adios_file.file.Put(val_var, u.values) # Add time step to file t_arr = np.array([time], dtype=np.float64) time_var = adios_file.io.DefineVariable( f"{u.name}_time", t_arr, shape=[1], start=[0], count=[1 if comm.rank == 0 else 0], ) adios_file.file.Put(time_var, t_arr) adios_file.file.PerformPuts() adios_file.file.EndStep() adios4dolfinx-0.9.3/tests/000077500000000000000000000000001477331607400154505ustar00rootroot00000000000000adios4dolfinx-0.9.3/tests/conftest.py000066400000000000000000000116271477331607400176560ustar00rootroot00000000000000from mpi4py import MPI import dolfinx import ipyparallel as ipp import numpy as np import numpy.typing import pytest import adios4dolfinx @pytest.fixture(scope="module") def cluster(): cluster = ipp.Cluster(engines="mpi", n=2) rc = cluster.start_and_connect_sync() yield rc cluster.stop_cluster_sync() @pytest.fixture(scope="function") def write_function(tmp_path): def _write_function(mesh, el, f, dtype, name="uh", append: bool = False) -> str: V = 
dolfinx.fem.functionspace(mesh, el)
        uh = dolfinx.fem.Function(V, dtype=dtype)
        uh.interpolate(f)
        uh.name = name
        el_hash = (
            adios4dolfinx.utils.element_signature(V)
            .replace(" ", "")
            .replace(",", "")
            .replace("(", "")
            .replace(")", "")
            .replace("[", "")
            .replace("]", "")
        )
        # Consistent tmp dir across processes
        f_path = MPI.COMM_WORLD.bcast(tmp_path, root=0)
        file_hash = f"{el_hash}_{np.dtype(dtype).name}"
        filename = f_path / f"mesh_{file_hash}.bp"
        if mesh.comm.size != 1:
            if not append:
                adios4dolfinx.write_mesh(filename, mesh)
            adios4dolfinx.write_function(filename, uh, time=0.0)
        else:
            if MPI.COMM_WORLD.rank == 0:
                if not append:
                    adios4dolfinx.write_mesh(filename, mesh)
                adios4dolfinx.write_function(filename, uh, time=0.0)
        return filename

    return _write_function


@pytest.fixture(scope="function")
def read_function():
    def _read_function(comm, el, f, path, dtype, name="uh"):
        engine = "BP4"
        mesh = adios4dolfinx.read_mesh(path, comm, engine, dolfinx.mesh.GhostMode.shared_facet)
        V = dolfinx.fem.functionspace(mesh, el)
        v = dolfinx.fem.Function(V, dtype=dtype)
        v.name = name
        adios4dolfinx.read_function(path, v, engine)

        v_ex = dolfinx.fem.Function(V, dtype=dtype)
        v_ex.interpolate(f)

        res = np.finfo(dtype).resolution
        np.testing.assert_allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res)

    return _read_function


@pytest.fixture(scope="function")
def get_dtype():
    def _get_dtype(in_dtype: np.dtype, is_complex: bool):
        dtype: numpy.typing.DTypeLike
        if in_dtype == np.float32:
            if is_complex:
                dtype = np.complex64
            else:
                dtype = np.float32
        elif in_dtype == np.float64:
            if is_complex:
                dtype = np.complex128
            else:
                dtype = np.float64
        else:
            raise ValueError("Unsupported dtype")
        return dtype

    return _get_dtype


@pytest.fixture(scope="function")
def write_function_time_dep(tmp_path):
    def _write_function_time_dep(mesh, el, f0, f1, t0, t1, dtype) -> str:
        V = dolfinx.fem.functionspace(mesh, el)
        uh = dolfinx.fem.Function(V, dtype=dtype)
        uh.interpolate(f0)
        el_hash = (
            adios4dolfinx.utils.element_signature(V)
            .replace(" ", "")
            .replace(",", "")
            .replace("(", "")
            .replace(")", "")
            .replace("[", "")
            .replace("]", "")
        )
        file_hash = f"{el_hash}_{np.dtype(dtype).name}"
        # Consistent tmp dir across processes
        f_path = MPI.COMM_WORLD.bcast(tmp_path, root=0)
        filename = f_path / f"mesh_{file_hash}.bp"
        if mesh.comm.size != 1:
            adios4dolfinx.write_mesh(filename, mesh)
            adios4dolfinx.write_function(filename, uh, time=t0)
            uh.interpolate(f1)
            adios4dolfinx.write_function(filename, uh, time=t1)
        else:
            if MPI.COMM_WORLD.rank == 0:
                adios4dolfinx.write_mesh(filename, mesh)
                adios4dolfinx.write_function(filename, uh, time=t0)
                uh.interpolate(f1)
                adios4dolfinx.write_function(filename, uh, time=t1)
        return filename

    return _write_function_time_dep


@pytest.fixture(scope="function")
def read_function_time_dep():
    def _read_function_time_dep(comm, el, f0, f1, t0, t1, path, dtype):
        engine = "BP4"
        mesh = adios4dolfinx.read_mesh(path, comm, engine, dolfinx.mesh.GhostMode.shared_facet)
        V = dolfinx.fem.functionspace(mesh, el)
        v = dolfinx.fem.Function(V, dtype=dtype)

        adios4dolfinx.read_function(path, v, engine, time=t1)
        v_ex = dolfinx.fem.Function(V, dtype=dtype)
        v_ex.interpolate(f1)

        res = np.finfo(dtype).resolution
        assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res)

        adios4dolfinx.read_function(path, v, engine, time=t0)
        v_ex = dolfinx.fem.Function(V, dtype=dtype)
        v_ex.interpolate(f0)

        res = np.finfo(dtype).resolution
        assert np.allclose(v.x.array, v_ex.x.array, atol=10 * res, rtol=10 * res)

    return _read_function_time_dep
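# Hedged, self-contained usage sketch (added for illustration; not used by the
# test suite). It mirrors the fixtures above: the helper name, file name, mesh
# resolution and element choice are assumptions, while the adios4dolfinx calls
# match those exercised in this file.
def _example_checkpoint_round_trip(filename="example_checkpoint.bp"):
    # Build a small mesh and interpolate a known function
    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
    V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
    uh = dolfinx.fem.Function(V, dtype=np.float64)
    uh.interpolate(lambda x: x[0])

    # Write the mesh first, then the function checkpoint (append mode is the default)
    adios4dolfinx.write_mesh(filename, mesh)
    adios4dolfinx.write_function(filename, uh, time=0.0)

    # Read the checkpoint back, possibly on a different number of processes
    mesh2 = adios4dolfinx.read_mesh(
        filename, MPI.COMM_WORLD, "BP4", dolfinx.mesh.GhostMode.shared_facet
    )
    V2 = dolfinx.fem.functionspace(mesh2, ("Lagrange", 1))
    vh = dolfinx.fem.Function(V2, dtype=np.float64)
    adios4dolfinx.read_function(filename, vh, "BP4")
    return vh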
adios4dolfinx-0.9.3/tests/create_legacy_checkpoint.py000066400000000000000000000036071477331607400230260ustar00rootroot00000000000000# Copyright (C) 2024 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT """ Functions to create checkpoints with adios4dolfinx v0.7.x """ import argparse import pathlib from importlib.metadata import version from mpi4py import MPI import dolfinx import numpy as np import adios4dolfinx a4d_version = version("adios4dolfinx") assert a4d_version < "0.7.2", ( f"Creating a legacy checkpoint requires adios4dolfinx < 0.7.2, you have {a4d_version}." ) def f(x): values = np.zeros((2, x.shape[1]), dtype=np.float64) values[0] = x[0] values[1] = -x[1] return values def write_checkpoint(filename, mesh, el, f): V = dolfinx.fem.FunctionSpace(mesh, el) uh = dolfinx.fem.Function(V, dtype=np.float64) uh.interpolate(f) adios4dolfinx.write_mesh(V.mesh, filename) adios4dolfinx.write_function(uh, filename) def verify_checkpoint(filename, el, f): mesh = adios4dolfinx.read_mesh( MPI.COMM_WORLD, filename, "BP4", dolfinx.mesh.GhostMode.shared_facet ) V = dolfinx.fem.FunctionSpace(mesh, el) uh = dolfinx.fem.Function(V, dtype=np.float64) adios4dolfinx.read_function(uh, filename) u_ex = dolfinx.fem.Function(V, dtype=np.float64) u_ex.interpolate(f) np.testing.assert_allclose(u_ex.x.array, uh.x.array, atol=1e-15) if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--output-dir", type=str, default="legacy_checkpoint", dest="dir") inputs = parser.parse_args() path = pathlib.Path(inputs.dir) path.mkdir(exist_ok=True, parents=True) filename = path / "adios4dolfinx_checkpoint.bp" mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10) el = ("N1curl", 3) write_checkpoint(filename, mesh, el, f) MPI.COMM_WORLD.Barrier() verify_checkpoint(filename, el, f) adios4dolfinx-0.9.3/tests/create_legacy_data.py000066400000000000000000000117431477331607400216100ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT """ Functions to create checkpoints with Legacy dolfin """ import argparse import pathlib import dolfin import numpy as np import ufl_legacy as ufl def create_reference_data( h5_file: pathlib.Path, xdmf_file: pathlib.Path, mesh_name: str, function_name: str, family: str, degree: int, function_name_vec: str, ) -> dolfin.Function: mesh = dolfin.UnitCubeMesh(1, 1, 1) V = dolfin.FunctionSpace(mesh, family, degree) W = dolfin.VectorFunctionSpace(mesh, family, degree) x = dolfin.SpatialCoordinate(mesh) f0 = ufl.conditional(ufl.gt(x[0], 0.5), x[1], 2 * x[0]) v0 = dolfin.project(f0, V) w0 = dolfin.interpolate(dolfin.Expression(("x[0]", "3*x[2]", "7*x[1]"), degree=1), W) v1 = dolfin.interpolate(dolfin.Expression("x[0]", degree=1), V) w1 = dolfin.interpolate(dolfin.Expression(("x[0]", "0", "x[1]"), degree=1), W) with dolfin.HDF5File(mesh.mpi_comm(), str(h5_file), "w") as hdf: hdf.write(mesh, mesh_name) hdf.write(v0, function_name) hdf.write(w0, function_name_vec) with dolfin.XDMFFile(mesh.mpi_comm(), str(xdmf_file)) as xdmf: xdmf.write(mesh) xdmf.write_checkpoint(v0, function_name, 0, dolfin.XDMFFile.Encoding.HDF5, append=True) xdmf.write_checkpoint(w0, function_name_vec, 0, dolfin.XDMFFile.Encoding.HDF5, append=True) xdmf.write_checkpoint(v1, function_name, 1, dolfin.XDMFFile.Encoding.HDF5, append=True) xdmf.write_checkpoint(w1, function_name_vec, 1, dolfin.XDMFFile.Encoding.HDF5, 
append=True) with dolfin.XDMFFile(mesh.mpi_comm(), "test.xdmf") as xdmf: xdmf.write(mesh) return v0, w0, v1, w1 def verify_hdf5( v_ref: dolfin.Function, w_ref: dolfin.Function, h5_file: pathlib.Path, mesh_name: str, function_name: str, family: str, degree: int, function_name_vec: str, ): mesh = dolfin.Mesh() with dolfin.HDF5File(mesh.mpi_comm(), str(h5_file), "r") as hdf: hdf.read(mesh, mesh_name, False) V = dolfin.FunctionSpace(mesh, family, degree) v = dolfin.Function(V) hdf.read(v, function_name) W = dolfin.VectorFunctionSpace(mesh, family, degree) w = dolfin.Function(W) hdf.read(w, function_name_vec) assert np.allclose(v.vector().get_local(), v_ref.vector().get_local()) assert np.allclose(w.vector().get_local(), w_ref.vector().get_local()) def verify_xdmf( v0_ref: dolfin.Function, w0_ref: dolfin.Function, v1_ref: dolfin.Function, w1_ref: dolfin.Function, xdmf_file: pathlib.Path, function_name: str, family: str, degree: int, function_name_vec: str, ): mesh = dolfin.Mesh() with dolfin.XDMFFile(mesh.mpi_comm(), str(xdmf_file)) as xdmf: xdmf.read(mesh) V = dolfin.FunctionSpace(mesh, family, degree) v0 = dolfin.Function(V) xdmf.read_checkpoint(v0, function_name, 0) v1 = dolfin.Function(V) xdmf.read_checkpoint(v1, function_name, 1) W = dolfin.VectorFunctionSpace(mesh, family, degree) w0 = dolfin.Function(W) xdmf.read_checkpoint(w0, function_name_vec, 0) w1 = dolfin.Function(W) xdmf.read_checkpoint(w1, function_name_vec, 1) assert np.allclose(v0.vector().get_local(), v0_ref.vector().get_local()) assert np.allclose(w0.vector().get_local(), w0_ref.vector().get_local()) assert np.allclose(v1.vector().get_local(), v1_ref.vector().get_local()) assert np.allclose(w1.vector().get_local(), w1_ref.vector().get_local()) if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--family", type=str, default="DG") parser.add_argument("--degree", type=int, default=2) parser.add_argument("--output-dir", type=str, default="legacy", dest="dir") parser.add_argument("--mesh-name", type=str, default="mesh", dest="name") parser.add_argument("--function-name", type=str, default="v", dest="f_name") parser.add_argument("--function-name-vec", type=str, default="w", dest="f_name_vec") inputs = parser.parse_args() path = pathlib.Path(inputs.dir) path.mkdir(exist_ok=True, parents=True) h5_filename = path / f"{inputs.name}.h5" xdmf_filename = path / f"{inputs.name}_checkpoint.xdmf" v0_ref, w0_ref, v1_ref, w1_ref = create_reference_data( h5_filename, xdmf_filename, inputs.name, inputs.f_name, inputs.family, inputs.degree, inputs.f_name_vec, ) verify_hdf5( v0_ref, w0_ref, h5_filename, inputs.name, inputs.f_name, inputs.family, inputs.degree, inputs.f_name_vec, ) verify_xdmf( v0_ref, w0_ref, v1_ref, w1_ref, xdmf_filename, inputs.f_name, inputs.family, inputs.degree, inputs.f_name_vec, ) adios4dolfinx-0.9.3/tests/test_attributes.py000066400000000000000000000030301477331607400212430ustar00rootroot00000000000000from pathlib import Path from mpi4py import MPI import adios2 import numpy as np import pytest from packaging.version import parse as _v import adios4dolfinx @pytest.mark.skipif( _v(np.__version__) >= _v("2.0.0") and _v(adios2.__version__) < _v("2.10.2"), reason="Cannot use numpy>=2.0.0 and adios2<2.10.2", ) @pytest.mark.parametrize("comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_attributes(comm, tmp_path): attributes1 = { "a": np.array([1, 2, 3], dtype=np.uint8), "b": np.array([4, 5], dtype=np.uint8), } attributes2 = { "c": 
np.array([6], dtype=np.uint8), "d": np.array([7, 8, 9, 10], dtype=np.uint8), } fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) file = fname / Path("attributes.bp") adios4dolfinx.write_attributes(comm=comm, filename=file, name="group1", attributes=attributes1) adios4dolfinx.write_attributes(comm=comm, filename=file, name="group2", attributes=attributes2) MPI.COMM_WORLD.Barrier() loaded_attributes1 = adios4dolfinx.read_attributes(comm=comm, filename=file, name="group1") loaded_attributes2 = adios4dolfinx.read_attributes(comm=comm, filename=file, name="group2") for k, v in loaded_attributes1.items(): assert np.allclose(v, attributes1[k]) for k, v in attributes1.items(): assert np.allclose(v, loaded_attributes1[k]) for k, v in loaded_attributes2.items(): assert np.allclose(v, attributes2[k]) for k, v in attributes2.items(): assert np.allclose(v, loaded_attributes2[k]) adios4dolfinx-0.9.3/tests/test_checkpointing.py000066400000000000000000000212171477331607400217110ustar00rootroot00000000000000import itertools from mpi4py import MPI import basix import basix.ufl import dolfinx import numpy as np import pytest import adios4dolfinx dtypes = [np.float64, np.float32] # Mesh geometry dtypes write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh two_dimensional_cell_types = [ dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral, ] three_dimensional_cell_types = [ dolfinx.mesh.CellType.tetrahedron, dolfinx.mesh.CellType.hexahedron, ] two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types, write_comm) three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types, write_comm) @pytest.fixture(params=two_dim_combinations, scope="module") def mesh_2D(request): dtype, cell_type, write_comm = request.param mesh = dolfinx.mesh.create_unit_square(write_comm, 10, 10, cell_type=cell_type, dtype=dtype) return mesh @pytest.fixture(params=three_dim_combinations, scope="module") def mesh_3D(request): dtype, cell_type, write_comm = request.param M = 5 mesh = dolfinx.mesh.create_unit_cube(write_comm, M, M, M, cell_type=cell_type, dtype=dtype) return mesh @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_P_2D( read_comm, family, degree, is_complex, mesh_2D, get_dtype, write_function, read_function ): mesh = mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = x[0] if is_complex: values[0] += 1j * x[1] values[1] -= 3j * x[1] return values hash = write_function(mesh, el, f, f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_P_3D( read_comm, family, degree, is_complex, mesh_3D, get_dtype, write_function, read_function ): mesh = mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, 
shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.pi + x[0] values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) if is_complex: values[0] -= 2j * x[2] values[2] += 1j * x[1] return values hash = write_function(mesh, el, f, f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_P_2D_time( read_comm, family, degree, is_complex, mesh_2D, get_dtype, write_function_time_dep, read_function_time_dep, ): mesh = mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) def f0(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = x[0] if is_complex: values[0] += x[1] * 1j values[1] -= 3j * x[1] return values def f1(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = 2 * np.full(x.shape[1], np.pi) + x[0] values[1] = -x[0] + 2 * x[1] if is_complex: values[0] += x[1] * 1j values[1] += 3j * x[1] return values t0 = 0.8 t1 = 0.6 hash = write_function_time_dep(mesh, el, f0, f1, t0, t1, f_dtype) MPI.COMM_WORLD.Barrier() read_function_time_dep(read_comm, el, f0, f1, t0, t1, hash, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_P_3D_time( read_comm, family, degree, is_complex, mesh_3D, get_dtype, write_function_time_dep, read_function_time_dep, ): mesh = mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.pi + x[0] values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) if is_complex: values[0] += 2j * x[2] values[2] += 5j * x[1] return values def g(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = x[0] values[1] = 2 * x[0] values[2] = x[0] if is_complex: values[0] += np.pi * 2j * x[2] values[1] += 1j * x[2] values[2] += 1j * np.cos(x[1]) return values t0 = 0.1 t1 = 1.3 hash = write_function_time_dep(mesh, el, g, f, t0, t1, f_dtype) MPI.COMM_WORLD.Barrier() read_function_time_dep(read_comm, el, g, f, t0, t1, hash, f_dtype) @pytest.mark.parametrize( "func, args", [ (adios4dolfinx.read_attributes, ("nonexisting_file.bp", MPI.COMM_WORLD, "")), (adios4dolfinx.read_timestamps, ("nonexisting_file.bp", MPI.COMM_WORLD, "")), (adios4dolfinx.read_meshtags, ("nonexisting_file.bp", MPI.COMM_WORLD, None, "")), (adios4dolfinx.read_function, ("nonexisting_file.bp", None)), (adios4dolfinx.read_mesh, ("nonexisting_file.bp", MPI.COMM_WORLD)), ], ) def test_read_nonexisting_file_raises_FileNotFoundError(func, args): with pytest.raises(FileNotFoundError): func(*args) def test_read_function_with_invalid_name_raises_KeyError(tmp_path): comm = MPI.COMM_WORLD f_path = comm.bcast(tmp_path, root=0) filename = f_path / "func.bp" mesh = dolfinx.mesh.create_unit_square(comm, 10, 10, 
cell_type=dolfinx.mesh.CellType.triangle) V = dolfinx.fem.functionspace(mesh, ("P", 1)) u = dolfinx.fem.Function(V) adios4dolfinx.write_function(filename, u, time=0, name="some_name") adios4dolfinx.write_function(filename, u, time=0, name="some_other_name") variables = set(sorted(["some_name", "some_other_name"])) with pytest.raises(KeyError) as e: adios4dolfinx.read_function(filename, u, time=0, name="nonexisting_name") assert e.value.args[0] == ( f"nonexisting_name not found in {filename}. Did you mean one of {variables}?" ) def test_read_timestamps(get_dtype, mesh_2D, tmp_path): mesh = mesh_2D dtype = get_dtype(mesh.geometry.x.dtype, False) el = basix.ufl.element( "Lagrange", mesh.ufl_cell().cellname(), 1, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V, dtype=dtype, name="u") v = dolfinx.fem.Function(V, dtype=dtype, name="v") f_path = mesh.comm.bcast(tmp_path, root=0) filename = f_path / "read_time_stamps.bp" t_u = [0.1, 1.4] t_v = [0.45, 1.2] adios4dolfinx.write_mesh(filename, mesh) adios4dolfinx.write_function(filename, u, time=t_u[0]) adios4dolfinx.write_function(filename, v, time=t_v[0]) adios4dolfinx.write_function(filename, u, time=t_u[1]) adios4dolfinx.write_function(filename, v, time=t_v[1]) timestamps_u = adios4dolfinx.read_timestamps( comm=mesh.comm, filename=filename, function_name="u" ) timestamps_v = adios4dolfinx.read_timestamps( comm=mesh.comm, filename=filename, function_name="v" ) assert np.allclose(timestamps_u, t_u) assert np.allclose(timestamps_v, t_v) adios4dolfinx-0.9.3/tests/test_checkpointing_vector.py000066400000000000000000000156011477331607400232730ustar00rootroot00000000000000import itertools from mpi4py import MPI import basix import basix.ufl import dolfinx import numpy as np import pytest dtypes = [np.float64, np.float32] # Mesh geometry dtypes write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh simplex_two_dim = itertools.product(dtypes, [dolfinx.mesh.CellType.triangle], write_comm) simplex_three_dim = itertools.product(dtypes, [dolfinx.mesh.CellType.tetrahedron], write_comm) non_simplex_two_dim = itertools.product(dtypes, [dolfinx.mesh.CellType.quadrilateral], write_comm) non_simplex_three_dim = itertools.product(dtypes, [dolfinx.mesh.CellType.hexahedron], write_comm) @pytest.fixture(params=simplex_two_dim, scope="module") def simplex_mesh_2D(request): dtype, cell_type, write_comm = request.param mesh = dolfinx.mesh.create_unit_square(write_comm, 10, 10, cell_type=cell_type, dtype=dtype) return mesh @pytest.fixture(params=simplex_three_dim, scope="module") def simplex_mesh_3D(request): dtype, cell_type, write_comm = request.param mesh = dolfinx.mesh.create_unit_cube(write_comm, 5, 5, 5, cell_type=cell_type, dtype=dtype) return mesh @pytest.fixture(params=non_simplex_two_dim, scope="module") def non_simplex_mesh_2D(request): dtype, cell_type, write_comm = request.param mesh = dolfinx.mesh.create_unit_square(write_comm, 10, 10, cell_type=cell_type, dtype=dtype) return mesh @pytest.fixture(params=non_simplex_three_dim, scope="module") def non_simplex_mesh_3D(request): dtype, cell_type, write_comm = request.param mesh = dolfinx.mesh.create_unit_cube(write_comm, 5, 5, 5, cell_type=cell_type, dtype=dtype) return mesh @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def 
test_read_write_2D( read_comm, family, degree, is_complex, simplex_mesh_2D, get_dtype, write_function, read_function ): mesh = simplex_mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = x[1] if is_complex: values[0] += 2j * x[1] values[1] += 2j * x[0] return values fname = write_function(mesh, el, f, f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, fname, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_3D( read_comm, family, degree, is_complex, simplex_mesh_3D, get_dtype, write_function, read_function ): mesh = simplex_mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) if is_complex: values[0] += 2j * x[2] values[1] += 2j * np.cos(x[2]) return values fname = write_function(mesh, el, f, dtype=f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, fname, dtype=f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_2D_quad( read_comm, family, degree, is_complex, non_simplex_mesh_2D, get_dtype, write_function, read_function, ): mesh = non_simplex_mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) values[1] = x[1] + 2 * x[0] if is_complex: values[0] += 2j * x[2] values[1] += 2j * np.cos(x[2]) return values hash = write_function(mesh, el, f, f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash, f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["NCF"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_hex( read_comm, family, degree, is_complex, non_simplex_mesh_3D, get_dtype, write_function, read_function, ): mesh = non_simplex_mesh_3D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = np.cos(x[2]) values[2] = x[0] if is_complex: values[0] += 2j * x[2] values[2] -= 1j * x[1] return values hash = write_function(mesh, el, f, dtype=f_dtype) MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash, dtype=f_dtype) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_read_write_multiple( read_comm, family, degree, is_complex, non_simplex_mesh_2D, get_dtype, write_function, 
read_function, ): mesh = non_simplex_mesh_2D f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) values[1] = x[1] + 2 * x[0] if is_complex: values[0] -= 2j * x[2] values[1] += 2j * np.cos(x[2]) return values def g(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = 2 * x[1] values[1] = 3 * x[0] if is_complex: values[0] += 3j * x[0] values[1] += 2j * x[0] * x[1] return values hash_f = write_function(mesh, el, f, dtype=f_dtype, name="f", append=False) hash_g = write_function(mesh, el, g, dtype=f_dtype, name="g", append=True) assert hash_f == hash_g MPI.COMM_WORLD.Barrier() read_function(read_comm, el, f, hash_f, dtype=f_dtype, name="f") read_function(read_comm, el, g, hash_g, dtype=f_dtype, name="g") adios4dolfinx-0.9.3/tests/test_legacy_readers.py000066400000000000000000000127071477331607400220410ustar00rootroot00000000000000# Copyright (C) 2023 Jørgen Schartum Dokken # # This file is part of adios4dolfinx # # SPDX-License-Identifier: MIT import pathlib from mpi4py import MPI import dolfinx import numpy as np import pytest import ufl from dolfinx.fem.petsc import LinearProblem from adios4dolfinx import ( read_function, read_function_from_legacy_h5, read_mesh, read_mesh_from_legacy_h5, ) def test_legacy_mesh(): comm = MPI.COMM_WORLD path = (pathlib.Path("legacy") / "mesh.h5").absolute() if not path.exists(): pytest.skip(f"{path} does not exist") mesh = read_mesh_from_legacy_h5(filename=path, comm=comm, group="/mesh") assert mesh.topology.dim == 3 volume = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.dx(domain=mesh))), op=MPI.SUM, ) surface = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.ds(domain=mesh))), op=MPI.SUM, ) assert np.isclose(volume, 1) assert np.isclose(surface, 6) mesh.topology.create_entities(mesh.topology.dim - 1) num_facets = mesh.topology.index_map(mesh.topology.dim - 1).size_global assert num_facets == 18 def test_read_legacy_mesh_from_checkpoint(): comm = MPI.COMM_WORLD filename = (pathlib.Path("legacy") / "mesh_checkpoint.h5").absolute() if not filename.exists(): pytest.skip(f"{filename} does not exist") mesh = read_mesh_from_legacy_h5(filename=filename, comm=comm, group="/Mesh/mesh") assert mesh.topology.dim == 3 volume = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.dx(domain=mesh))), op=MPI.SUM, ) surface = mesh.comm.allreduce( dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * ufl.ds(domain=mesh))), op=MPI.SUM, ) assert np.isclose(volume, 1) assert np.isclose(surface, 6) mesh.topology.create_entities(mesh.topology.dim - 1) num_facets = mesh.topology.index_map(mesh.topology.dim - 1).size_global assert num_facets == 18 def test_legacy_function(): comm = MPI.COMM_WORLD path = (pathlib.Path("legacy") / "mesh.h5").absolute() if not path.exists(): pytest.skip(f"{path} does not exist") mesh = read_mesh_from_legacy_h5(path, comm, "/mesh") V = dolfinx.fem.functionspace(mesh, ("DG", 2)) u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(u, v) * ufl.dx x = ufl.SpatialCoordinate(mesh) f = ufl.conditional(ufl.gt(x[0], 0.5), x[1], 2 * x[0]) L = ufl.inner(f, v) * ufl.dx uh = dolfinx.fem.Function(V) problem = LinearProblem(a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"}) problem.solve() u_in = dolfinx.fem.Function(V) read_function_from_legacy_h5(path, mesh.comm, 
u_in, group="v")
    np.testing.assert_allclose(uh.x.array, u_in.x.array, atol=1e-14)

    W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,)))
    wh = dolfinx.fem.Function(W)
    wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1]))
    w_in = dolfinx.fem.Function(W)
    read_function_from_legacy_h5(path, mesh.comm, w_in, group="w")
    np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14)


def test_read_legacy_function_from_checkpoint():
    comm = MPI.COMM_WORLD
    path = (pathlib.Path("legacy") / "mesh_checkpoint.h5").absolute()
    if not path.exists():
        pytest.skip(f"{path} does not exist")
    mesh = read_mesh_from_legacy_h5(path, comm, "/Mesh/mesh")
    V = dolfinx.fem.functionspace(mesh, ("DG", 2))
    u = ufl.TrialFunction(V)
    v = ufl.TestFunction(V)
    a = ufl.inner(u, v) * ufl.dx
    x = ufl.SpatialCoordinate(mesh)
    f = ufl.conditional(ufl.gt(x[0], 0.5), x[1], 2 * x[0])
    L = ufl.inner(f, v) * ufl.dx
    uh = dolfinx.fem.Function(V)
    problem = LinearProblem(a, L, [], uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"})
    problem.solve()

    u_in = dolfinx.fem.Function(V)
    read_function_from_legacy_h5(path, mesh.comm, u_in, group="v", step=0)
    assert np.allclose(uh.x.array, u_in.x.array)

    # Check second step
    uh.interpolate(lambda x: x[0])
    read_function_from_legacy_h5(path, mesh.comm, u_in, group="v", step=1)
    assert np.allclose(uh.x.array, u_in.x.array)

    W = dolfinx.fem.functionspace(mesh, ("DG", 2, (mesh.geometry.dim,)))
    wh = dolfinx.fem.Function(W)
    wh.interpolate(lambda x: (x[0], 3 * x[2], 7 * x[1]))
    w_in = dolfinx.fem.Function(W)
    read_function_from_legacy_h5(path, mesh.comm, w_in, group="w", step=0)
    np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14)

    wh.interpolate(lambda x: np.vstack((x[0], 0 * x[0], x[1])))
    read_function_from_legacy_h5(path, mesh.comm, w_in, group="w", step=1)
    np.testing.assert_allclose(wh.x.array, w_in.x.array, atol=1e-14)


def test_adios4dolfinx_legacy():
    comm = MPI.COMM_WORLD
    path = (pathlib.Path("legacy_checkpoint") / "adios4dolfinx_checkpoint.bp").absolute()
    if not path.exists():
        pytest.skip(f"{path} does not exist")

    el = ("N1curl", 3)
    mesh = read_mesh(path, comm, "BP4", dolfinx.mesh.GhostMode.shared_facet, legacy=True)

    def f(x):
        values = np.zeros((2, x.shape[1]), dtype=np.float64)
        values[0] = x[0]
        values[1] = -x[1]
        return values

    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    read_function(path, u, engine="BP4", legacy=True)
    u_ex = dolfinx.fem.Function(V)
    u_ex.interpolate(f)
    np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=1e-14)
adios4dolfinx-0.9.3/tests/test_mesh_writer.py000066400000000000000000000156531477331607400214170ustar00rootroot00000000000000from mpi4py import MPI

import dolfinx
import numpy as np
import pytest
import ufl

from adios4dolfinx import read_mesh, write_mesh
from adios4dolfinx.adios2_helpers import adios2


@pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("HDF5", ".h5"), ("BP5", ".bp")])
@pytest.mark.parametrize(
    "ghost_mode", [dolfinx.mesh.GhostMode.shared_facet, dolfinx.mesh.GhostMode.none]
)
@pytest.mark.parametrize("store_partition", [True, False])
def test_mesh_read_writer(encoder, suffix, ghost_mode, tmp_path, store_partition):
    N = 7
    # Consistent tmp dir across processes
    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / f"adios_mesh_{encoder}_{store_partition}"
    xdmf_file = fname / f"xdmf_mesh_{encoder}_{ghost_mode}_{store_partition}"
    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode)
    write_mesh(file.with_suffix(suffix), mesh, encoder, store_partition_info=store_partition)
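    # Editor's note: with store_partition_info=True the checkpoint also records
    # cell ownership and ghosting, so a later read_mesh(...,
    # read_from_partition=True) on the same number of processes can reproduce
    # the original partitioning (verified via the midpoint checks below).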
    mesh.comm.Barrier()
    with dolfinx.io.XDMFFile(mesh.comm, xdmf_file.with_suffix(".xdmf"), "w") as xdmf:
        xdmf.write_mesh(mesh)
    mesh.comm.Barrier()
    mesh_adios = read_mesh(
        file.with_suffix(suffix),
        MPI.COMM_WORLD,
        engine=encoder,
        ghost_mode=ghost_mode,
        read_from_partition=store_partition,
    )
    mesh_adios.comm.Barrier()
    if store_partition:

        def compute_distance_matrix(points_A, points_B, tol=1e-12):
            points_A_e = np.expand_dims(points_A, 1)
            points_B_e = np.expand_dims(points_B, 0)
            distances = np.sum(np.square(points_A_e - points_B_e), axis=2)
            return distances < tol

        cell_map = mesh.topology.index_map(mesh.topology.dim)
        new_cell_map = mesh_adios.topology.index_map(mesh_adios.topology.dim)
        assert cell_map.size_local == new_cell_map.size_local
        assert cell_map.num_ghosts == new_cell_map.num_ghosts
        mesh.topology.create_connectivity(mesh.topology.dim, mesh.topology.dim)
        midpoints = dolfinx.mesh.compute_midpoints(
            mesh,
            mesh.topology.dim,
            np.arange(cell_map.size_local + cell_map.num_ghosts, dtype=np.int32),
        )
        mesh_adios.topology.create_connectivity(mesh_adios.topology.dim, mesh_adios.topology.dim)
        new_midpoints = dolfinx.mesh.compute_midpoints(
            mesh_adios,
            mesh_adios.topology.dim,
            np.arange(new_cell_map.size_local + new_cell_map.num_ghosts, dtype=np.int32),
        )
        # Check that all points owned by the initial mesh are owned by the new mesh
        # (they might be locally reordered)
        owned_distances = compute_distance_matrix(
            midpoints[: cell_map.size_local], new_midpoints[: new_cell_map.size_local]
        )
        np.testing.assert_allclose(np.sum(owned_distances, axis=1), 1)
        # Check that all points that are ghosted in the original mesh are ghosted on the
        # same process in the new mesh
        ghost_distances = compute_distance_matrix(
            midpoints[cell_map.size_local :], new_midpoints[new_cell_map.size_local :]
        )
        np.testing.assert_allclose(np.sum(ghost_distances, axis=1), 1)

    mesh.comm.Barrier()
    with dolfinx.io.XDMFFile(mesh.comm, xdmf_file.with_suffix(".xdmf"), "r") as xdmf:
        mesh_xdmf = xdmf.read_mesh(ghost_mode=ghost_mode)

    for i in range(mesh.topology.dim + 1):
        mesh.topology.create_entities(i)
        mesh_xdmf.topology.create_entities(i)
        mesh_adios.topology.create_entities(i)
        assert (
            mesh_xdmf.topology.index_map(i).size_global
            == mesh_adios.topology.index_map(i).size_global
        )

    # Check that integration over different entities is consistent
    measures = (
        [ufl.ds, ufl.dx] if ghost_mode is dolfinx.mesh.GhostMode.none else [ufl.ds, ufl.dS, ufl.dx]
    )
    for measure in measures:
        c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_adios)))
        c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
        c_xdmf = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_xdmf)))
        assert np.isclose(
            mesh_adios.comm.allreduce(c_adios, MPI.SUM),
            mesh.comm.allreduce(c_xdmf, MPI.SUM),
        )
        assert np.isclose(
            mesh_adios.comm.allreduce(c_adios, MPI.SUM),
            mesh.comm.allreduce(c_ref, MPI.SUM),
        )


@pytest.mark.parametrize("encoder, suffix", [("BP4", ".bp"), ("BP5", ".bp")])
@pytest.mark.parametrize(
    "ghost_mode", [dolfinx.mesh.GhostMode.shared_facet, dolfinx.mesh.GhostMode.none]
)
@pytest.mark.parametrize("store_partition", [True, False])
def test_timedep_mesh(encoder, suffix, ghost_mode, tmp_path, store_partition):
    # ("HDF5", ".h5") is currently unsupported here; it is unclear why.
    N = 13
    # Consistent tmp dir across processes
    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / f"adios_time_dep_mesh_{encoder}"
    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, N, N, N, ghost_mode=ghost_mode)

    def u(x):
        return np.asarray([x[0] + 0.1 *
np.sin(x[1]), 0.2 * np.cos(x[1]), x[2]])

    write_mesh(
        file.with_suffix(suffix),
        mesh,
        encoder,
        mode=adios2.Mode.Write,
        time=0.0,
        store_partition_info=store_partition,
    )
    delta_x = u(mesh.geometry.x.T).T
    mesh.geometry.x[:] += delta_x
    write_mesh(file.with_suffix(suffix), mesh, encoder, mode=adios2.Mode.Append, time=3.0)
    mesh.geometry.x[:] -= delta_x

    mesh_first = read_mesh(
        file.with_suffix(suffix),
        MPI.COMM_WORLD,
        encoder,
        ghost_mode,
        time=0.0,
        read_from_partition=store_partition,
    )
    mesh_first.comm.Barrier()

    # Check that integration over different entities is consistent
    measures = [ufl.ds, ufl.dx]
    if ghost_mode == dolfinx.mesh.GhostMode.shared_facet:
        measures.append(ufl.dS)
    for measure in measures:
        c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_first)))
        c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
        assert np.isclose(
            mesh_first.comm.allreduce(c_adios, MPI.SUM),
            mesh.comm.allreduce(c_ref, MPI.SUM),
        )

    mesh.geometry.x[:] += delta_x
    mesh_second = read_mesh(
        file.with_suffix(suffix),
        MPI.COMM_WORLD,
        encoder,
        ghost_mode,
        time=3.0,
        read_from_partition=store_partition,
    )
    mesh_second.comm.Barrier()
    measures = [ufl.ds, ufl.dx]
    if ghost_mode == dolfinx.mesh.GhostMode.shared_facet:
        measures.append(ufl.dS)
    for measure in measures:
        c_adios = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh_second)))
        c_ref = dolfinx.fem.assemble_scalar(dolfinx.fem.form(1 * measure(domain=mesh)))
        assert np.isclose(
            mesh_second.comm.allreduce(c_adios, MPI.SUM),
            mesh.comm.allreduce(c_ref, MPI.SUM),
        )
adios4dolfinx-0.9.3/tests/test_meshtags.py000066400000000000000000000265601477331607400207050ustar00rootroot00000000000000from __future__ import annotations

import itertools
import typing
from collections import ChainMap

from mpi4py import MPI

import dolfinx
import numpy as np
import numpy.typing as npt
import pytest

import adios4dolfinx

root = 0

dtypes: list[str] = ["float64", "float32"]  # Mesh geometry dtypes
write_comm: list[MPI.Intracomm] = [
    MPI.COMM_SELF,
    MPI.COMM_WORLD,
]  # Communicators for creating mesh
read_modes: list[dolfinx.mesh.GhostMode] = [
    dolfinx.mesh.GhostMode.none,
    dolfinx.mesh.GhostMode.shared_facet,
]
# Cell types of different dimensions
two_dimensional_cell_types: list[dolfinx.mesh.CellType] = [
    dolfinx.mesh.CellType.triangle,
    dolfinx.mesh.CellType.quadrilateral,
]
three_dimensional_cell_types: list[dolfinx.mesh.CellType] = [
    dolfinx.mesh.CellType.tetrahedron,
    dolfinx.mesh.CellType.hexahedron,
]

one_dim_combinations = itertools.product(dtypes, write_comm)
two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types, write_comm)
three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types, write_comm)


@pytest.fixture(params=one_dim_combinations, scope="module")
def mesh_1D(request):
    dtype, write_comm = request.param
    mesh = dolfinx.mesh.create_unit_interval(write_comm, 8, dtype=np.dtype(dtype))
    return mesh


@pytest.fixture(params=two_dim_combinations, scope="module")
def mesh_2D(request):
    dtype, cell_type, write_comm = request.param
    mesh = dolfinx.mesh.create_unit_square(
        write_comm, 10, 7, cell_type=cell_type, dtype=np.dtype(dtype)
    )
    return mesh


@pytest.fixture(params=three_dim_combinations, scope="module")
def mesh_3D(request):
    dtype, cell_type, write_comm = request.param
    mesh = dolfinx.mesh.create_unit_cube(
        write_comm, 5, 7, 3, cell_type=cell_type, dtype=np.dtype(dtype)
    )
    return mesh


def generate_reference_map(
    mesh: dolfinx.mesh.Mesh,
    meshtag: dolfinx.mesh.MeshTags,
    comm: MPI.Intracomm,
root: int, ) -> typing.Optional[dict[str, tuple[int, npt.NDArray]]]: """ Helper function to generate map from meshtag value to its corresponding index and midpoint. Args: mesh: The mesh meshtag: The associated meshtag comm: MPI communicator to gather the map from all processes with root (int): Rank to store data on Returns: Root rank returns the map, all other ranks return None """ mesh.topology.create_connectivity(meshtag.dim, mesh.topology.dim) midpoints = dolfinx.mesh.compute_midpoints(mesh, meshtag.dim, meshtag.indices) e_map = mesh.topology.index_map(meshtag.dim) value_to_midpoint = {} for index, value in zip(meshtag.indices, meshtag.values): value_to_midpoint[value] = ( int(e_map.local_range[0] + index), midpoints[index], ) global_map = comm.gather(value_to_midpoint, root=root) if comm.rank == root: return dict(ChainMap(*global_map)) # type: ignore return None @pytest.mark.parametrize("read_mode", read_modes) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_checkpointing_meshtags_1D(mesh_1D, read_comm, read_mode, tmp_path): mesh = mesh_1D # Write unique mesh file for each combination of MPI communicator and dtype hash = f"{mesh.comm.size}_{mesh.geometry.x.dtype}" fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) filename = fname / f"meshtags_1D_{hash}.bp" # If mesh communicator is more than a self communicator or serial write on all processes. # If serial or self communicator, only write on root rank if mesh.comm.size != 1: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") # Create meshtags labeling each entity (of each co-dimension) with a # unique number (their initial global index). org_maps = [] for dim in range(mesh.topology.dim + 1): mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local entities = np.arange(num_entities_local, dtype=np.int32) ft = dolfinx.mesh.meshtags(mesh, dim, entities, e_map.local_range[0] + entities) ft.name = f"entity_{dim}" # If parallel write on all processes, else write on root rank if mesh.comm.size != 1: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") # Create map from mesh tag value to its corresponding index and midpoint org_map = generate_reference_map(mesh, ft, mesh.comm, root) org_maps.append(org_map) else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") # Create map from mesh tag value to its corresponding index and midpoint org_map = generate_reference_map(mesh, ft, MPI.COMM_SELF, root) org_maps.append(org_map) del ft del mesh MPI.COMM_WORLD.Barrier() # Read mesh on testing communicator new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): # Read meshtags on all processes if testing communicator has multiple ranks # else read on root 0 if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) # Generate meshtags map from mesh tag value to its corresponding index and midpoint # and gather on root process read_map = generate_reference_map(new_mesh, new_ft, new_mesh.comm, root) else: if MPI.COMM_WORLD.rank == root: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) read_map = generate_reference_map(new_mesh, new_ft, read_comm, root) # On root process, check that midpoints are the same 
for each value in the meshtag if MPI.COMM_WORLD.rank == root: org_map = org_maps[dim] assert len(org_map) == len(read_map) for value, (_, midpoint) in org_map.items(): _, read_midpoint = read_map[value] np.testing.assert_allclose(read_midpoint, midpoint) @pytest.mark.parametrize("read_mode", read_modes) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_checkpointing_meshtags_2D(mesh_2D, read_comm, read_mode, tmp_path): mesh = mesh_2D hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}" fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) filename = fname / f"meshtags_1D_{hash}.bp" if mesh.comm.size != 1: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") org_maps = [] for dim in range(mesh.topology.dim + 1): mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local entities = np.arange(num_entities_local, dtype=np.int32) ft = dolfinx.mesh.meshtags(mesh, dim, entities, e_map.local_range[0] + entities) ft.name = f"entity_{dim}" if mesh.comm.size != 1: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") org_map = generate_reference_map(mesh, ft, mesh.comm, root) org_maps.append(org_map) else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") org_map = generate_reference_map(mesh, ft, MPI.COMM_SELF, root) org_maps.append(org_map) del ft del mesh MPI.COMM_WORLD.Barrier() new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) read_map = generate_reference_map(new_mesh, new_ft, new_mesh.comm, root) else: if MPI.COMM_WORLD.rank == root: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) read_map = generate_reference_map(new_mesh, new_ft, read_comm, root) if MPI.COMM_WORLD.rank == root: org_map = org_maps[dim] assert len(org_map) == len(read_map) for value, (_, midpoint) in org_map.items(): _, read_midpoint = read_map[value] np.testing.assert_allclose(read_midpoint, midpoint) @pytest.mark.parametrize("read_mode", read_modes) @pytest.mark.parametrize("read_comm", [MPI.COMM_SELF, MPI.COMM_WORLD]) def test_checkpointing_meshtags_3D(mesh_3D, read_comm, read_mode, tmp_path): mesh = mesh_3D hash = f"{mesh.comm.size}_{mesh.topology.cell_name()}_{mesh.geometry.x.dtype}" fname = MPI.COMM_WORLD.bcast(tmp_path, root=0) filename = fname / f"meshtags_1D_{hash}.bp" if mesh.comm.size != 1: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_mesh(filename, mesh, engine="BP4") org_maps = [] for dim in range(mesh.topology.dim + 1): mesh.topology.create_connectivity(dim, mesh.topology.dim) e_map = mesh.topology.index_map(dim) num_entities_local = e_map.size_local entities = np.arange(num_entities_local, dtype=np.int32) ft = dolfinx.mesh.meshtags(mesh, dim, entities, e_map.local_range[0] + entities) ft.name = f"entity_{dim}" if mesh.comm.size != 1: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") org_map = generate_reference_map(mesh, ft, mesh.comm, root) org_maps.append(org_map) else: if MPI.COMM_WORLD.rank == root: adios4dolfinx.write_meshtags(filename, mesh, ft, engine="BP4") org_map = 
generate_reference_map(mesh, ft, MPI.COMM_SELF, root) org_maps.append(org_map) del ft del mesh MPI.COMM_WORLD.Barrier() new_mesh = adios4dolfinx.read_mesh(filename, read_comm, engine="BP4", ghost_mode=read_mode) for dim in range(new_mesh.topology.dim + 1): if read_comm.size != 1: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) read_map = generate_reference_map(new_mesh, new_ft, new_mesh.comm, root) else: if MPI.COMM_WORLD.rank == root: new_ft = adios4dolfinx.read_meshtags( filename, new_mesh, meshtag_name=f"entity_{dim}", engine="BP4" ) read_map = generate_reference_map(new_mesh, new_ft, MPI.COMM_SELF, root) if MPI.COMM_WORLD.rank == root: org_map = org_maps[dim] assert len(org_map) == len(read_map) for value, (_, midpoint) in org_map.items(): _, read_midpoint = read_map[value] np.testing.assert_allclose(read_midpoint, midpoint) adios4dolfinx-0.9.3/tests/test_numpy_vectorization.py000066400000000000000000000132701477331607400232140ustar00rootroot00000000000000import itertools from typing import Tuple from mpi4py import MPI import basix.ufl import dolfinx import numpy as np import numpy.typing as npt import pytest from adios4dolfinx.utils import compute_dofmap_pos, unroll_dofmap write_comm = [MPI.COMM_SELF, MPI.COMM_WORLD] # Communicators for creating mesh ghost_mode = [dolfinx.mesh.GhostMode.none, dolfinx.mesh.GhostMode.shared_facet] two_dimensional_cell_types = [ dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral, ] three_dimensional_cell_types = [dolfinx.mesh.CellType.hexahedron] two_dim_combinations = itertools.product(two_dimensional_cell_types, write_comm, ghost_mode) three_dim_combinations = itertools.product(three_dimensional_cell_types, write_comm, ghost_mode) @pytest.fixture(params=two_dim_combinations, scope="module") def mesh_2D(request): cell_type, write_comm, ghost_mode = request.param mesh = dolfinx.mesh.create_unit_square( write_comm, 10, 10, cell_type=cell_type, ghost_mode=ghost_mode ) return mesh @pytest.fixture(params=three_dim_combinations, scope="module") def mesh_3D(request): cell_type, write_comm, ghost_mode = request.param M = 5 mesh = dolfinx.mesh.create_unit_cube( write_comm, M, M, M, cell_type=cell_type, ghost_mode=ghost_mode ) return mesh def compute_positions( dofs: npt.NDArray[np.int32], dofmap_bs: int, num_owned_dofs: int, num_owned_cells: int, ) -> Tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: """ Support function for test. Given a dofmap, compute the local cell and position in the dofmap for each owned dof. 
    The last cell (with respect to local index) will be the one in the output map
    """
    dof_to_cell_map = np.zeros(num_owned_dofs, dtype=np.int32)
    dof_to_pos_map = np.zeros(num_owned_dofs, dtype=np.int32)
    for c in range(num_owned_cells):
        for i, dof in enumerate(dofs[c]):
            for b in range(dofmap_bs):
                local_dof = dof * dofmap_bs + b
                if local_dof < num_owned_dofs:
                    dof_to_cell_map[local_dof] = c
                    dof_to_pos_map[local_dof] = i * dofmap_bs + b
    return dof_to_cell_map, dof_to_pos_map


@pytest.mark.parametrize("family", ["Lagrange", "DG"])
@pytest.mark.parametrize("degree", [1, 4])
def test_unroll_P(family, degree, mesh_2D):
    V = dolfinx.fem.functionspace(mesh_2D, (family, degree))
    dofmap = V.dofmap
    unrolled_map = unroll_dofmap(dofmap.list, dofmap.bs)
    normal_unroll = np.zeros(
        (dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), dtype=np.int32
    )
    for i, dofs in enumerate(dofmap.list):
        for j, dof in enumerate(dofs):
            for k in range(dofmap.bs):
                normal_unroll[i, j * dofmap.bs + k] = dof * dofmap.bs + k
    np.testing.assert_allclose(unrolled_map, normal_unroll)


@pytest.mark.parametrize("family", ["RTCF"])
@pytest.mark.parametrize("degree", [1, 2, 3])
def test_unroll_RTCF(family, degree, mesh_3D):
    el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), degree)
    V = dolfinx.fem.functionspace(mesh_3D, el)
    dofmap = V.dofmap
    unrolled_map = unroll_dofmap(dofmap.list, dofmap.bs)
    normal_unroll = np.zeros(
        (dofmap.list.shape[0], dofmap.list.shape[1] * dofmap.bs), dtype=np.int32
    )
    for i, dofs in enumerate(dofmap.list):
        for j, dof in enumerate(dofs):
            for k in range(dofmap.bs):
                normal_unroll[i, j * dofmap.bs + k] = dof * dofmap.bs + k
    np.testing.assert_allclose(unrolled_map, normal_unroll)


@pytest.mark.parametrize("family", ["RTCF"])
@pytest.mark.parametrize("degree", [1, 2, 3])
def test_compute_dofmap_pos_RTCF(family, degree, mesh_3D):
    el = basix.ufl.element(family, mesh_3D.ufl_cell().cellname(), degree)
    V = dolfinx.fem.functionspace(mesh_3D, el)
    local_cells, local_pos = compute_dofmap_pos(V)
    num_cells_local = mesh_3D.topology.index_map(mesh_3D.topology.dim).size_local
    num_dofs_local = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    reference_cells, reference_pos = compute_positions(
        V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local
    )
    np.testing.assert_allclose(reference_cells, local_cells)
    np.testing.assert_allclose(reference_pos, local_pos)


@pytest.mark.parametrize("family", ["Lagrange", "DG"])
@pytest.mark.parametrize("degree", [1, 4])
def test_compute_dofmap_pos_P(family, degree, mesh_2D):
    el = basix.ufl.element(family, mesh_2D.ufl_cell().cellname(), degree)
    V = dolfinx.fem.functionspace(mesh_2D, el)
    local_cells, local_pos = compute_dofmap_pos(V)
    num_cells_local = mesh_2D.topology.index_map(mesh_2D.topology.dim).size_local
    num_dofs_local = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    reference_cells, reference_pos = compute_positions(
        V.dofmap.list, V.dofmap.bs, num_dofs_local, num_cells_local
    )
    np.testing.assert_allclose(reference_cells, local_cells)
    np.testing.assert_allclose(reference_pos, local_pos)


def test_compute_send_sizes():
    np.random.seed(42)
    N = 0
    M = 10
    num_data = 100
    # Set of ranks to receive data
    dest_ranks = np.arange(N, M, dtype=np.int32)
    # Random data owners
    data_owners = np.random.randint(N, M, num_data).astype(np.int32)
    # Compute the number of data to send to each rank with loops
    out_size = np.zeros(len(dest_ranks), dtype=np.int32)
    for owner in data_owners:
        for j, rank in enumerate(dest_ranks):
            if owner == rank:
                out_size[j] += 1
                break
    process_pos_indicator = data_owners.reshape(-1,
1) == dest_ranks vectorized_out_size = np.count_nonzero(process_pos_indicator, axis=0) np.testing.assert_allclose(vectorized_out_size, out_size) adios4dolfinx-0.9.3/tests/test_original_checkpoint.py000066400000000000000000000411001477331607400230700ustar00rootroot00000000000000from __future__ import annotations import itertools import os from collections.abc import Callable from pathlib import Path from mpi4py import MPI import basix import basix.ufl import dolfinx import numpy as np import pytest import adios4dolfinx dtypes = [np.float64, np.float32] # Mesh geometry dtypes two_dimensional_cell_types = [ dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral, ] three_dimensional_cell_types = [ dolfinx.mesh.CellType.tetrahedron, dolfinx.mesh.CellType.hexahedron, ] two_dim_combinations = itertools.product(dtypes, two_dimensional_cell_types) three_dim_combinations = itertools.product(dtypes, three_dimensional_cell_types) @pytest.fixture(scope="module") def create_simplex_mesh_2D(tmp_path_factory): mesh = dolfinx.mesh.create_unit_square( MPI.COMM_WORLD, 10, 10, cell_type=dolfinx.mesh.CellType.triangle, dtype=np.float64, ) fname = tmp_path_factory.mktemp("output") / "original_mesh_2D_simplex.xdmf" fname = MPI.COMM_WORLD.bcast(fname, root=0) with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(scope="module") def create_simplex_mesh_3D(tmp_path_factory): mesh = dolfinx.mesh.create_unit_cube( MPI.COMM_WORLD, 5, 5, 5, cell_type=dolfinx.mesh.CellType.tetrahedron, dtype=np.float64, ) fname = tmp_path_factory.mktemp("output") / "original_mesh_3D_simplex.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(scope="module") def create_non_simplex_mesh_2D(tmp_path_factory): mesh = dolfinx.mesh.create_unit_square( MPI.COMM_WORLD, 10, 10, cell_type=dolfinx.mesh.CellType.quadrilateral, dtype=np.float64, ) fname = tmp_path_factory.mktemp("output") / "original_mesh_2D_non_simplex.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(scope="module") def create_non_simplex_mesh_3D(tmp_path_factory): mesh = dolfinx.mesh.create_unit_cube( MPI.COMM_WORLD, 5, 5, 5, cell_type=dolfinx.mesh.CellType.hexahedron, dtype=np.float64, ) fname = tmp_path_factory.mktemp("output") / "original_mesh_3D_non_simplex.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(params=two_dim_combinations, scope="module") def create_2D_mesh(request, tmpdir_factory): dtype, cell_type = request.param mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 7, cell_type=cell_type, dtype=dtype) fname = Path(tmpdir_factory.mktemp("output")) / f"original_mesh_2D_{dtype}_{cell_type}.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname @pytest.fixture(params=three_dim_combinations, scope="module") def create_3D_mesh(request, tmpdir_factory): dtype, cell_type = request.param mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 7, 3, cell_type=cell_type, dtype=dtype) fname = Path(tmpdir_factory.mktemp("output")) / f"original_mesh_3D_{dtype}_{cell_type}.xdmf" with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "w") as xdmf: xdmf.write_mesh(mesh) return fname def write_function_original( write_mesh: bool, mesh: dolfinx.mesh.Mesh, el: basix.ufl._ElementBase, f: Callable[[np.ndarray], np.ndarray], dtype: np.dtype, name: str, path: Path, ) 
-> Path: """Convenience function for writing function to file on the original input mesh""" V = dolfinx.fem.functionspace(mesh, el) uh = dolfinx.fem.Function(V, dtype=dtype) uh.interpolate(f) uh.name = name el_hash = ( adios4dolfinx.utils.element_signature(V) .replace(" ", "") .replace(",", "") .replace("(", "") .replace(")", "") .replace("[", "") .replace("]", "") ) file_hash = f"{el_hash}_{np.dtype(dtype).name}" filename = path / f"mesh_{file_hash}.bp" if write_mesh: adios4dolfinx.write_mesh_input_order(filename, mesh) adios4dolfinx.write_function_on_input_mesh(filename, uh, time=0.0) return filename def read_function_original( mesh_fname: Path, u_fname: Path, u_name: str, family: str, degree: int, f: Callable[[np.ndarray], np.ndarray], u_dtype: np.dtype, ): """ Convenience function for reading mesh with IPython-parallel and compare to exact solution """ from mpi4py import MPI import dolfinx import adios4dolfinx assert MPI.COMM_WORLD.size > 1 if mesh_fname.suffix == ".xdmf": with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_fname, "r") as xdmf: mesh = xdmf.read_mesh() elif mesh_fname.suffix == ".bp": mesh = adios4dolfinx.read_mesh( mesh_fname, MPI.COMM_WORLD, "BP4", dolfinx.mesh.GhostMode.shared_facet ) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype) adios4dolfinx.read_function(u_fname, u, time=0.0) MPI.COMM_WORLD.Barrier() u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype) u_ex.interpolate(f) u_ex.x.scatter_forward() atol = 10 * np.finfo(u_dtype).resolution np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=atol) # type: ignore def write_function_vector( write_mesh: bool, fname: Path, family: str, degree: int, f: Callable[[np.ndarray], np.ndarray], dtype: np.dtype, name: str, dir: Path, ) -> Path: """Convenience function for writing function to file on the original input mesh""" from mpi4py import MPI import basix.ufl import dolfinx import adios4dolfinx assert MPI.COMM_WORLD.size > 1 with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: mesh = xdmf.read_mesh() el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, dtype=mesh.geometry.x.dtype) V = dolfinx.fem.functionspace(mesh, el) uh = dolfinx.fem.Function(V, dtype=dtype) uh.interpolate(f) uh.name = name el_hash = ( adios4dolfinx.utils.element_signature(V) .replace(" ", "") .replace(",", "") .replace("(", "") .replace(")", "") .replace("[", "") .replace("]", "") ) file_hash = f"{el_hash}_{np.dtype(dtype).name}" filename = dir / f"mesh_{file_hash}.bp" if write_mesh: adios4dolfinx.write_mesh_input_order(filename, mesh) adios4dolfinx.write_function_on_input_mesh(filename, uh, time=0.0) return filename def read_function_vector( mesh_fname: Path, u_fname: Path, u_name: str, family: str, degree: int, f: Callable[[np.ndarray], np.ndarray], u_dtype: np.dtype, ): """ Convenience function for reading mesh with IPython-parallel and compare to exact solution """ if mesh_fname.suffix == ".xdmf": with dolfinx.io.XDMFFile(MPI.COMM_WORLD, mesh_fname, "r") as xdmf: mesh = xdmf.read_mesh() elif mesh_fname.suffix == ".bp": mesh = adios4dolfinx.read_mesh( mesh_fname, MPI.COMM_WORLD, "BP4", dolfinx.mesh.GhostMode.shared_facet ) el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree) V = dolfinx.fem.functionspace(mesh, el) u = dolfinx.fem.Function(V, name=u_name, dtype=u_dtype) 
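    # Editor's note: adios4dolfinx.read_function looks the checkpoint up by the
    # Function's name, so name=u_name here must match the name used at write
    # time (a mismatch raises a KeyError, see test_checkpointing.py).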
adios4dolfinx.read_function(u_fname, u, time=0.0) MPI.COMM_WORLD.Barrier() u_ex = dolfinx.fem.Function(V, name="exact", dtype=u_dtype) u_ex.interpolate(f) u_ex.x.scatter_forward() atol = 10 * np.finfo(u_dtype).resolution np.testing.assert_allclose(u.x.array, u_ex.x.array, atol=atol) # type: ignore @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) def test_read_write_P_2D( write_mesh, family, degree, is_complex, create_2D_mesh, cluster, get_dtype, tmp_path ): fname = create_2D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: mesh = xdmf.read_mesh() f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), dtype=mesh.geometry.x.dtype, ) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = x[0] if is_complex: values[0] -= 3j * x[1] values[1] += 2j * x[0] return values hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original", tmp_path) if write_mesh: mesh_fname = hash else: mesh_fname = fname query = cluster[:].apply_async( read_function_original, mesh_fname, hash, "u_original", family, degree, f, f_dtype ) query.wait() assert query.successful(), query.error @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["Lagrange", "DG"]) @pytest.mark.parametrize("degree", [1, 4]) @pytest.mark.parametrize("write_mesh", [True, False]) def test_read_write_P_3D( write_mesh, family, degree, is_complex, create_3D_mesh, cluster, get_dtype, tmp_path ): fname = create_3D_mesh with dolfinx.io.XDMFFile(MPI.COMM_WORLD, fname, "r") as xdmf: mesh = xdmf.read_mesh() f_dtype = get_dtype(mesh.geometry.x.dtype, is_complex) el = basix.ufl.element( family, mesh.ufl_cell().cellname(), degree, basix.LagrangeVariant.gll_warped, shape=(mesh.geometry.dim,), ) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.pi + x[0] values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) if is_complex: values[0] -= np.pi * x[1] values[1] += 3j * x[2] values[2] += 2j return values hash = write_function_original(write_mesh, mesh, el, f, f_dtype, "u_original", tmp_path) MPI.COMM_WORLD.Barrier() if write_mesh: mesh_fname = hash else: mesh_fname = fname query = cluster[:].apply_async( read_function_original, mesh_fname, hash, "u_original", family, degree, f, f_dtype ) query.wait() assert query.successful(), query.error @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("write_mesh", [True, False]) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_2D_vector_simplex( write_mesh, family, degree, is_complex, create_simplex_mesh_2D, 
cluster, get_dtype, tmp_path ): fname = create_simplex_mesh_2D f_dtype = get_dtype(np.float64, is_complex) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) + x[0] values[1] = x[1] if is_complex: values[0] -= np.sin(x[1]) * 2j values[1] += 3j return values query = cluster[:].apply_async( write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error paths = query.result() file_path = paths[0] assert all([file_path == path for path in paths]) if write_mesh: mesh_fname = file_path else: mesh_fname = fname read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("write_mesh", [True, False]) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["N1curl", "RT"]) @pytest.mark.parametrize("degree", [1, 4]) def test_read_write_3D_vector_simplex( write_mesh, family, degree, is_complex, create_simplex_mesh_3D, cluster, get_dtype, tmp_path ): fname = create_simplex_mesh_3D f_dtype = get_dtype(np.float64, is_complex) def f(x): values = np.empty((3, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) values[1] = x[1] + 2 * x[0] values[2] = np.cos(x[2]) if is_complex: values[0] += 2j * x[2] values[1] += 2j * np.cos(x[2]) return values query = cluster[:].apply_async( write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error paths = query.result() file_path = paths[0] assert all([file_path == path for path in paths]) if write_mesh: mesh_fname = file_path else: mesh_fname = fname read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("write_mesh", [True, False]) @pytest.mark.parametrize("is_complex", [True, False]) @pytest.mark.parametrize("family", ["RTCF"]) @pytest.mark.parametrize("degree", [1, 2, 3]) def test_read_write_2D_vector_non_simplex( write_mesh, family, degree, is_complex, create_non_simplex_mesh_2D, cluster, get_dtype, tmp_path ): fname = create_non_simplex_mesh_2D f_dtype = get_dtype(np.float64, is_complex) def f(x): values = np.empty((2, x.shape[1]), dtype=f_dtype) values[0] = np.full(x.shape[1], np.pi) values[1] = x[1] + 2 * x[0] if is_complex: values[0] += 2j * x[1] values[1] -= np.sin(x[0]) * 9j return values query = cluster[:].apply_async( write_function_vector, write_mesh, fname, family, degree, f, f_dtype, "u_original", tmp_path ) query.wait() assert query.successful(), query.error paths = query.result() file_path = paths[0] assert all([file_path == path for path in paths]) if write_mesh: mesh_fname = file_path else: mesh_fname = fname read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype) @pytest.mark.skipif( os.cpu_count() == 1, reason="Test requires that the system has more than one process" ) @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="Test uses ipythonparallel for MPI") @pytest.mark.parametrize("write_mesh", [True, False]) @pytest.mark.parametrize("is_complex", [True, False]) 
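# Editor's note: "NCF" is the H(div)-conforming face element on hexahedra
# (the hexahedral analogue of Raviart-Thomas), hence the non-simplex 3D mesh.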
@pytest.mark.parametrize("family", ["NCF"])
@pytest.mark.parametrize("degree", [1, 4])
def test_read_write_3D_vector_non_simplex(
    write_mesh, family, degree, is_complex, create_non_simplex_mesh_3D, cluster, get_dtype, tmp_path
):
    fname = create_non_simplex_mesh_3D
    f_dtype = get_dtype(np.float64, is_complex)

    def f(x):
        values = np.empty((3, x.shape[1]), dtype=f_dtype)
        values[0] = np.full(x.shape[1], np.pi) + x[0]
        values[1] = np.cos(x[2])
        values[2] = x[0]
        if is_complex:
            values[2] += x[0] * x[1] * 3j
        return values

    query = cluster[:].apply_async(
        write_function_vector,
        write_mesh,
        fname,
        family,
        degree,
        f,
        f_dtype,
        "u_original",
        tmp_path,
    )
    query.wait()
    assert query.successful(), query.error
    paths = query.result()
    file_path = paths[0]
    assert all([file_path == path for path in paths])
    if write_mesh:
        mesh_fname = file_path
    else:
        mesh_fname = fname
    read_function_vector(mesh_fname, file_path, "u_original", family, degree, f, f_dtype)
adios4dolfinx-0.9.3/tests/test_snapshot_checkpoint.py
from pathlib import Path

from mpi4py import MPI

import adios2
import basix.ufl
import dolfinx
import numpy as np
import pytest

from adios4dolfinx import snapshot_checkpoint
from adios4dolfinx.adios2_helpers import resolve_adios_scope

adios2 = resolve_adios_scope(adios2)

triangle = dolfinx.mesh.CellType.triangle
quad = dolfinx.mesh.CellType.quadrilateral
tetra = dolfinx.mesh.CellType.tetrahedron
hex = dolfinx.mesh.CellType.hexahedron


@pytest.mark.parametrize(
    "cell_type, family", [(triangle, "N1curl"), (triangle, "RT"), (quad, "RTCF")]
)
@pytest.mark.parametrize("degree", [1, 4])
def test_read_write_2D(family, degree, cell_type, tmp_path):
    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10, cell_type=cell_type)
    el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree)

    def f(x):
        return (np.full(x.shape[1], np.pi) + x[0], x[1])

    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)

    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / Path("snapshot_2D_vs.bp")
    snapshot_checkpoint(u, file, adios2.Mode.Write)

    v = dolfinx.fem.Function(V)
    snapshot_checkpoint(v, file, adios2.Mode.Read)
    assert np.allclose(u.x.array, v.x.array)


@pytest.mark.parametrize("cell_type, family", [(tetra, "N1curl"), (tetra, "RT"), (hex, "NCF")])
@pytest.mark.parametrize("degree", [1, 4])
def test_read_write_3D(family, degree, cell_type, tmp_path):
    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 3, 3, 3, cell_type=cell_type)
    el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree)

    def f(x):
        return (np.full(x.shape[1], np.pi) + x[0], x[1], x[1] * x[2])

    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)

    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / Path("snapshot_3D_vs.bp")
    snapshot_checkpoint(u, file, adios2.Mode.Write)

    v = dolfinx.fem.Function(V)
    snapshot_checkpoint(v, file, adios2.Mode.Read)
    assert np.allclose(u.x.array, v.x.array)


@pytest.mark.parametrize(
    "cell_type", [dolfinx.mesh.CellType.triangle, dolfinx.mesh.CellType.quadrilateral]
)
@pytest.mark.parametrize("family", ["Lagrange", "DG"])
@pytest.mark.parametrize("degree", [1, 4])
def test_read_write_P_2D(family, degree, cell_type, tmp_path):
    mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 5, 5, cell_type=cell_type)
    el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,))

    def f(x):
        return (np.full(x.shape[1], np.pi) + x[0], x[1])

    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)

    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / Path("snapshot_2D_p.bp")
    snapshot_checkpoint(u, file, adios2.Mode.Write)

    v = dolfinx.fem.Function(V)
    snapshot_checkpoint(v, file, adios2.Mode.Read)
    assert np.allclose(u.x.array, v.x.array)


@pytest.mark.parametrize(
    "cell_type", [dolfinx.mesh.CellType.tetrahedron, dolfinx.mesh.CellType.hexahedron]
)
@pytest.mark.parametrize("family", ["Lagrange", "DG"])
@pytest.mark.parametrize("degree", [1, 4])
def test_read_write_P_3D(family, degree, cell_type, tmp_path):
    mesh = dolfinx.mesh.create_unit_cube(MPI.COMM_WORLD, 5, 5, 5, cell_type=cell_type)
    el = basix.ufl.element(family, mesh.ufl_cell().cellname(), degree, shape=(mesh.geometry.dim,))

    def f(x):
        return (np.full(x.shape[1], np.pi) + x[0], x[1] + 2 * x[0], np.cos(x[2]))

    V = dolfinx.fem.functionspace(mesh, el)
    u = dolfinx.fem.Function(V)
    u.interpolate(f)

    fname = MPI.COMM_WORLD.bcast(tmp_path, root=0)
    file = fname / Path("snapshot_3D_p.bp")
    snapshot_checkpoint(u, file, adios2.Mode.Write)

    v = dolfinx.fem.Function(V)
    snapshot_checkpoint(v, file, adios2.Mode.Read)
    assert np.allclose(u.x.array, v.x.array)
adios4dolfinx-0.9.3/tests/test_version.py
import adios4dolfinx


def test_version():
    assert adios4dolfinx.__version__ is not None
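# ---------------------------------------------------------------------------
# Illustrative usage sketch (not a file from the repository): every snapshot
# test above follows the same write-then-read round trip, distilled here into
# a minimal standalone script. The output name "snapshot_demo.bp", the mesh
# size, and the vector Lagrange space are illustrative choices, not fixed by
# the library; the snapshot_checkpoint calls mirror the tests exactly.
# ---------------------------------------------------------------------------
from pathlib import Path

from mpi4py import MPI

import adios2
import dolfinx
import numpy as np

from adios4dolfinx import snapshot_checkpoint
from adios4dolfinx.adios2_helpers import resolve_adios_scope

adios2 = resolve_adios_scope(adios2)

# Interpolate a known function into a first-order vector Lagrange space
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
u = dolfinx.fem.Function(V)
u.interpolate(lambda x: (x[0], x[1] + x[0]))

# Write the snapshot, then read it back into a fresh Function on the same
# function space and verify the degrees of freedom match, as the tests do.
# Note that, as in the tests, the read happens on the same mesh that wrote
# the file: snapshot checkpoints are intended for in-run restarts.
file = Path("snapshot_demo.bp")
snapshot_checkpoint(u, file, adios2.Mode.Write)
v = dolfinx.fem.Function(V)
snapshot_checkpoint(v, file, adios2.Mode.Read)
assert np.allclose(u.x.array, v.x.array)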