pax_global_header00006660000000000000000000000064147614127030014517gustar00rootroot0000000000000052 comment=f799a750f0f2b2eda6eeb776d1d3de1d0dd83b18 dolfinx_mpc-0.9.1/000077500000000000000000000000001476141270300140305ustar00rootroot00000000000000dolfinx_mpc-0.9.1/.clang-format000066400000000000000000000053651476141270300164140ustar00rootroot00000000000000--- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -2 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlinesLeft: false AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true BinPackArguments: true BinPackParameters: true BraceWrapping: AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: false AfterNamespace: false AfterObjCDeclaration: false AfterStruct: false AfterUnion: false BeforeCatch: false BeforeElse: false IndentBraces: false BreakBeforeBinaryOperators: All BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: false BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 80 CommentPragmas: '^ IWYU pragma:' ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 - Regex: '^(<|"(gtest|isl|json)/)' Priority: 3 - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' IndentCaseLabels: false IndentWidth: 2 IndentWrappedFunctionNames: false JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Left ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Cpp11 TabWidth: 8 UseTab: Never ... dolfinx_mpc-0.9.1/.coveragerc000066400000000000000000000003021476141270300161440ustar00rootroot00000000000000# Omit numba code for now due to # https://github.com/numba/numba/issues/4268 [run] omit = */numba/* parallel = true source = dolfinx_mpc [html] directory= htmlcov [xml] output = coverage.xml dolfinx_mpc-0.9.1/.dockerignore000066400000000000000000000000751476141270300165060ustar00rootroot00000000000000# Don't include the .git in the image. It's big! .git .githubdolfinx_mpc-0.9.1/.github/000077500000000000000000000000001476141270300153705ustar00rootroot00000000000000dolfinx_mpc-0.9.1/.github/dependabot.yml000066400000000000000000000010301476141270300202120ustar00rootroot00000000000000# To get started with Dependabot version updates, you'll need to specify which # package ecosystems to update and where the package manifests are located. # Please see the documentation for all configuration options: # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file version: 2 updates: - package-ecosystem: "github-actions" # See documentation for possible values directory: "/" # Location of package manifests schedule: interval: "weekly" dolfinx_mpc-0.9.1/.github/workflows/000077500000000000000000000000001476141270300174255ustar00rootroot00000000000000dolfinx_mpc-0.9.1/.github/workflows/build_docs.yml000066400000000000000000000023611476141270300222610ustar00rootroot00000000000000name: Build docs on: workflow_dispatch: workflow_call: pull_request: branches: - main jobs: build: runs-on: ubuntu-22.04 container: ghcr.io/fenics/dolfinx/dev-env:current-mpich env: # Directory that will be published on github pages PUBLISH_DIR: ./_build/html PETSC_ARCH: "linux-gnu-real64-32" steps: - uses: actions/checkout@v4 - name: Install DOLFINx uses: jorgensd/actions/install-dolfinx@v0.3 with: dolfinx: v0.9.0 ufl: 2024.2.0 ffcx: v0.9.0 basix: v0.9.0 petsc_arch: ${PETSC_ARCH} - name: Install DOLFINx-MPC (C++) run: | cmake -G Ninja -B build-dir -DCMAKE_BUILD_TYPE=Release -S cpp/ cmake --build build-dir --parallel 3 cmake --install build-dir - name: Install DOLFINx-MPC (Python) run: python3 -m pip -v install --config-settings=cmake.build-type="Release" --no-build-isolation ./python/[docs] - name: Build docs run: jupyter book build . - name: Upload documentation as artifact uses: actions/upload-artifact@v4 with: name: documentation path: ${{ env.PUBLISH_DIR }} if-no-files-found: error dolfinx_mpc-0.9.1/.github/workflows/deploy-pages.yml000066400000000000000000000025401476141270300225420ustar00rootroot00000000000000name: Github Pages on: push: branches: [main] # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: contents: read pages: write id-token: write # Allow one concurrent deployment concurrency: group: "pages" cancel-in-progress: true jobs: run-coverage: uses: ./.github/workflows/test_mpc.yml build-docs: uses: ./.github/workflows/build_docs.yml deploy: needs: [run-coverage, build-docs] environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest steps: - name: Download docs artifact # docs artifact is uploaded by build-docs job uses: actions/download-artifact@v4 with: name: documentation path: "./public" - name: Download docs artifact # docs artifact is uploaded by build-docs job uses: actions/download-artifact@v4 with: name: code-coverage-report path: "./public/code-coverage-report" - name: Upload artifact uses: actions/upload-pages-artifact@v3 with: path: "./public" - name: Checkout uses: actions/checkout@v4 - name: Setup Pages uses: actions/configure-pages@v5 - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 dolfinx_mpc-0.9.1/.github/workflows/docker.yml000066400000000000000000000040401476141270300214150ustar00rootroot00000000000000on: push: tags: - "v*" pull_request: branches: - release - main workflow_dispatch: env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} jobs: build-and-push-image: runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to the Container registry uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - name: Build and push Docker image uses: docker/build-push-action@v6 with: context: . load: true push: false file: docker/Dockerfile platforms: linux/amd64 tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - name: Build and push Docker image uses: docker/build-push-action@v6 if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') with: context: . load: true push: false file: docker/Dockerfile platforms: linux/amd64 tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - name: Build and push Docker image uses: docker/build-push-action@v5 if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') with: context: . push: true file: docker/Dockerfile platforms: linux/amd64,linux/arm64 tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} dolfinx_mpc-0.9.1/.github/workflows/sonarcloud.yml000066400000000000000000000070471476141270300223310ustar00rootroot00000000000000name: SonarCloud on: push: branches: - main pull_request: branches: - main jobs: build: name: Build runs-on: ubuntu-22.04 # avoid running on pull requests from forks # which don't have access to secrets if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository) container: ghcr.io/fenics/dolfinx/dev-env:current-mpich env: SONAR_SCANNER_VERSION: 6.1.0.4477 # Find the latest version in at: https://github.com/SonarSource/sonar-scanner-cli/tags SONAR_SERVER_URL: "https://sonarcloud.io" BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory # Directory where build-wrapper output will be placed PETSC_ARCH: linux-gnu-real64-32 PETSC_DIR: /usr/local/petsc steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis - name: Install zip run: | apt-get -y update apt-get install unzip - name: Update pip run: | python3 -m pip install --upgrade pip setuptools - name: Set up JDK 17 uses: actions/setup-java@v4 with: distribution: "zulu" java-version: 17 - name: Cache SonarCloud packages uses: actions/cache@v4 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Download and set up sonar-scanner env: SONAR_SCANNER_DOWNLOAD_URL: https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${{ env.SONAR_SCANNER_VERSION }}-linux-x64.zip run: | mkdir -p $HOME/.sonar wget -O $HOME/.sonar/sonar-scanner.zip ${{ env.SONAR_SCANNER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ echo "$HOME/.sonar/sonar-scanner-${{ env.SONAR_SCANNER_VERSION }}-linux-x64/bin" >> $GITHUB_PATH - name: Download and set up build-wrapper env: BUILD_WRAPPER_DOWNLOAD_URL: ${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip run: | wget -O $HOME/.sonar/build-wrapper-linux-x86.zip ${{ env.BUILD_WRAPPER_DOWNLOAD_URL }} unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ echo "$HOME/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH - name: Install DOLFINx uses: jorgensd/actions/install-dolfinx@v0.3 with: petsc_arch: ${PETSC_ARCH} dolfinx: main basix: main ufl: main ffcx: main - name: Run build-wrapper run: | cmake -S ./cpp -B build-mpc build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build-mpc/ --config Release - name: Install DOLFINx-MPC (C++) run: | cmake -G Ninja -B build-dir -DCMAKE_BUILD_TYPE=${MPC_BUILD_MODE} -DCMAKE_CXX_FLAGS="${MPC_CMAKE_CXX_FLAGS}" -S cpp/ cmake --build build-dir --parallel 3 cmake --install build-dir - name: Install DOLFINx-MPC (Python) run: python3 -m pip -v install --config-settings=cmake.build-type="Release" --no-build-isolation ./python - name: Run sonar-scanner env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} run: | sonar-scanner --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" dolfinx_mpc-0.9.1/.github/workflows/test_mpc.yml000066400000000000000000000224201476141270300217660ustar00rootroot00000000000000name: Test MPC on: # Trigger tests on push workflow_dispatch: workflow_call: pull_request: branches: - main schedule: # '*' is a special character in YAML, so string must be quoted - cron: "0 8 * * *" jobs: build: runs-on: ubuntu-latest container: ghcr.io/fenics/dolfinx/dolfinx:stable strategy: matrix: build_mode: [Release, Debug] petsc_arch: [ linux-gnu-complex128-32, linux-gnu-complex64-32, linux-gnu-real64-32, linux-gnu-real32-32, ] # Due to: https://gitlab.com/petsc/petsc/-/issues/1288 CXX: [c++] #, clang++] CC: [cc] #, clang] # exclude: # - CC: cc # CXX: clang++ # - CC: clang # CXX: c++ env: DOLFINX_BRANCH: v0.9.0 BASIX_BRANCH: v0.9.0 UFL_BRANCH: 2024.2.0 FFCX_BRANCH: v0.9.0 CC: ${{ matrix.CC }} CXX: ${{ matrix.CXX }} PETSC_ARCH: "${{ matrix.petsc_arch }}" OMPI_ALLOW_RUN_AS_ROOT: 1 OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 OMPI_MCA_rmaps_base_oversubscribe: 1 OMPI_MCA_plm: isolated OMPI_MCA_btl_vader_single_copy_mechanism: none OMPI_MCA_mpi_yield_when_idle: 1 OMPI_MCA_hwloc_base_binding_policy: none HDF5_MPI: "ON" HDF5_DIR: "/usr/local/" MPC_BUILD_MODE: ${{ matrix.build_mode }} MPC_CMAKE_CXX_FLAGS: "-Wall -Werror -g -pedantic -Ofast -march=native" PYTHONPATH: "/usr/local/dolfinx-${PETSC_TYPE}/lib/python3.12/dist-packages:/usr/local/lib" LD_LIBRARY_PATH: "/usr/local/petsc/${PETSC_ARCH}/lib/:/usr/local" DEB_PYTHON_INSTALL_LAYOUT: deb_system steps: - uses: actions/checkout@v4 - name: Install clang if: ${{ matrix.CC }} == "clang" run: | apt-get update apt-get install -y clang - name: upgrade pip run: python3 -m pip install --upgrade setuptools pip - name: Check formatting run: | python3 -m pip install ruff ruff check ruff format - name: Check typing run: | python3 -m pip install mypy cd python python3 -m mypy . --exclude=build - name: Install h5py run: | python3 -m pip install --no-build-isolation --no-cache-dir --no-binary=h5py h5py - name: Install DOLFINx uses: jorgensd/actions/install-dolfinx@v0.3 with: dolfinx: ${{ env.DOLFINX_BRANCH }} ufl: ${{ env.UFL_BRANCH }} basix: ${{ env.BASIX_BRANCH }} ffcx: ${{ env.FFCX_BRANCH }} petsc_arch: ${{ env.PETSC_ARCH }} - name: Install DOLFINx-MPC (C++) run: | cmake -G Ninja -B build-dir -DCMAKE_BUILD_TYPE=${MPC_BUILD_MODE} -DCMAKE_CXX_FLAGS="${MPC_CMAKE_CXX_FLAGS}" -S cpp/ cmake --build build-dir --parallel 3 cmake --install build-dir - name: Install DOLFINx-MPC (Python) run: python3 -m pip -v install --config-settings=cmake.build-type=${MPC_BUILD_MODE} --no-build-isolation -e python/[test] - name: Run tests (serial) run: coverage run --rcfile=.coveragerc -m mpi4py -m pytest python/tests/ -vs - name: Run tests (2 processes) run: mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py -m pytest python/tests/ -vs - name: Run tests (3 processes) run: mpirun -n 3 coverage run --rcfile=.coveragerc -m mpi4py -m pytest python/tests/ - name: Run tests (4 processes) run: mpirun -n 4 coverage run --rcfile=.coveragerc -m mpi4py -m pytest python/tests/ - name: Run benchmarks run: | mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/benchmarks/bench_periodic.py --nref=2 --tet --gamg --timings mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/benchmarks/bench_elasticity_edge.py --nref=2 --gamg --timings mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/benchmarks/bench_contact_3D.py - name: Run demos (serial) run: | coverage run --rcfile=.coveragerc python/demos/demo_elasticity.py coverage run --rcfile=.coveragerc python/demos/demo_periodic_geometrical.py coverage run --rcfile=.coveragerc python/demos/demo_stokes.py coverage run --rcfile=.coveragerc python/demos/demo_periodic3d_topological.py coverage run --rcfile=.coveragerc python/demos/demo_elasticity_disconnect_2D.py coverage run --rcfile=.coveragerc python/demos/demo_elasticity_disconnect.py coverage run --rcfile=.coveragerc python/demos/demo_periodic_gep.py - name: Run demos (parallel) run: | mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_elasticity.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_periodic_geometrical.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_stokes.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_periodic3d_topological.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_elasticity_disconnect_2D.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_elasticity_disconnect.py mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_periodic_gep.py - name: Run contact demos 2D (serial) run: | coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --theta 1.05 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --gmsh --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --gmsh --theta 1.05 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --quad --gmsh --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_2D.py --quad --gmsh --theta 1.05 --timing - name: Run contact demos 2D (parallel) run: | mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --theta 1.05 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --gmsh --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --gmsh --theta 1.05 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --quad --gmsh --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_2D.py --quad --gmsh --theta 1.05 --timing - name: Run contact demos 3D (serial) run: | coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --theta 1.05 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --theta 1.05 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --no-slip --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --no-slip --theta 1.05 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --no-slip --hex --theta 0 --timing coverage run --rcfile=.coveragerc python/demos/demo_contact_3D.py --gmsh --no-slip --hex --theta 1.05 --timing - name: Run contact demos 3D (parallel) run: | mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --theta 1.05 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --theta 1.05 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --no-slip --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --no-slip --theta 1.05 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --no-slip --hex --theta 0 --timing mpirun -n 2 coverage run --rcfile=.coveragerc -m mpi4py python/demos/demo_contact_3D.py --gmsh --no-slip --hex --theta 1.05 --timing - name: Combine coverage reports run: | coverage combine coverage report -m coverage html - name: Upload coverage report as artifact if: matrix.build_mode == 'Release' && matrix.petsc_arch == 'linux-gnu-real64-32' && matrix.CC == 'cc' && matrix.CXX == 'c++' uses: actions/upload-artifact@v4 with: name: code-coverage-report path: htmlcov if-no-files-found: error dolfinx_mpc-0.9.1/.gitignore000066400000000000000000000003301476141270300160140ustar00rootroot00000000000000_petsc_cffi_None.* *.xdmf *.h5 *.so **/build**/** __pycache__ dolfinx_mpc.egg-info *.pvd *.vtu .vscode debug *.png *results* *.msh CMakeFiles *.hdf5 *.o .mypy_cache *.bp **/html/** **/_build/** .coverage .coverage.*dolfinx_mpc-0.9.1/CITATION.cff000066400000000000000000000004041476141270300157200ustar00rootroot00000000000000cff-version: 1.1.0 message: "If you use this software, please cite it as below." authors: - family-names: Dokken given-names: Jørgen Schartum orcid: https://orcid.org/0000-0001-6489-8858 title: "DOLFINx-MPC" version: 0.9.0 date-released: 2024-10-15 dolfinx_mpc-0.9.1/Changelog.md000066400000000000000000000173171476141270300162520ustar00rootroot00000000000000# Changelog ## Main - No changes ## v0.9.0 - No major API changes, only following DOLFINx API changes ## v0.8.0 - **API** - Various shared pointers in C++ interface is changed to const references - Multipoint-constraint now accept `std::span` instead of vectors - Now using [nanobind](https://github.com/wjakob/nanobind) for Python bindings - Switch to `pyproject.toml`, **see installation notes** for updated instructions - **DOLFINx API-changes** - `dolfinx.fem.FunctionSpaceBase` replaced by `dolfinx.fem.FunctionSpace` - `ufl.FiniteElement` and `ufl.VectorElement` is replaced by `basix.ufl.element` ## v0.7.2 - **New feature**: Add support for "scalar" inelastic contact conditions. This is a special case where you want to create a periodic constraint between two sets of facets, which might or might not align. ## v0.7.1 - Patch for Python 3.8 - Fix import order of `mpi4py`, `petsc4py`, `dolfinx` and `dolfinx_mpc` ## v0.7.0 - **API**: - Change input of `dolfinx_mpc.MultiPointConstraint.homogenize` and `dolfinx_mpc.backsubstitution` to `dolfinx.fem.Function` instead of `PETSc.Vec`. - **New feature**: Add support for more floating types (float32, float64, complex64, complex128). The floating type of a MPC is related to the mesh geometry. - This resulted in a minor refactoring of the pybindings, meaning that the class `dolfinx_mpc.cpp.mpc.MultiPointConstraint` is replaced by `dolfinx_mpc.cpp.mpc.MultiPointConstraint_{dtype}` - Casting scalar-type with `dolfinx.default_scalar_type` instead of `PETSc.ScalarType` - Remove usage of `VectorFunctionSpace`. Use blocked basix element instead. - **DOLFINX API-changes**: - Use `dolfinx.fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim, )))` instead of `dolfinx.fem.VectorFunctionSpace(mesh, ("Lagrange", 1))` as the latter is being deprecated. - Use `basix.ufl.element` in favor of `ufl.FiniteElement` as the latter is deprecated in DOLFINx. ## v0.6.1 (30.01.2023) - Fixes for CI - Add auto-publishing CI - Fixes for `h5py` installation ## v0.6.0 (27.01.2023) - Remove `dolfinx::common::impl::copy_N` in favor of `std::copy_n` by @jorgensd in #24 - Improving and fixing `demo_periodic_gep.py` by @fmonteghetti in #22 and @conpierce8 in #30 - Remove xtensor by @jorgensd in #25 - Complex valued periodic constraint (scale) by @jorgensd in #34 - Implement Hermitian pre-multiplication by @conpierce8 in #38 - Fixes for packaging by @mirk in #41, #42, #4 - Various updates to dependencies ## v0.5.0 (12.08.2022) - Minimal C++ standard is now [C++20](https://en.cppreference.com/w/cpp/20) - Deprecating GMSH IO functions from `dolfinx_mpc.utils`, see: [DOLFINx PR: 2261](https://github.com/FEniCS/dolfinx/pull/2261) for details. - Various API changes in DOLFINx relating to `dolfinx.common.IndexMap`. - Made code [mypy](https://mypy.readthedocs.io/en/stable/)-compatible (tests added to CI). - Made code [PEP-561](https://peps.python.org/pep-0561/) compatible. ## v0.4.0 (30.04.2022) - **API**: - **New feature**: Support for nonlinear problems (by @nate-sime) for mpc, see `test_nonlinear_assembly.py` for usage - Updated user interface for `dolfinx_mpc.create_slip_constraint`. See documentation for details. - **New feature**: Support for periodic constraints on sub-spaces. See `dolfinx_mpc.create_periodic_constraint` for details. - **New feature**: `assemble_matrix_nest` and `assemble_vector_nest` by @nate-sime allows for block assembly of rectangular matrices, with different MPCs applied for rows and columns. This is highlighed in `demo_stokes_nest.py` - `assemble_matrix` and `assemble_vector` now only accepts compiled DOLFINx forms as opposed to `ufl`-forms. `LinearProblem` still accepts `ufl`-forms - `dolfinx_mpc.utils.create_normal_approximation` now takes in the meshtag and the marker, instead of the marked entities - No longer direct access to dofmap and indexmap of MPC, now collected through the `dolfinx_mpc.MultiPointConstraint.function_space`. - Introducing custom lifting operator: `dolfinx_mpc.apply_lifting`. Resolves a bug that woul occur if one had a non-zero Dirichlet BC on the same cell as a slave degree of freedom. However, one can still not use Dirichlet dofs as slaves or masters in a multi point constraint. - Move `dolfinx_mpc.cpp.mpc.create_*_contact_condition` to `dolfinx_mpc.MultiPointConstraint.create_*_contact_condition`. - New default assembler: The default for `assemble_matrix` and `assemble_vector` is now C++ implementations. The numba implementations can be accessed through the submodule `dolfinx_mpc.numba`. - New submodule: `dolfinx_mpc.numba`. This module contains the `assemble_matrix` and `assemble_vector` that uses numba. - The `mpc_data` is fully rewritten, now the data is accessible as properties `slaves`, `masters`, `owners`, `coeffs` and `offsets`. - The `MultiPointConstraint` class has been rewritten, with the following functions changing - The `add_constraint` function now only accept single arrays of data, instead of tuples of (owned, ghost) data. - `slave_cells` does now longer exist as it can be gotten implicitly from `cell_to_slaves`. - **Performance**: - Major rewrite of periodic boundary conditions. On average at least a 5 x performance speed-up. - The C++ assembler has been fully rewritten. - Various improvements to `ContactConstraint`. - **Bugs** - Resolved issue where `create_facet_normal_approximation` would give you a 0 normal for a surface dof it was not owned by any of the cells with facets on the surface. - **DOLFINX API-changes**: - `dolfinx.fem.DirichletBC` -> `dolfinx.fem.dirichletbc` - `dolfinx.fem.Form` -> `dolfinx.fem.form` - Updates to use latest import schemes from dolfinx, including `UnitSquareMesh` -> `create_unit_square`. - Updates to match dolfinx implementation of exterior facet integrals - Updated user-interface of `dolfinx.Constant`, explicitly casting scalar-type with `PETSc.ScalarType`. - Various internal changes to handle new `dolfinx.DirichletBC` without class inheritance - Various internal changes to handle new way of JIT-compliation of `dolfinx::fem::Form_{scalar_type}` ## 0.3.0 (25.08.2021) - Minor internal changes ## 0.2.0 (06.08.2021) - Add new MPC constraint: Periodic boundary condition constrained geometrically. See `demo_periodic_geometrical.py` for use-case. - New: `demo_periodic_gep.py` proposed and initally implemented by [fmonteghetti](https://github.com/fmonteghetti) using SLEPc for eigen-value problems. This demo illustrates the usage of the new `diagval` keyword argument in the `assemble_matrix` class. - **API**: - Renaming and clean-up of `assemble_matrix` in C++ - Renaming of Periodic constraint due to additional geometrical constraint, `mpc.create_periodic_constraint` -> `mpc.create_periodic_constraint_geometrical/topological`. - Introduce new class `dolfinx_mpc.LinearProblem` mimicking the DOLFINx class (Usage illustrated in `demo_periodic_geometrical.py`) - Additional `kwarg` `b: PETSc.Vec` for `assemble_vector` to be able to re-use Vector. - Additional `kwargs`: `form_compiler_parameters` and `jit_parameters` to `assemble_matrix`, `assemble_vector`, to allow usage of fast math etc. - **Performance**: - Slip condition constructor moved to C++ (Speedup for large problems) - Use scipy sparse matrices for verification - **Misc**: - Update GMSH code in demos to be compatible with [GMSH 4.8.4](https://gitlab.onelab.info/gmsh/gmsh/-/tags/gmsh_4_8_4). - **DOLFINX API-changes**: - `dolfinx.cpp.la.scatter_forward(x)` is replaced by `x.scatter_forward()` - Various interal updates to match DOLFINx API (including dof transformations moved outside of ffcx kernel) ## 0.1.0 (11.05.2021) - First tagged release of dolfinx_mpc, compatible with [DOLFINx 0.1.0](https://github.com/FEniCS/dolfinx/releases/tag/0.1.0). dolfinx_mpc-0.9.1/LICENSE000066400000000000000000000020401476141270300150310ustar00rootroot00000000000000Copyright 2021 Jørgen S. Dokken Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.dolfinx_mpc-0.9.1/README.md000066400000000000000000000067041476141270300153160ustar00rootroot00000000000000# Multi-point constraints with FEniCS-X [![Github Pages](https://github.com/jorgensd/dolfinx_mpc/actions/workflows/deploy-pages.yml/badge.svg?branch=main)](https://github.com/jorgensd/dolfinx_mpc/actions/workflows/deploy-pages.yml) [![SonarCloud](https://sonarcloud.io/images/project_badges/sonarcloud-orange.svg)](https://sonarcloud.io/summary/new_code?id=jorgensd_dolfinx_mpc) [Code Coverage Report](https://jsdokken.com/dolfinx_mpc/code-coverage-report/index.html) Author: Jørgen S. Dokken This library contains an add-on to FEniCSx enabling the possibilities of enforce multi-point constraints, such as $$u_i =\sum_{j=0,i \neq j}^n \alpha_j u_j, i\in I_N,$$ where $I_N$ is the set of degrees of freedom to constrain. This can be used to for instance enforce slip conditions strongly. Consider a linear system of the form $Au=b$, with the additional constraints written on the form $K\hat u=u$, where $K$ is a prolongation matrix, $\hat u$ is the vector of unknowns excluding the $I_N$ entries. We then solve the system $K^T A K \hat u = K^T b$, where $K^T A K$ is symmetric if $A$ was symmetric. (For complex numbers, we use the Hermitian transpose and solve the system $\overline{K^T} A K \hat u = \overline{K^T} b$, where $\overline{K^T}$ is the complex conjugate of $K^T$, and $\overline{K^T} A K$ is Hermitian if $A$ was Hermitian.) If we include boundary conditions on the form $u=g$, we assemble the system $K^TAK\hat u = K^T(b-A\hat g)$ where $A\hat g$ is an extension of the boundary condition $g$ to all degrees of freedom. The library performs custom matrix and vector assembly adding the extra constraints to the set of linear equations. All assemblies are local to the process, and no MPI communication except when setting up the multi point constraints. These assemblers are written in C++, but have equivalent Python assemblers in the optional `dolfinx_mpc.numba` module. # Documentation Documentation at [https://jorgensd.github.io/dolfinx_mpc](https://jorgensd.github.io/dolfinx_mpc) # Installation ## Conda The DOLFINx MPC package is now on Conda. The C++ library can be found under [libdolfinx_mpc](https://anaconda.org/conda-forge/libdolfinx_mpc) and the Python library under [dolfinx_mpc](https://anaconda.org/conda-forge/dolfinx_mpc). If you have any issues with these installations, add an issue at [dolfinx_mpc feedstock](https://github.com/conda-forge/dolfinx_mpc-feedstock). ## Docker Version 0.7.0 is available as an docker image at [Github Packages](https://github.com/jorgensd/dolfinx_mpc/pkgs/container/dolfinx_mpc) and can be ran using ```bash docker run -ti -v $(pwd):/root/shared -w /root/shared ghcr.io/jorgensd/dolfinx_mpc:v0.7.0 ``` To change to complex mode run `source dolfinx-complex-mode`. Similarly, to change back to real mode, call `source dolfinx-real-mode`. ## Source To install the latest version (main branch), you need to install the latest release of [DOLFINx](https://github.com/FEniCS/dolfinx). Easiest way to install DOLFINx is to use docker. The DOLFINx docker images goes under the name [dolfinx/dolfinx](https://hub.docker.com/r/dolfinx/dolfinx). Remember to use an appropriate tag to get the correct version of DOLFINx, i.e. (`:nightly` or `:vx.y.z`). To install the `dolfinx_mpc`-library run the following code from this directory: ```bash cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -B build-dir cpp/ ninja -j3 install -C build-dir python3 -m pip -v install --config-settings=cmake.build-type="Release" --no-build-isolation ./python -U ``` dolfinx_mpc-0.9.1/_config.yml000066400000000000000000000021741476141270300161630ustar00rootroot00000000000000# Book settings # Learn more at https://jupyterbook.org/customize/config.html title: "DOLFINx-MPC: An extension to DOLFINx for multi point constraints" author: Jørgen S. Dokken copyright: "2022" execute: execute_notebooks: cache only_build_toc_files: true # Information about where the book exists on the web repository: url: https://github.com/jorgensd/dolfinx_mpc # Online location of your book path_to_book: docs # Optional path to your book, relative to the repository root branch: main # Which branch of the repository should be used when creating links (optional) launch_buttons: notebook_interface: "jupyterlab" # The interface interactive links will activate ["classic", "jupyterlab"] binderhub_url: "https://mybinder.org" html: use_issues_button: true use_repository_button: true parse: myst_enable_extensions: - amsmath - dollarmath - linkify sphinx: extra_extensions: - 'sphinx.ext.autodoc' - 'sphinx.ext.napoleon' - 'sphinx.ext.viewcode' config: html_theme_options: navigation_with_keys: false nb_custom_formats: .py: - jupytext.reads - fmt: py dolfinx_mpc-0.9.1/_toc.yml000066400000000000000000000003421476141270300154760ustar00rootroot00000000000000format: jb-book root: index parts: - caption: "Demos" chapters: - file: "python/demos/demo_stokes.py" - caption: "Python API" chapters: - file: "docs/api" - file: "docs/utils" - file: "docs/numba" dolfinx_mpc-0.9.1/cpp/000077500000000000000000000000001476141270300146125ustar00rootroot00000000000000dolfinx_mpc-0.9.1/cpp/CMakeLists.txt000066400000000000000000000102711476141270300173530ustar00rootroot00000000000000#------------------------------------------------------------------------------ # Top level CMakeLists.txt file for DOLFINX_MPC cmake_minimum_required(VERSION 3.21) #------------------------------------------------------------------------------ # Set project name and version number project(DOLFINX_MPC VERSION "0.9.0.0") #------------------------------------------------------------------------------ # General configuration # Make sure CMake uses the correct DOLFINConfig.cmake for tests and demos set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_BINARY_DIR}/dolfinx_mpc) #------------------------------------------------------------------------------ # configurable options for how we want to build include(FeatureSummary) option(BUILD_SHARED_LIBS "Build DOLFINX_MPC with shared libraries." ON) add_feature_info(BUILD_SHARED_LIBS BUILD_SHARED_LIBS "Build DOLFINX_MPC with shared libraries.") option(CMAKE_INSTALL_RPATH_USE_LINK_PATH "Add paths to linker search and installed rpath." ON) add_feature_info(CMAKE_INSTALL_RPATH_USE_LINK_PATH CMAKE_INSTALL_RPATH_USE_LINK_PATH "Add paths to linker search and installed rpath.") # Check for required package DOLFINX find_package(DOLFINX 0.9.0.0 REQUIRED) set_package_properties(DOLFINX PROPERTIES TYPE REQUIRED DESCRIPTION "New generation Dynamic Object-oriented Library for - FINite element computation" URL "https://github.com/FEniCS/dolfinx" PURPOSE "Main dependency of library") feature_summary(WHAT ALL) # Installation of DOLFIN_MPC library # Declare the library (target) add_library(dolfinx_mpc) target_compile_features(dolfinx_mpc PUBLIC cxx_std_20) # dolfinx gives us transitive dependency on mpi, petsc, basix, ufcx # without us needing to reimplement detection/dependency target_link_libraries(dolfinx_mpc PUBLIC dolfinx) #------------------------------------------------------------------------------ include(GNUInstallDirs) #------------------------------------------------------------------------------ install(FILES dolfinx_mpc.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dolfinx_mpc COMPONENT Development) install(FILES assemble_utils.h mpi_utils.h ContactConstraint.h utils.h MultiPointConstraint.h SlipConstraint.h PeriodicConstraint.h assemble_matrix.h assemble_vector.h lifting.h mpc_helpers.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/dolfinx_mpc COMPONENT Development) # Add source files to the target target_sources(dolfinx_mpc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp ${CMAKE_CURRENT_SOURCE_DIR}/assemble_matrix.cpp ${CMAKE_CURRENT_SOURCE_DIR}/assemble_vector.cpp ${CMAKE_CURRENT_SOURCE_DIR}/assemble_utils.cpp ${CMAKE_CURRENT_SOURCE_DIR}/mpi_utils.cpp ) # Set target include location (for build and installed) target_include_directories(dolfinx_mpc PUBLIC $ "$") # Set target properties set_target_properties(dolfinx_mpc PROPERTIES VERSION ${DOLFINX_MPC_VERSION} SOVERSION ${DOLFINX_MPC_VERSION_MAJOR}.${DOLFINX_MPC_VERSION_MINOR}) # Add version to definitions (public) target_compile_definitions(dolfinx_mpc PUBLIC DOLFINX_MPC_VERSION="${DOLFINX_MPC_VERSION}") #------------------------------------------------------------------------------ # Install dolfinx library and header files install(TARGETS dolfinx_mpc EXPORT DOLFINX_MPCTargets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT RuntimeExecutables LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT RuntimeLibraries ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT Development ) # Generate DOLFINTargets.cmake install(EXPORT DOLFINX_MPCTargets DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dolfinx_mpc) include(CMakePackageConfigHelpers) configure_package_config_file(${DOLFINX_MPC_SOURCE_DIR}/DOLFINX_MPCConfig.cmake.in ${CMAKE_BINARY_DIR}/dolfinx_mpc/DOLFINX_MPCConfig.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dolfinx_mpc) # Install CMake helper files install( FILES ${CMAKE_BINARY_DIR}/dolfinx_mpc/DOLFINX_MPCConfig.cmake DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/dolfinx_mpc COMPONENT Development) #------------------------------------------------------------------------------ dolfinx_mpc-0.9.1/cpp/ContactConstraint.h000066400000000000000000002262101476141270300204260ustar00rootroot00000000000000// Copyright (C) 2020 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "utils.h" #include #include #include #include namespace impl { /// Create bounding box tree (of cells) based on a mesh tag and a given set of /// markers in the tag. This means that for a given set of facets, we compute /// the bounding box tree of the cells connected to the facets /// @param[in] mesh The mesh /// @param[in] meshtags The meshtags for a set of entities /// @param[in] marker The value in meshtags to extract entities for /// @param[in] padding How much to pad the boundingboxtree /// @returns A bounding box tree of the cells connected to the entities template dolfinx::geometry::BoundingBoxTree create_boundingbox_tree(const dolfinx::mesh::Mesh& mesh, const dolfinx::mesh::MeshTags& meshtags, std::int32_t marker, double padding) { assert(mesh.topology() == meshtags.topology()); const std::int32_t tdim = mesh.topology()->dim(); int dim = meshtags.dim(); auto entity_to_cell = mesh.topology()->connectivity(dim, tdim); assert(entity_to_cell); // Find all cells connected to master facets for collision detection std::int32_t num_local_cells = mesh.topology()->index_map(tdim)->size_local(); const std::vector facets = meshtags.find(marker); std::vector cells = dolfinx::mesh::compute_incident_entities( *mesh.topology(), facets, dim, tdim); dolfinx::geometry::BoundingBoxTree bb_tree(mesh, tdim, cells, padding); return bb_tree; } /// Compute contributions to slip constrain from master side (local to process) /// @param[in] local_rems List containing which block each slave dof is in /// @param[in] local_colliding_cell List with one-to-one correspondes to a cell /// that the block is colliding with /// @param[in] normals The normals at each slave dofs /// @param[in] V the function space /// @param[in] tabulated_basis_values The basis values tabulated for the given /// cells at the given coordinates /// @returns The mpc data (exluding slave indices) template dolfinx_mpc::mpc_data compute_master_contributions( std::span local_rems, std::span local_colliding_cell, MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent, 3>> normals, const dolfinx::fem::FunctionSpace& V, MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> tabulated_basis_values) { const double tol = 1e-6; auto mesh = V.mesh(); const std::int32_t block_size = V.dofmap()->index_map_bs(); std::shared_ptr imap = V.dofmap()->index_map; const int bs = V.dofmap()->index_map_bs(); const std::int32_t size_local = imap->size_local(); MPI_Comm comm = mesh->comm(); int rank = -1; MPI_Comm_rank(comm, &rank); // Count number of masters for in local contribution if found, else add to // array that is communicated to other processes const std::size_t num_slaves_local = local_rems.size(); std::vector num_masters_local(num_slaves_local, 0); assert(num_slaves_local == local_colliding_cell.size()); for (std::size_t i = 0; i < num_slaves_local; ++i) { if (const std::int32_t cell = local_colliding_cell[i]; cell != -1) { auto cell_blocks = V.dofmap()->cell_dofs(cell); for (std::size_t j = 0; j < cell_blocks.size(); ++j) { for (int b = 0; b < bs; b++) { // NOTE: Assuming 0 value size if (const T val = normals(i, b) / normals(i, local_rems[i]) * tabulated_basis_values(i, j); std::abs(val) > tol) { num_masters_local[i]++; } } } } } std::vector masters_offsets(num_slaves_local + 1); masters_offsets[0] = 0; std::inclusive_scan(num_masters_local.begin(), num_masters_local.end(), masters_offsets.begin() + 1); std::vector masters_other_side(masters_offsets.back()); std::vector coefficients_other_side(masters_offsets.back()); std::vector owners_other_side(masters_offsets.back()); std::span ghost_owners = imap->owners(); // Temporary array holding global indices std::vector global_blocks; // Reuse num_masters_local for insertion std::ranges::fill(num_masters_local, 0); for (std::size_t i = 0; i < num_slaves_local; ++i) { if (const std::int32_t cell = local_colliding_cell[i]; cell != -1) { auto cell_blocks = V.dofmap()->cell_dofs(cell); global_blocks.resize(cell_blocks.size()); imap->local_to_global(cell_blocks, global_blocks); // Compute coefficients for each master for (std::size_t j = 0; j < cell_blocks.size(); ++j) { const std::int32_t cell_block = cell_blocks[j]; for (int b = 0; b < bs; b++) { // NOTE: Assuming 0 value size if (const T val = normals(i, b) / normals(i, local_rems[i]) * tabulated_basis_values(i, j); std::abs(val) > tol) { const std::int32_t m_pos = masters_offsets[i] + num_masters_local[i]; masters_other_side[m_pos] = global_blocks[j] * block_size + b; coefficients_other_side[m_pos] = val; owners_other_side[m_pos] = cell_block < size_local ? rank : ghost_owners[cell_block - size_local]; num_masters_local[i]++; } } } } } // Do not add in slaves data to mpc_data, as we allready know the slaves dolfinx_mpc::mpc_data mpc_local; mpc_local.masters = masters_other_side; mpc_local.coeffs = coefficients_other_side; mpc_local.offsets = masters_offsets; mpc_local.owners = owners_other_side; return mpc_local; } /// Find slave dofs topologically /// @param[in] V The function space /// @param[in] meshtags The meshtags for the set of entities /// @param[in] marker The marker values in the mesh tag /// @returns The degrees of freedom located on all entities of V that are /// tagged with the marker template std::vector locate_slave_dofs(const dolfinx::fem::FunctionSpace& V, const dolfinx::mesh::MeshTags& meshtags, std::int32_t slave_marker) { const std::int32_t edim = meshtags.dim(); // Extract slave_facets std::vector slave_facets; slave_facets.reserve(meshtags.indices().size()); for (std::size_t i = 0; i < meshtags.indices().size(); ++i) if (meshtags.values()[i] == slave_marker) slave_facets.push_back(meshtags.indices()[i]); // Find all dofs on slave facets if (V.element()->num_sub_elements() == 0) { std::vector slave_dofs = dolfinx::fem::locate_dofs_topological( *V.mesh()->topology(), *V.dofmap(), edim, std::span(slave_facets)); return slave_dofs; } else { // NOTE: Assumption that we are only working with vector spaces, which is // ordered as xyz,xyzgeometry auto V_sub = V.sub({0}); auto [V0, map] = V_sub.collapse(); auto sub_dofmap = V_sub.dofmap(); std::array, 2> slave_dofs = dolfinx::fem::locate_dofs_topological(*V.mesh()->topology(), {*sub_dofmap, *V0.dofmap()}, edim, std::span(slave_facets)); return slave_dofs[0]; } } /// Compute contributions to slip MPC from slave facet side, i.e. dot(u, /// n)|_slave_facet /// @param[in] local_slaves The slave dofs (local index) /// @param[in] local_slave_blocks The corresponding blocks for each slave /// @param[in] normals The normal vectors, shape (local_slaves.size(), 3). /// Storage flattened row major. /// @param[in] imap The index map /// @param[in] block_size The block size of the index map /// @param[in] rank The rank of current process /// @returns A mpc_data struct with slaves, masters, coeffs and owners template dolfinx_mpc::mpc_data compute_block_contributions( const std::vector& local_slaves, const std::vector& local_slave_blocks, std::span normals, const std::shared_ptr imap, std::int32_t block_size, int rank) { assert(normals.size() % 3 == 0); assert(normals.size() / 3 == local_slave_blocks.size()); std::vector dofs(block_size); // Count number of masters for each local slave (only contributions from) // the same block as the actual slave dof std::vector num_masters_in_cell(local_slaves.size()); for (std::size_t i = 0; i < local_slaves.size(); ++i) { std::iota(dofs.begin(), dofs.end(), local_slave_blocks[i] * block_size); const std::int32_t local_slave = local_slaves[i]; for (std::int32_t j = 0; j < block_size; ++j) if ((dofs[j] != local_slave) && std::abs(normals[3 * i + j]) > 1e-6) num_masters_in_cell[i]++; } std::vector masters_offsets(local_slaves.size() + 1); masters_offsets[0] = 0; std::inclusive_scan(num_masters_in_cell.begin(), num_masters_in_cell.end(), masters_offsets.begin() + 1); // Reuse num masters as fill position array std::ranges::fill(num_masters_in_cell, 0); // Compute coeffs and owners for local cells std::vector global_slave_blocks(local_slaves.size()); imap->local_to_global(local_slave_blocks, global_slave_blocks); std::vector masters_in_cell(masters_offsets.back()); std::vector coefficients_in_cell(masters_offsets.back()); const std::vector owners_in_cell(masters_offsets.back(), rank); for (std::size_t i = 0; i < local_slaves.size(); ++i) { const std::int32_t local_slave = local_slaves[i]; std::iota(dofs.begin(), dofs.end(), local_slave_blocks[i] * block_size); auto local_max = std::ranges::find(dofs, local_slave); const auto max_index = std::distance(dofs.begin(), local_max); for (std::int32_t j = 0; j < block_size; j++) { if ((dofs[j] != local_slave) && std::abs(normals[3 * i + j]) > 1e-6) { T coeff_j = -normals[3 * i + j] / normals[3 * i + max_index]; coefficients_in_cell[masters_offsets[i] + num_masters_in_cell[i]] = coeff_j; masters_in_cell[masters_offsets[i] + num_masters_in_cell[i]] = global_slave_blocks[i] * block_size + j; num_masters_in_cell[i]++; } } } dolfinx_mpc::mpc_data mpc; mpc.slaves = local_slaves; mpc.masters = masters_in_cell; mpc.coeffs = coefficients_in_cell; mpc.offsets = masters_offsets; mpc.owners = owners_in_cell; return mpc; } /// Concatatenate to mpc_data structures with same number of offsets template dolfinx_mpc::mpc_data concatenate(dolfinx_mpc::mpc_data& mpc0, dolfinx_mpc::mpc_data& mpc1) { assert(mpc0.offsets.size() == mpc1.offsets.size()); std::vector& offsets0 = mpc0.offsets; std::vector& offsets1 = mpc1.offsets; std::vector& masters0 = mpc0.masters; std::vector& masters1 = mpc1.masters; std::vector& coeffs0 = mpc0.coeffs; std::vector& coeffs1 = mpc1.coeffs; std::vector& owners0 = mpc0.owners; std::vector& owners1 = mpc1.owners; const std::size_t num_slaves = offsets0.size() - 1; // Concatenate the two constraints as one std::vector num_masters_per_slave(num_slaves, 0); for (std::size_t i = 0; i < num_slaves; i++) { num_masters_per_slave[i] = offsets0[i + 1] - offsets0[i] + offsets1[i + 1] - offsets1[i]; } std::vector masters_offsets(offsets0.size()); masters_offsets[0] = 0; std::inclusive_scan(num_masters_per_slave.begin(), num_masters_per_slave.end(), masters_offsets.begin() + 1); // Reuse num_masters_per_slave for indexing std::ranges::fill(num_masters_per_slave, 0); std::vector masters_out(masters_offsets.back()); std::vector coefficients_out(masters_offsets.back()); std::vector owners_out(masters_offsets.back()); for (std::size_t i = 0; i < num_slaves; ++i) { for (std::int32_t j = offsets0[i]; j < offsets0[i + 1]; ++j) { masters_out[masters_offsets[i] + num_masters_per_slave[i]] = masters0[j]; coefficients_out[masters_offsets[i] + num_masters_per_slave[i]] = coeffs0[j]; owners_out[masters_offsets[i] + num_masters_per_slave[i]] = owners0[j]; num_masters_per_slave[i]++; } for (std::int32_t j = offsets1[i]; j < offsets1[i + 1]; ++j) { masters_out[masters_offsets[i] + num_masters_per_slave[i]] = masters1[j]; coefficients_out[masters_offsets[i] + num_masters_per_slave[i]] = coeffs1[j]; owners_out[masters_offsets[i] + num_masters_per_slave[i]] = owners1[j]; num_masters_per_slave[i]++; } } // Structure storing mpc arrays dolfinx_mpc::mpc_data mpc; mpc.masters = masters_out; mpc.coeffs = coefficients_out; mpc.owners = owners_out; mpc.offsets = masters_offsets; return mpc; } } // namespace impl namespace dolfinx_mpc { /// Create a slip condition between two sets of facets /// @param[in] V The mpc function space /// @param[in] meshtags The meshtag /// @param[in] slave_marker Tag for the first interface /// @param[in] master_marker Tag for the other interface /// @param[in] nh Function containing the normal at the slave marker interface /// @param[in] eps2 The tolerance for the squared distance to be considered a /// collision template mpc_data create_contact_slip_condition( const dolfinx::fem::FunctionSpace& V, const dolfinx::mesh::MeshTags& meshtags, std::int32_t slave_marker, std::int32_t master_marker, const dolfinx::fem::Function& nh, const U eps2 = 1e-20) { dolfinx::common::Timer timer("~MPC: Create slip constraint"); std::shared_ptr> mesh = V.mesh(); MPI_Comm comm = mesh->comm(); int rank = -1; MPI_Comm_rank(comm, &rank); // Extract some const information from function-space const std::shared_ptr imap = V.dofmap()->index_map; assert(mesh->topology() == meshtags.topology()); const int tdim = mesh->topology()->dim(); const int gdim = mesh->geometry().dim(); const int fdim = tdim - 1; const int block_size = V.dofmap()->index_map_bs(); std::int32_t size_local = V.dofmap()->index_map->size_local(); mesh->topology_mutable()->create_connectivity(fdim, tdim); mesh->topology_mutable()->create_connectivity(tdim, tdim); mesh->topology_mutable()->create_entity_permutations(); // Find all slave dofs and split them into locally owned and ghosted blocks std::vector local_slave_blocks; { std::vector slave_dofs = impl::locate_slave_dofs(V, meshtags, slave_marker); local_slave_blocks.reserve(slave_dofs.size()); std::ranges::for_each(slave_dofs, [&local_slave_blocks, bs = block_size, sl = size_local](const std::int32_t dof) { std::div_t div = std::div(dof, bs); if (div.quot < sl) local_slave_blocks.push_back(div.quot); }); } // Data structures to hold information about slave data local to process std::vector local_slaves(local_slave_blocks.size()); std::vector local_rems(local_slave_blocks.size()); dolfinx_mpc::mpc_data mpc_local; // Find all local contributions to MPC, meaning: // 1. Degrees of freedom from the same block as the slave // 2. Degrees of freedom from the other interface // Helper function // Determine component of each block has the largest normal value, and use // it as slave dofs to avoid zero division in constraint // Note that this function casts the normal array from being potentially // complex to real valued std::vector dofs(block_size); std::span normal_array = nh.x()->array(); const auto largest_normal_component = [&dofs, block_size, &normal_array, gdim](const std::int32_t block, std::span normal) { std::iota(dofs.begin(), dofs.end(), block * block_size); for (int j = 0; j < gdim; ++j) normal[j] = std::real(normal_array[dofs[j]]); U norm = std::sqrt(normal[0] * normal[0] + normal[1] * normal[1] + normal[2] * normal[2]); std::ranges::for_each(normal, [norm](auto& n) { return std::abs(n / norm); }); return std::distance( normal.begin(), std::ranges::max_element(normal, [](T a, T b) { return std::norm(a) < std::norm(b); })); }; // Determine which dof in local slave block is the actual slave std::vector normals(3 * local_slave_blocks.size(), 0); assert(block_size == gdim); for (std::size_t i = 0; i < local_slave_blocks.size(); ++i) { const std::int32_t slave = local_slave_blocks[i]; const auto block = largest_normal_component( slave, std::span(std::next(normals.begin(), 3 * i), 3)); local_slaves[i] = block_size * slave + block; local_rems[i] = block; } // Compute local contributions to constraint using helper function // i.e. compute dot(u, n) on slave side mpc_data mpc_in_cell = impl::compute_block_contributions( local_slaves, local_slave_blocks, normals, imap, block_size, rank); dolfinx::geometry::BoundingBoxTree bb_tree = impl::create_boundingbox_tree( *mesh, meshtags, master_marker, std::sqrt(eps2)); // Compute contributions on other side local to process mpc_data mpc_master_local; // Create map from slave dof blocks to a cell containing them std::vector slave_cells = dolfinx_mpc::create_block_to_cell_map( *mesh->topology(), *V.dofmap(), local_slave_blocks); std::vector slave_coordinates; std::array coord_shape; { std::tie(slave_coordinates, coord_shape) = dolfinx_mpc::tabulate_dof_coordinates(V, local_slave_blocks, slave_cells); std::vector local_cell_collisions = dolfinx_mpc::find_local_collisions(*mesh, bb_tree, slave_coordinates, eps2); auto [basis, basis_shape] = dolfinx_mpc::evaluate_basis_functions( V, slave_coordinates, local_cell_collisions); assert(basis_shape.back() == 1); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> basis_span(basis.data(), basis_shape[0], basis_shape[1]); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent, 3>> normal_span(normals.data(), local_slave_blocks.size(), 3); mpc_master_local = impl::compute_master_contributions( local_rems, local_cell_collisions, normal_span, V, basis_span); } // // Find slave indices were contributions are not found on the process std::vector& l_offsets = mpc_master_local.offsets; std::vector slave_indices_remote; slave_indices_remote.reserve(local_rems.size()); for (std::size_t i = 0; i < local_rems.size(); i++) { if (l_offsets[i + 1] - l_offsets[i] == 0) slave_indices_remote.push_back((int)i); } // Structure storing mpc arrays mpc_local mpc_local = impl::concatenate(mpc_in_cell, mpc_master_local); // If serial, we gather the resulting mpc data as one constraint if (int mpi_size = dolfinx::MPI::size(comm); mpi_size == 1) { if (!slave_indices_remote.empty()) { throw std::runtime_error( "No masters found on contact surface (when executed in serial). " "Please make sure that the surfaces are in contact, or increase the " "tolerance eps2."); } // Serial assumptions mpc_local.slaves = local_slaves; return mpc_local; } // Create slave_dofs->master facets and master->slave dofs neighborhood comms const bool has_slave = !local_slave_blocks.empty(); std::array neighborhood_comms = create_neighborhood_comms(comm, meshtags, has_slave, master_marker); // Get the slave->master recv from and send to ranks int indegree(-1); int outdegree(-2); int weighted(-1); MPI_Dist_graph_neighbors_count(neighborhood_comms[0], &indegree, &outdegree, &weighted); // Convert slaves missing master contributions to global index // and prepare data (coordinates and normals) to send to other procs const std::array send_shape = {slave_indices_remote.size(), 3}; std::vector coordinates_send(send_shape.front() * send_shape.back()); std::vector normals_send(send_shape.front() * send_shape.back()); std::vector send_rems(slave_indices_remote.size()); for (std::size_t i = 0; i < slave_indices_remote.size(); ++i) { const std::int32_t slave_idx = slave_indices_remote[i]; send_rems[i] = local_rems[slave_idx]; std::ranges::copy_n(std::next(slave_coordinates.begin(), 3 * slave_idx), 3, std::next(coordinates_send.begin(), 3 * i)); std::ranges::copy_n(std::next(normals.begin(), 3 * slave_idx), 3, std::next(normals_send.begin(), 3 * i)); } // Figure out how much data to receive from each neighbor const std::size_t out_collision_slaves = slave_indices_remote.size(); std::vector num_slaves_recv(indegree + 1); MPI_Neighbor_allgather( &out_collision_slaves, 1, dolfinx::MPI::mpi_type(), num_slaves_recv.data(), 1, dolfinx::MPI::mpi_type(), neighborhood_comms[0]); num_slaves_recv.pop_back(); // Compute displacements for data to receive std::vector disp(indegree + 1, 0); std::partial_sum(num_slaves_recv.begin(), num_slaves_recv.end(), disp.begin() + 1); // Send data to neighbors and receive data std::vector recv_rems(disp.back()); MPI_Neighbor_allgatherv(send_rems.data(), (int)send_rems.size(), dolfinx::MPI::mpi_type(), recv_rems.data(), num_slaves_recv.data(), disp.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[0]); // Multiply recv size by three to accommodate vector coordinates and // function data std::vector num_slaves_recv3; num_slaves_recv3.reserve(indegree); std::ranges::transform(num_slaves_recv, std::back_inserter(num_slaves_recv3), [](std::int32_t num_slaves) { return 3 * num_slaves; }); std::vector disp3(indegree + 1, 0); std::partial_sum(num_slaves_recv3.begin(), num_slaves_recv3.end(), disp3.begin() + 1); // Send slave normal and coordinate to neighbors std::vector recv_coords(disp.back() * 3); MPI_Neighbor_allgatherv(coordinates_send.data(), (int)coordinates_send.size(), dolfinx::MPI::mpi_type(), recv_coords.data(), num_slaves_recv3.data(), disp3.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[0]); std::vector slave_normals(disp.back() * 3); MPI_Neighbor_allgatherv(normals_send.data(), (int)normals_send.size(), dolfinx::MPI::mpi_type(), slave_normals.data(), num_slaves_recv3.data(), disp3.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[0]); // Compute off-process contributions mpc_data remote_data; { std::vector remote_cell_collisions = dolfinx_mpc::find_local_collisions(*mesh, bb_tree, recv_coords, eps2); auto [recv_basis_values, shape] = dolfinx_mpc::evaluate_basis_functions( V, recv_coords, remote_cell_collisions); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> basis_span(recv_basis_values.data(), shape[0], shape[1]); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent, 3>> normal_span(slave_normals.data(), disp.back(), 3); remote_data = impl::compute_master_contributions( recv_rems, remote_cell_collisions, normal_span, V, basis_span); } // Get info about reverse communicator auto [src_ranks_rev, dest_ranks_rev] = dolfinx_mpc::compute_neighborhood(neighborhood_comms[1]); const std::size_t indegree_rev = src_ranks_rev.size(); // Count number of masters found on the process and convert the offsets // to be per process std::vector num_collision_masters(indegree + 1, 0); std::vector num_out_offsets; num_out_offsets.reserve(indegree); std::ranges::transform(num_slaves_recv, std::back_inserter(num_out_offsets), [](std::int32_t num_slaves) { return num_slaves + 1; }); const std::int32_t num_offsets = std::accumulate(num_out_offsets.begin(), num_out_offsets.end(), 0); std::vector offsets_remote(num_offsets); std::int32_t counter = 0; for (std::int32_t i = 0; i < indegree; ++i) { const std::int32_t first_pos = disp[i]; const std::int32_t first_offset = remote_data.offsets[first_pos]; num_collision_masters[i] += remote_data.offsets[disp[i + 1]] - first_offset; offsets_remote[first_pos + counter++] = 0; for (std::int32_t j = first_pos; j < disp[i + 1]; ++j) offsets_remote[j + counter] = remote_data.offsets[j + 1] - first_offset; } // Communicate number of incoming masters to each process after collision // detection std::vector inc_num_collision_masters(indegree_rev + 1); MPI_Neighbor_alltoall(num_collision_masters.data(), 1, MPI_INT, inc_num_collision_masters.data(), 1, MPI_INT, neighborhood_comms[1]); inc_num_collision_masters.pop_back(); num_collision_masters.pop_back(); // Create displacement vector for masters and coefficients std::vector disp_inc_masters(indegree_rev + 1, 0); std::partial_sum(inc_num_collision_masters.begin(), inc_num_collision_masters.end(), disp_inc_masters.begin() + 1); // Compute send offsets for masters and coefficients std::vector send_disp_masters(indegree + 1, 0); std::partial_sum(num_collision_masters.begin(), num_collision_masters.end(), send_disp_masters.begin() + 1); // Create displacement vector for incoming offsets std::vector inc_disp_offsets(indegree_rev + 1); std::vector num_inc_offsets(indegree_rev, (int)slave_indices_remote.size() + 1); std::partial_sum(num_inc_offsets.begin(), num_inc_offsets.end(), inc_disp_offsets.begin() + 1); // Compute send offsets for master offsets std::vector send_disp_offsets(indegree + 1, 0); std::partial_sum(num_out_offsets.begin(), num_out_offsets.end(), send_disp_offsets.begin() + 1); // Get offsets for master dofs from remote process std::vector requests(4); std::vector remote_colliding_offsets(inc_disp_offsets.back()); MPI_Ineighbor_alltoallv( offsets_remote.data(), num_out_offsets.data(), send_disp_offsets.data(), dolfinx::MPI::mpi_type(), remote_colliding_offsets.data(), num_inc_offsets.data(), inc_disp_offsets.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1], &requests[0]); // Receive colliding masters and relevant data from other processor std::vector remote_colliding_masters(disp_inc_masters.back()); MPI_Ineighbor_alltoallv( remote_data.masters.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_masters.data(), inc_num_collision_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1], &requests[1]); std::vector remote_colliding_coeffs(disp_inc_masters.back()); MPI_Ineighbor_alltoallv( remote_data.coeffs.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_coeffs.data(), inc_num_collision_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1], &requests[2]); std::vector remote_colliding_owners(disp_inc_masters.back()); MPI_Ineighbor_alltoallv( remote_data.owners.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_owners.data(), inc_num_collision_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1], &requests[3]); // Wait for offsets to be sent std::vector status(4); MPI_Wait(&requests[0], &status[0]); std::vector slave_found(slave_indices_remote.size(), false); std::vector num_inc_masters(slave_indices_remote.size()); // Iterate through the processors and find one set of inputs per slave that // was sent to the other processes for (std::size_t i = 0; i < src_ranks_rev.size(); ++i) { [[maybe_unused]] const std::int32_t num_offsets_on_proc = inc_disp_offsets[i + 1] - inc_disp_offsets[i]; assert(num_offsets_on_proc == std::int32_t(slave_indices_remote.size()) + 1); for (std::size_t c = 0; c < slave_indices_remote.size(); c++) { const std::int32_t slave_min = remote_colliding_offsets[inc_disp_offsets[i] + c]; const std::int32_t slave_max = remote_colliding_offsets[inc_disp_offsets[i] + c + 1]; if (const std::int32_t num_inc = slave_max - slave_min; !(slave_found[c]) && (num_inc > 0)) { slave_found[c] = true; num_inc_masters[c] = num_inc; } } } if (auto not_found = std::ranges::find(slave_found, false); not_found != slave_found.end()) { std::runtime_error( "Masters not found on contact surface with local search or remote " "search. Consider running the code in serial to make sure that one can " "detect the contact surface, or increase eps2."); } /// Wait for all communication to finish MPI_Waitall(4, requests.data(), status.data()); // Move the masters, coeffs and owners from the input adjacency list // to one where each node corresponds to an entry in slave_indices_remote std::vector offproc_offsets(slave_indices_remote.size() + 1, 0); std::partial_sum(num_inc_masters.begin(), num_inc_masters.end(), offproc_offsets.begin() + 1); std::vector offproc_masters(offproc_offsets.back()); std::vector offproc_coeffs(offproc_offsets.back()); std::vector offproc_owners(offproc_offsets.back()); std::ranges::fill(slave_found, false); for (std::size_t i = 0; i < src_ranks_rev.size(); ++i) { const std::int32_t proc_start = disp_inc_masters[i]; [[maybe_unused]] const std::int32_t num_offsets_on_proc = inc_disp_offsets[i + 1] - inc_disp_offsets[i]; assert(num_offsets_on_proc == std::int32_t(slave_indices_remote.size()) + 1); for (std::size_t c = 0; c < slave_indices_remote.size(); c++) { assert(std::int32_t(remote_colliding_offsets.size()) > std::int32_t(inc_disp_offsets[i] + c)); assert(std::int32_t(remote_colliding_offsets.size()) > std::int32_t(inc_disp_offsets[i] + c + 1)); assert(std::int32_t(inc_disp_offsets[i] + c) < inc_disp_offsets[i + 1]); const std::int32_t slave_min = remote_colliding_offsets[inc_disp_offsets[i] + c]; const std::int32_t slave_max = remote_colliding_offsets[inc_disp_offsets[i] + c + 1]; assert(c < slave_found.size()); if (!(slave_found[c]) && (slave_max - slave_min > 0)) { slave_found[c] = true; std::ranges::copy( remote_colliding_masters.begin() + proc_start + slave_min, remote_colliding_masters.begin() + proc_start + slave_max, offproc_masters.begin() + offproc_offsets[c]); std::ranges::copy( remote_colliding_coeffs.begin() + proc_start + slave_min, remote_colliding_coeffs.begin() + proc_start + slave_max, offproc_coeffs.begin() + offproc_offsets[c]); std::ranges::copy( remote_colliding_owners.begin() + proc_start + slave_min, remote_colliding_owners.begin() + proc_start + slave_max, offproc_owners.begin() + offproc_offsets[c]); } } } // Merge local data with incoming data // First count number of local masters std::vector& masters_offsets = mpc_local.offsets; std::vector& masters_out = mpc_local.masters; std::vector& coefficients_out = mpc_local.coeffs; std::vector& owners_out = mpc_local.owners; std::vector num_masters_per_slave(local_slaves.size(), 0); for (std::size_t i = 0; i < local_slaves.size(); ++i) num_masters_per_slave[i] += masters_offsets[i + 1] - masters_offsets[i]; // Then add the remote masters for (std::size_t i = 0; i < slave_indices_remote.size(); ++i) num_masters_per_slave[slave_indices_remote[i]] += offproc_offsets[i + 1] - offproc_offsets[i]; // Create new offset array std::vector local_offsets(local_slaves.size() + 1, 0); std::partial_sum(num_masters_per_slave.begin(), num_masters_per_slave.end(), local_offsets.begin() + 1); // Reuse num_masters_per_slave for input indices std::vector local_masters(local_offsets.back()); std::vector local_owners(local_offsets.back()); std::vector local_coeffs(local_offsets.back()); // Insert local contributions { std::vector loc_pos(local_slaves.size(), 0); for (std::size_t i = 0; i < local_slaves.size(); ++i) { const std::int32_t master_min = masters_offsets[i]; const std::int32_t master_max = masters_offsets[i + 1]; std::ranges::copy(masters_out.begin() + master_min, masters_out.begin() + master_max, local_masters.begin() + local_offsets[i] + loc_pos[i]); std::ranges::copy(coefficients_out.begin() + master_min, coefficients_out.begin() + master_max, local_coeffs.begin() + local_offsets[i] + loc_pos[i]); std::ranges::copy(owners_out.begin() + master_min, owners_out.begin() + master_max, local_owners.begin() + local_offsets[i] + loc_pos[i]); loc_pos[i] += master_max - master_min; } // Insert remote contributions for (std::size_t i = 0; i < slave_indices_remote.size(); ++i) { const std::int32_t master_min = offproc_offsets[i]; const std::int32_t master_max = offproc_offsets[i + 1]; const std::int32_t slave_index = slave_indices_remote[i]; std::ranges::copy(offproc_masters.begin() + master_min, offproc_masters.begin() + master_max, local_masters.begin() + local_offsets[slave_index] + loc_pos[slave_index]); std::ranges::copy(offproc_coeffs.begin() + master_min, offproc_coeffs.begin() + master_max, local_coeffs.begin() + local_offsets[slave_index] + loc_pos[slave_index]); std::ranges::copy(offproc_owners.begin() + master_min, offproc_owners.begin() + master_max, local_owners.begin() + local_offsets[slave_index] + loc_pos[slave_index]); loc_pos[slave_index] += master_max - master_min; } } // Distribute ghost data dolfinx_mpc::mpc_data ghost_data = dolfinx_mpc::distribute_ghost_data( local_slaves, local_masters, local_coeffs, local_owners, num_masters_per_slave, *imap, block_size); // Add ghost data to existing arrays const std::vector& ghost_slaves = ghost_data.slaves; local_slaves.insert(std::end(local_slaves), std::cbegin(ghost_slaves), std::cend(ghost_slaves)); const std::vector& ghost_masters = ghost_data.masters; local_masters.insert(std::end(local_masters), std::cbegin(ghost_masters), std::cend(ghost_masters)); const std::vector& ghost_num = ghost_data.offsets; num_masters_per_slave.insert(std::end(num_masters_per_slave), std::cbegin(ghost_num), std::cend(ghost_num)); const std::vector& ghost_coeffs = ghost_data.coeffs; local_coeffs.insert(std::end(local_coeffs), std::cbegin(ghost_coeffs), std::cend(ghost_coeffs)); const std::vector& ghost_owner_ranks = ghost_data.owners; local_owners.insert(std::end(local_owners), std::cbegin(ghost_owner_ranks), std::cend(ghost_owner_ranks)); // Compute offsets std::vector offsets(num_masters_per_slave.size() + 1, 0); std::partial_sum(num_masters_per_slave.begin(), num_masters_per_slave.end(), offsets.begin() + 1); dolfinx_mpc::mpc_data output; output.offsets = offsets; output.masters = local_masters; output.coeffs = local_coeffs; output.owners = local_owners; output.slaves = local_slaves; return output; } /// Create a contact condition between two sets of facets /// @param[in] The mpc function space /// @param[in] meshtags The meshtag /// @param[in] slave_marker Tag for the first interface /// @param[in] master_marker Tag for the other interface /// @param[in] eps2 The tolerance for the squared distance to be considered a /// collision template mpc_data create_contact_inelastic_condition( const dolfinx::fem::FunctionSpace& V, dolfinx::mesh::MeshTags meshtags, std::int32_t slave_marker, std::int32_t master_marker, const U eps2 = 1e-20) { dolfinx::common::Timer timer("~MPC: Inelastic condition"); MPI_Comm comm = V.mesh()->comm(); int rank = -1; MPI_Comm_rank(comm, &rank); // Extract some const information from function-space const std::shared_ptr imap = V.dofmap()->index_map; const int tdim = V.mesh()->topology()->dim(); const int fdim = tdim - 1; const int block_size = V.dofmap()->index_map_bs(); std::int32_t size_local = V.dofmap()->index_map->size_local(); // Create entity permutations needed in evaluate_basis_functions V.mesh()->topology_mutable()->create_entity_permutations(); // Create connectivities needed for evaluate_basis_functions and // select_colliding cells V.mesh()->topology_mutable()->create_connectivity(fdim, tdim); V.mesh()->topology_mutable()->create_connectivity(tdim, tdim); std::vector slave_blocks = impl::locate_slave_dofs(V, meshtags, slave_marker); std::ranges::for_each(slave_blocks, [block_size](std::int32_t& d) { d /= block_size; }); // Vector holding what blocks local to process are slaves std::vector local_blocks; // Array holding ghost slaves blocks (masters,coeffs and offsets will be // received) std::vector ghost_blocks; // Map slave blocks to arrays holding local bocks and ghost blocks std::ranges::for_each( slave_blocks, [size_local, &local_blocks, &ghost_blocks](const std::int32_t block) { if (block < size_local) local_blocks.push_back(block); else ghost_blocks.push_back(block); }); // Map local blocks to global indices std::vector local_blocks_as_glob(local_blocks.size()); imap->local_to_global(local_blocks, local_blocks_as_glob); // Create slave_dofs->master facets and master->slave dofs neighborhood // comms const bool has_slave = !local_blocks.empty(); std::array neighborhood_comms = create_neighborhood_comms(comm, meshtags, has_slave, master_marker); // Create communicator local_blocks -> ghost_block MPI_Comm slave_to_ghost = create_owner_to_ghost_comm(local_blocks, ghost_blocks, imap); /// Compute which rank (relative to neighbourhood) to send each ghost to std::span ghost_owners = imap->owners(); // Create new index-map where there are only ghosts for slaves std::shared_ptr slave_index_map; { std::vector slave_ranks(ghost_blocks.size()); for (std::size_t i = 0; i < ghost_blocks.size(); ++i) slave_ranks[i] = ghost_owners[ghost_blocks[i] - size_local]; std::vector ghosts_as_global(ghost_blocks.size()); imap->local_to_global(ghost_blocks, ghosts_as_global); slave_index_map = std::make_shared( comm, imap->size_local(), ghosts_as_global, slave_ranks); } // Create boundingboxtree for master surface auto facet_to_cell = V.mesh()->topology()->connectivity(fdim, tdim); assert(facet_to_cell); dolfinx::geometry::BoundingBoxTree bb_tree = impl::create_boundingbox_tree( *V.mesh(), meshtags, master_marker, std::sqrt(eps2)); // Tabulate slave block coordinates and find colliding cells std::vector slave_cells = dolfinx_mpc::create_block_to_cell_map( *V.mesh()->topology(), *V.dofmap(), local_blocks); std::vector slave_coordinates; { std::array c_shape; std::tie(slave_coordinates, c_shape) = dolfinx_mpc::tabulate_dof_coordinates(V, local_blocks, slave_cells); } // Loop through all masters on current processor and check if they // collide with a local master facet std::map> local_owners; std::map> local_masters; std::map> local_coeffs; std::vector blocks_wo_local_collision; std::vector collision_to_local; { std::vector colliding_cells = dolfinx_mpc::find_local_collisions(*V.mesh(), bb_tree, slave_coordinates, eps2); auto [basis_values, basis_shape] = dolfinx_mpc::evaluate_basis_functions( V, slave_coordinates, colliding_cells); assert(basis_shape.back() == 1); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> basis(basis_values.data(), basis_shape[0], basis_shape[1]); std::vector master_block_global; std::vector l_master; std::vector coeff; for (std::size_t i = 0; i < local_blocks.size(); ++i) { if (const auto& cell = colliding_cells[i]; cell != -1) { auto cell_blocks = V.dofmap()->cell_dofs(cell); l_master.reserve(cell_blocks.size()); coeff.reserve(cell_blocks.size()); assert(l_master.empty()); assert(coeff.empty()); // Store block and non-zero basis value for (std::size_t j = 0; j < cell_blocks.size(); ++j) { if (const T c = basis(i, j); std::abs(c) > 1e-6) { coeff.push_back(c); l_master.push_back(cell_blocks[j]); } } // If no local contributions found add to remote list if (l_master.empty()) { blocks_wo_local_collision.push_back(local_blocks_as_glob[i]); collision_to_local.push_back(i); } else { // Convert local block to global block const std::size_t num_masters = l_master.size(); master_block_global.resize(num_masters); imap->local_to_global(l_master, master_block_global); // Insert local contributions in each block for (int j = 0; j < block_size; ++j) { const std::int32_t local_slave = local_blocks[i] * block_size + j; for (std::size_t k = 0; k < num_masters; k++) { local_masters[local_slave].push_back( master_block_global[k] * block_size + j); local_coeffs[local_slave].push_back(coeff[k]); local_owners[local_slave].push_back( l_master[k] < size_local ? rank : ghost_owners[l_master[k] - size_local]); } } l_master.clear(); coeff.clear(); } } else { blocks_wo_local_collision.push_back(local_blocks_as_glob[i]); collision_to_local.push_back(i); } } } // Extract coordinates and normals to distribute std::vector distribute_coordinates(blocks_wo_local_collision.size() * 3); for (std::size_t i = 0; i < collision_to_local.size(); ++i) { std::ranges::copy_n( std::next(slave_coordinates.begin(), 3 * collision_to_local[i]), 3, std::next(distribute_coordinates.begin(), 3 * i)); } dolfinx_mpc::mpc_data mpc; // If serial, we only have to gather slaves, masters, coeffs in 1D // arrays if (int mpi_size = dolfinx::MPI::size(comm); mpi_size == 1) { if (!blocks_wo_local_collision.empty()) { throw std::runtime_error( "No masters found on contact surface (when executed in serial). " "Please make sure that the surfaces are in contact, or increase " "the " "tolerance eps2."); } std::vector masters_out; std::vector coeffs_out; std::vector offsets_out = {0}; std::vector slaves_out; // Flatten the maps to 1D arrays (assuming all slaves are local // slaves) std::vector slaves; slaves.reserve(block_size * local_blocks.size()); std::ranges::for_each( local_blocks, [block_size, tdim, &masters_out, &local_masters, &coeffs_out, &local_coeffs, &offsets_out, &slaves](const std::int32_t block) { for (std::int32_t j = 0; j < block_size; ++j) { const std::int32_t slave = block * block_size + j; slaves.push_back(slave); masters_out.insert(masters_out.end(), local_masters[slave].begin(), local_masters[slave].end()); coeffs_out.insert(coeffs_out.end(), local_coeffs[slave].begin(), local_coeffs[slave].end()); offsets_out.push_back((std::int32_t)masters_out.size()); } }); std::vector owners(masters_out.size()); std::ranges::fill(owners, 0); mpc.slaves = slaves; mpc.masters = masters_out; mpc.offsets = offsets_out; mpc.owners = owners; mpc.coeffs = coeffs_out; return mpc; } // Get the slave->master recv from and send to ranks int indegree(-1); int outdegree(-2); int weighted(-1); MPI_Dist_graph_neighbors_count(neighborhood_comms[0], &indegree, &outdegree, &weighted); // Figure out how much data to receive from each neighbor const auto num_colliding_blocks = (int)blocks_wo_local_collision.size(); std::vector num_slave_blocks(indegree + 1); MPI_Neighbor_allgather( &num_colliding_blocks, 1, dolfinx::MPI::mpi_type(), num_slave_blocks.data(), 1, dolfinx::MPI::mpi_type(), neighborhood_comms[0]); num_slave_blocks.pop_back(); // Compute displacements for data to receive std::vector disp(indegree + 1, 0); std::partial_sum(num_slave_blocks.begin(), num_slave_blocks.end(), disp.begin() + 1); // Send data to neighbors and receive data std::vector remote_slave_blocks(disp.back()); MPI_Neighbor_allgatherv( blocks_wo_local_collision.data(), num_colliding_blocks, dolfinx::MPI::mpi_type(), remote_slave_blocks.data(), num_slave_blocks.data(), disp.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[0]); // Multiply recv size by three to accommodate block coordinates std::vector num_block_coordinates(indegree); for (std::size_t i = 0; i < num_slave_blocks.size(); ++i) num_block_coordinates[i] = num_slave_blocks[i] * 3; std::vector coordinate_disp(indegree + 1, 0); std::partial_sum(num_block_coordinates.begin(), num_block_coordinates.end(), coordinate_disp.begin() + 1); // Send slave coordinates to neighbors std::vector recv_coords(disp.back() * 3); MPI_Neighbor_allgatherv(distribute_coordinates.data(), (int)distribute_coordinates.size(), dolfinx::MPI::mpi_type(), recv_coords.data(), num_block_coordinates.data(), coordinate_disp.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[0]); // Vector for processes with slaves, mapping slaves with // collision on this process std::vector> collision_slaves(indegree); std::vector>> collision_masters(indegree); std::vector>> collision_coeffs( indegree); std::vector>> collision_owners(indegree); std::vector>> collision_block_offsets(indegree); { std::vector remote_cell_collisions = dolfinx_mpc::find_local_collisions(*V.mesh(), bb_tree, recv_coords, eps2); auto [basis, basis_shape] = dolfinx_mpc::evaluate_basis_functions( V, recv_coords, remote_cell_collisions); assert(basis_shape.back() == 1); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> basis_span(basis.data(), basis_shape[0], basis_shape[1]); // TODO: Rework this so it is the same as the code on owning process. // Preferably get rid of all the std::map's // Work arrays for loop std::vector r_master; std::vector r_coeff; std::vector remote_master_global; for (std::int32_t i = 0; i < indegree; ++i) { for (std::int32_t j = disp[i]; j < disp[i + 1]; ++j) { const std::int64_t slave_loc = remote_slave_blocks[j]; // Initialize number of masters for each incoming slave to 0 collision_block_offsets[i][remote_slave_blocks[j]] = std::vector(tdim, 0); if (const auto& cell = remote_cell_collisions[j]; cell != -1) { auto cell_blocks = V.dofmap()->cell_dofs(cell); r_master.reserve(cell_blocks.size()); r_coeff.reserve(cell_blocks.size()); assert(r_master.empty()); assert(r_coeff.empty()); // Store block and non-zero basis values for (std::size_t k = 0; k < cell_blocks.size(); ++k) { if (const T c = basis_span(j, k); std::abs(c) > 1e-6) { r_coeff.push_back(c); r_master.push_back(cell_blocks[k]); } } // If no local contributions do nothing if (!r_master.empty()) { const std::size_t num_masters = r_master.size(); remote_master_global.resize(num_masters); imap->local_to_global(r_master, remote_master_global); // Insert local contributions in each block assert(block_size == tdim); for (int l = 0; l < tdim; ++l) { for (std::size_t k = 0; k < num_masters; k++) { collision_masters[i][slave_loc].push_back( remote_master_global[k] * block_size + l); collision_coeffs[i][slave_loc].push_back(r_coeff[k]); collision_owners[i][slave_loc].push_back( r_master[k] < size_local ? rank : ghost_owners[r_master[k] - size_local]); collision_block_offsets[i][slave_loc][l]++; } } r_master.clear(); r_coeff.clear(); collision_slaves[i].push_back(slave_loc); } } } } } // Flatten data structures before send/recv std::vector num_found_slave_blocks(indegree + 1); for (std::int32_t i = 0; i < indegree; ++i) num_found_slave_blocks[i] = (std::int32_t)collision_slaves[i].size(); // Get info about reverse communicator (masters->slaves) auto [src_ranks_rev, dest_ranks_rev] = dolfinx_mpc::compute_neighborhood(neighborhood_comms[1]); const std::size_t indegree_rev = src_ranks_rev.size(); // Communicate number of incoming slaves and masters after coll // detection std::vector inc_num_found_slave_blocks(indegree_rev + 1); MPI_Neighbor_alltoall(num_found_slave_blocks.data(), 1, MPI_INT, inc_num_found_slave_blocks.data(), 1, MPI_INT, neighborhood_comms[1]); inc_num_found_slave_blocks.pop_back(); num_found_slave_blocks.pop_back(); std::vector num_collision_masters(indegree + 1); std::vector found_slave_blocks; std::vector found_masters; std::vector offset_for_blocks; std::vector offsets_in_blocks; std::vector found_owners; std::vector found_coefficients; for (std::int32_t i = 0; i < indegree; ++i) { std::int32_t master_offset = 0; std::vector& slaves_i = collision_slaves[i]; found_slave_blocks.insert(found_slave_blocks.end(), slaves_i.begin(), slaves_i.end()); for (auto slave : slaves_i) { std::vector& masters_ij = collision_masters[i][slave]; num_collision_masters[i] += masters_ij.size(); found_masters.insert(found_masters.end(), masters_ij.begin(), masters_ij.end()); std::vector& coeffs_ij = collision_coeffs[i][slave]; found_coefficients.insert(found_coefficients.end(), coeffs_ij.begin(), coeffs_ij.end()); std::vector& owners_ij = collision_owners[i][slave]; found_owners.insert(found_owners.end(), owners_ij.begin(), owners_ij.end()); std::vector& blocks_ij = collision_block_offsets[i][slave]; offsets_in_blocks.insert(offsets_in_blocks.end(), blocks_ij.begin(), blocks_ij.end()); master_offset += masters_ij.size(); offset_for_blocks.push_back(master_offset); } } std::vector num_inc_masters(indegree_rev + 1); MPI_Neighbor_alltoall(num_collision_masters.data(), 1, MPI_INT, num_inc_masters.data(), 1, MPI_INT, neighborhood_comms[1]); num_inc_masters.pop_back(); num_collision_masters.pop_back(); // Create displacement vector for slaves and masters std::vector disp_inc_slave_blocks(indegree_rev + 1, 0); std::partial_sum(inc_num_found_slave_blocks.begin(), inc_num_found_slave_blocks.end(), disp_inc_slave_blocks.begin() + 1); std::vector disp_inc_masters(indegree_rev + 1, 0); std::partial_sum(num_inc_masters.begin(), num_inc_masters.end(), disp_inc_masters.begin() + 1); // Compute send offsets std::vector send_disp_slave_blocks(indegree + 1, 0); std::partial_sum(num_found_slave_blocks.begin(), num_found_slave_blocks.end(), send_disp_slave_blocks.begin() + 1); std::vector send_disp_masters(indegree + 1, 0); std::partial_sum(num_collision_masters.begin(), num_collision_masters.end(), send_disp_masters.begin() + 1); // Receive colliding blocks from other processor std::vector remote_colliding_blocks( disp_inc_slave_blocks.back()); MPI_Neighbor_alltoallv( found_slave_blocks.data(), num_found_slave_blocks.data(), send_disp_slave_blocks.data(), dolfinx::MPI::mpi_type(), remote_colliding_blocks.data(), inc_num_found_slave_blocks.data(), disp_inc_slave_blocks.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); std::vector recv_blocks_as_local( remote_colliding_blocks.size()); imap->global_to_local(remote_colliding_blocks, recv_blocks_as_local); std::vector remote_colliding_offsets( disp_inc_slave_blocks.back()); MPI_Neighbor_alltoallv( offset_for_blocks.data(), num_found_slave_blocks.data(), send_disp_slave_blocks.data(), dolfinx::MPI::mpi_type(), remote_colliding_offsets.data(), inc_num_found_slave_blocks.data(), disp_inc_slave_blocks.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); // Receive colliding masters and relevant data from other processor std::vector remote_colliding_masters(disp_inc_masters.back()); MPI_Neighbor_alltoallv( found_masters.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_masters.data(), num_inc_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); std::vector remote_colliding_coeffs(disp_inc_masters.back()); MPI_Neighbor_alltoallv(found_coefficients.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_coeffs.data(), num_inc_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); std::vector remote_colliding_owners(disp_inc_masters.back()); MPI_Neighbor_alltoallv( found_owners.data(), num_collision_masters.data(), send_disp_masters.data(), dolfinx::MPI::mpi_type(), remote_colliding_owners.data(), num_inc_masters.data(), disp_inc_masters.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); // Create receive displacement of data per slave block std::vector recv_num_found_blocks(indegree_rev); for (std::size_t i = 0; i < recv_num_found_blocks.size(); ++i) recv_num_found_blocks[i] = inc_num_found_slave_blocks[i] * tdim; std::vector inc_block_disp(indegree_rev + 1, 0); std::partial_sum(recv_num_found_blocks.begin(), recv_num_found_blocks.end(), inc_block_disp.begin() + 1); // Create send displacement of data per slave block std::vector num_found_blocks(indegree); for (std::int32_t i = 0; i < indegree; ++i) num_found_blocks[i] = tdim * num_found_slave_blocks[i]; std::vector send_block_disp(indegree + 1, 0); std::partial_sum(num_found_blocks.begin(), num_found_blocks.end(), send_block_disp.begin() + 1); // Send the block information to slave processor std::vector block_dofs_recv(inc_block_disp.back()); MPI_Neighbor_alltoallv( offsets_in_blocks.data(), num_found_blocks.data(), send_block_disp.data(), dolfinx::MPI::mpi_type(), block_dofs_recv.data(), recv_num_found_blocks.data(), inc_block_disp.data(), dolfinx::MPI::mpi_type(), neighborhood_comms[1]); // Iterate through the processors for (std::size_t i = 0; i < src_ranks_rev.size(); ++i) { // Find offsets for masters on given proc std::vector master_offsets = {0}; const std::int32_t min = disp_inc_slave_blocks[i]; const std::int32_t max = disp_inc_slave_blocks[i + 1]; master_offsets.insert(master_offsets.end(), remote_colliding_offsets.begin() + min, remote_colliding_offsets.begin() + max); // Extract the number of masters per topological dimensions const std::int32_t min_dim = inc_block_disp[i]; const std::int32_t max_dim = inc_block_disp[i + 1]; const std::vector num_dofs_per_block( block_dofs_recv.begin() + min_dim, block_dofs_recv.begin() + max_dim); // Loop through slaves and add them if they havent already been added for (std::size_t j = 0; j < master_offsets.size() - 1; ++j) { // Get the slave block const std::int32_t local_block = recv_blocks_as_local[min + j]; std::vector block_offsets(tdim + 1, 0); std::partial_sum(num_dofs_per_block.begin() + j * tdim, num_dofs_per_block.begin() + (j + 1) * tdim, block_offsets.begin() + 1); for (std::int32_t k = 0; k < tdim; ++k) { std::int32_t local_slave = local_block * block_size + k; // Skip if already found on other incoming processor if (local_masters[local_slave].empty()) { std::vector masters_( remote_colliding_masters.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k], remote_colliding_masters.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k + 1]); local_masters[local_slave].insert(local_masters[local_slave].end(), masters_.begin(), masters_.end()); std::vector coeffs_( remote_colliding_coeffs.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k], remote_colliding_coeffs.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k + 1]); local_coeffs[local_slave].insert(local_coeffs[local_slave].end(), coeffs_.begin(), coeffs_.end()); std::vector owners_( remote_colliding_owners.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k], remote_colliding_owners.begin() + disp_inc_masters[i] + master_offsets[j] + block_offsets[k + 1]); local_owners[local_slave].insert(local_owners[local_slave].end(), owners_.begin(), owners_.end()); } } } } for (auto block : local_blocks) { for (int d = 0; d < block_size; d++) { if (local_masters[block * block_size + d].empty()) { throw std::runtime_error( "No masters found on contact surface for slave. Please run in " "serial " "to check that there is contact, or increase eps2."); } } } // Distribute data for ghosted slaves (the coeffs, owners and offsets) std::vector ghost_slaves(tdim * ghost_blocks.size()); for (std::size_t i = 0; i < ghost_blocks.size(); ++i) for (std::int32_t j = 0; j < tdim; ++j) ghost_slaves[i * tdim + j] = ghost_blocks[i] * block_size + j; // Compute source and dest ranks of communicator auto neighbour_ranks = dolfinx_mpc::compute_neighborhood(slave_to_ghost); const std::vector& src_ranks_ghost = neighbour_ranks.first; const std::vector& dest_ranks_ghost = neighbour_ranks.second; // Count number of incoming slaves std::vector inc_num_slaves(src_ranks_ghost.size(), 0); std::ranges::for_each(ghost_slaves, [block_size, size_local, &ghost_owners, &inc_num_slaves, &src_ranks_ghost](std::int32_t slave) { const std::int32_t owner = ghost_owners[slave / block_size - size_local]; const auto it = std::ranges::find(src_ranks_ghost, owner); const auto index = std::distance(src_ranks_ghost.begin(), it); inc_num_slaves[index]++; }); // Count number of outgoing slaves and masters dolfinx::graph::AdjacencyList shared_indices = slave_index_map->index_to_dest_ranks(); std::vector out_num_slaves(dest_ranks_ghost.size(), 0); std::vector out_num_masters(dest_ranks_ghost.size() + 1, 0); // Create mappings from ghosted process to the data to recv // (can include repeats of data) std::map> proc_to_ghost; std::map> proc_to_ghost_masters; std::map> proc_to_ghost_coeffs; std::map> proc_to_ghost_owners; std::map> proc_to_ghost_offsets; std::vector loc_block(1); std::vector glob_block(1); std::ranges::for_each( local_blocks, [block_size, &local_masters, &local_coeffs, &local_owners, &shared_indices, &dest_ranks_ghost, &loc_block, &glob_block, &out_num_masters, &out_num_slaves, &imap, &proc_to_ghost, &proc_to_ghost_masters, &proc_to_ghost_coeffs, &proc_to_ghost_owners, &proc_to_ghost_offsets, tdim](const auto block) { for (std::int32_t j = 0; j < tdim; ++j) { const std::int32_t slave = block * block_size + j; const std::vector& masters_i = local_masters[slave]; const std::vector& coeffs_i = local_coeffs[slave]; const std::vector& owners_i = local_owners[slave]; const auto num_masters = (std::int32_t)masters_i.size(); for (auto proc : shared_indices.links(slave / block_size)) { const auto it = std::ranges::find(dest_ranks_ghost, proc); std::int32_t index = std::distance(dest_ranks_ghost.begin(), it); out_num_masters[index] += num_masters; out_num_slaves[index]++; // Map slaves to global dof to be recognized by recv proc std::div_t div = std::div(slave, block_size); loc_block[0] = div.quot; imap->local_to_global(loc_block, glob_block); glob_block[0] = glob_block[0] * block_size + div.rem; proc_to_ghost[index].push_back(glob_block[0]); // Add master data in process-wise fashion proc_to_ghost_masters[index].insert( proc_to_ghost_masters[index].end(), masters_i.begin(), masters_i.end()); proc_to_ghost_coeffs[index].insert( proc_to_ghost_coeffs[index].end(), coeffs_i.begin(), coeffs_i.end()); proc_to_ghost_owners[index].insert( proc_to_ghost_owners[index].end(), owners_i.begin(), owners_i.end()); proc_to_ghost_offsets[index].push_back( proc_to_ghost_masters[index].size()); } } }); // Flatten map of global slave ghost dofs to use alltoallv std::vector out_ghost_slaves; std::vector out_ghost_masters; std::vector out_ghost_coeffs; std::vector out_ghost_owners; std::vector out_ghost_offsets; std::vector num_send_slaves(dest_ranks_ghost.size()); std::vector num_send_masters(dest_ranks_ghost.size()); for (std::size_t i = 0; i < dest_ranks_ghost.size(); ++i) { num_send_slaves[i] = proc_to_ghost[i].size(); num_send_masters[i] = proc_to_ghost_masters[i].size(); out_ghost_slaves.insert(out_ghost_slaves.end(), proc_to_ghost[i].begin(), proc_to_ghost[i].end()); out_ghost_masters.insert(out_ghost_masters.end(), proc_to_ghost_masters[i].begin(), proc_to_ghost_masters[i].end()); out_ghost_coeffs.insert(out_ghost_coeffs.end(), proc_to_ghost_coeffs[i].begin(), proc_to_ghost_coeffs[i].end()); out_ghost_owners.insert(out_ghost_owners.end(), proc_to_ghost_owners[i].begin(), proc_to_ghost_owners[i].end()); out_ghost_offsets.insert(out_ghost_offsets.end(), proc_to_ghost_offsets[i].begin(), proc_to_ghost_offsets[i].end()); } // Receive global slave dofs for ghosts structured as on src proc // Compute displacements for data to send and receive std::vector disp_recv_ghost_slaves(src_ranks_ghost.size() + 1, 0); std::partial_sum(inc_num_slaves.begin(), inc_num_slaves.end(), disp_recv_ghost_slaves.begin() + 1); std::vector disp_send_ghost_slaves(dest_ranks_ghost.size() + 1, 0); std::partial_sum(num_send_slaves.begin(), num_send_slaves.end(), disp_send_ghost_slaves.begin() + 1); std::vector in_ghost_slaves(disp_recv_ghost_slaves.back()); MPI_Neighbor_alltoallv( out_ghost_slaves.data(), num_send_slaves.data(), disp_send_ghost_slaves.data(), dolfinx::MPI::mpi_type(), in_ghost_slaves.data(), inc_num_slaves.data(), disp_recv_ghost_slaves.data(), dolfinx::MPI::mpi_type(), slave_to_ghost); std::vector in_ghost_offsets(disp_recv_ghost_slaves.back()); MPI_Neighbor_alltoallv( out_ghost_offsets.data(), num_send_slaves.data(), disp_send_ghost_slaves.data(), dolfinx::MPI::mpi_type(), in_ghost_offsets.data(), inc_num_slaves.data(), disp_recv_ghost_slaves.data(), dolfinx::MPI::mpi_type(), slave_to_ghost); // Communicate size of communication of masters std::vector inc_num_masters(src_ranks_ghost.size() + 1); MPI_Neighbor_alltoall(out_num_masters.data(), 1, MPI_INT, inc_num_masters.data(), 1, MPI_INT, slave_to_ghost); inc_num_masters.pop_back(); out_num_masters.pop_back(); // Send and receive the masters (the proc owning the master) and the // corresponding coeffs from the processor owning the slave std::vector disp_recv_ghost_masters(src_ranks_ghost.size() + 1, 0); std::partial_sum(inc_num_masters.begin(), inc_num_masters.end(), disp_recv_ghost_masters.begin() + 1); std::vector disp_send_ghost_masters(dest_ranks_ghost.size() + 1, 0); std::partial_sum(num_send_masters.begin(), num_send_masters.end(), disp_send_ghost_masters.begin() + 1); std::vector in_ghost_masters(disp_recv_ghost_masters.back()); MPI_Neighbor_alltoallv( out_ghost_masters.data(), num_send_masters.data(), disp_send_ghost_masters.data(), dolfinx::MPI::mpi_type(), in_ghost_masters.data(), inc_num_masters.data(), disp_recv_ghost_masters.data(), dolfinx::MPI::mpi_type(), slave_to_ghost); std::vector in_ghost_coeffs(disp_recv_ghost_masters.back()); MPI_Neighbor_alltoallv(out_ghost_coeffs.data(), num_send_masters.data(), disp_send_ghost_masters.data(), dolfinx::MPI::mpi_type(), in_ghost_coeffs.data(), inc_num_masters.data(), disp_recv_ghost_masters.data(), dolfinx::MPI::mpi_type(), slave_to_ghost); std::vector in_ghost_owners(disp_recv_ghost_masters.back()); MPI_Neighbor_alltoallv( out_ghost_owners.data(), num_send_masters.data(), disp_send_ghost_masters.data(), dolfinx::MPI::mpi_type(), in_ghost_owners.data(), inc_num_masters.data(), disp_recv_ghost_masters.data(), dolfinx::MPI::mpi_type(), slave_to_ghost); // Accumulate offsets of masters from different processors std::vector ghost_offsets = {0}; for (std::size_t i = 0; i < src_ranks_ghost.size(); ++i) { const std::int32_t min = disp_recv_ghost_slaves[i]; const std::int32_t max = disp_recv_ghost_slaves[i + 1]; std::vector inc_offset; inc_offset.insert(inc_offset.end(), in_ghost_offsets.begin() + min, in_ghost_offsets.begin() + max); for (std::int32_t& offset : inc_offset) offset += *(ghost_offsets.end() - 1); ghost_offsets.insert(ghost_offsets.end(), inc_offset.begin(), inc_offset.end()); } // Flatten local slaves data std::vector slaves; slaves.reserve(tdim * local_blocks.size()); std::vector masters; masters.reserve(slaves.size()); std::vector coeffs_out; coeffs_out.reserve(slaves.size()); std::vector owners_out; owners_out.reserve(slaves.size()); std::vector offsets = {0}; offsets.reserve(slaves.size() + 1); std::ranges::for_each( local_blocks, [block_size, tdim, &masters, &local_masters, &coeffs_out, &local_coeffs, &owners_out, &local_owners, &offsets, &slaves](const std::int32_t block) { for (std::int32_t j = 0; j < tdim; ++j) { const std::int32_t slave = block * block_size + j; slaves.push_back(slave); masters.insert(masters.end(), local_masters[slave].begin(), local_masters[slave].end()); coeffs_out.insert(coeffs_out.end(), local_coeffs[slave].begin(), local_coeffs[slave].end()); offsets.push_back((std::int32_t)masters.size()); owners_out.insert(owners_out.end(), local_owners[slave].begin(), local_owners[slave].end()); } }); // Extend local data with ghost entries const std::int32_t num_loc_slaves = slaves.size(); const std::int32_t num_local_masters = masters.size(); const std::int32_t num_ghost_slaves = in_ghost_slaves.size(); const std::int32_t num_ghost_masters = in_ghost_masters.size(); masters.reserve(num_local_masters + num_ghost_masters); coeffs_out.reserve(num_local_masters + num_ghost_masters); owners_out.reserve(num_local_masters + num_ghost_masters); offsets.reserve(num_local_masters + num_ghost_masters + 1); for (std::int32_t i = 0; i < num_ghost_slaves; i++) { for (std::int32_t j = ghost_offsets[i]; j < ghost_offsets[i + 1]; j++) { masters.push_back(in_ghost_masters[j]); owners_out.push_back(in_ghost_owners[j]); coeffs_out.push_back(in_ghost_coeffs[j]); } offsets.push_back((std::int32_t)masters.size()); } // Map ghosts to local data and copy into slave array std::vector local_ghosts = map_dofs_global_to_local(V, in_ghost_slaves); slaves.resize(num_loc_slaves + num_ghost_slaves); std::ranges::copy(local_ghosts, slaves.begin() + num_loc_slaves); mpc.slaves = slaves; mpc.masters = masters; mpc.offsets = offsets; mpc.owners = owners_out; mpc.coeffs = coeffs_out; timer.stop(); return mpc; } //----------------------------------------------------------------------------- } // namespace dolfinx_mpc dolfinx_mpc-0.9.1/cpp/DOLFINX_MPCConfig.cmake.in000066400000000000000000000005341476141270300211530ustar00rootroot00000000000000# - Build details for DOLFINX_MPC: An extension to dolfinx to use multi-point constraint # @PACKAGE_INIT@ include(CMakeFindDependencyMacro) find_dependency(DOLFINX REQUIRED) find_dependency(MPI REQUIRED) if (NOT TARGET dolfinx_mpc) include("${CMAKE_CURRENT_LIST_DIR}/DOLFINX_MPCTargets.cmake") endif() check_required_components(DOLFINX_MPC) dolfinx_mpc-0.9.1/cpp/MultiPointConstraint.h000066400000000000000000000200121476141270300211270ustar00rootroot00000000000000// Copyright (C) 2019-2021 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "mpc_helpers.h" #include #include #include #include #include #include #include #include namespace dolfinx_mpc { template class MultiPointConstraint { public: /// Create contact constraint /// /// @param[in] V The function space /// @param[in] slaves List of local slave dofs /// @param[in] masters Array of all masters /// @param[in] coeffs Coefficients corresponding to each master /// @param[in] owners Owners for each master /// @param[in] offsets Offsets for masters /// @tparam The floating type of the mesh MultiPointConstraint(std::shared_ptr> V, std::span slaves, std::span masters, std::span coeffs, std::span owners, std::span offsets) : _slaves(), _is_slave(), _cell_to_slaves_map(), _num_local_slaves(), _master_map(), _coeff_map(), _owner_map(), _mpc_constants(), _V() { assert(slaves.size() == offsets.size() - 1); assert(masters.size() == coeffs.size()); assert(coeffs.size() == owners.size()); assert(offsets.back() == owners.size()); // Create list indicating which dofs on the process are slaves const dolfinx::fem::DofMap& dofmap = *(V->dofmap()); const std::int32_t num_dofs_local = dofmap.index_map_bs() * (dofmap.index_map->size_local() + dofmap.index_map->num_ghosts()); std::vector _slave_data(num_dofs_local, 0); _mpc_constants = std::vector(num_dofs_local, 0); for (auto dof : slaves) { _slave_data[dof] = 1; // FIXME: Add input vector for this data _mpc_constants[dof] = 1; } _is_slave = std::move(_slave_data); // Create a map for cells owned by the process to the slaves _cell_to_slaves_map = create_cell_to_dofs_map(*V, slaves); // Create adjacency list with all local dofs, where the slave dofs maps to // its masters std::vector _num_masters(num_dofs_local); std::ranges::fill(_num_masters, 0); for (std::int32_t i = 0; i < slaves.size(); i++) _num_masters[slaves[i]] = offsets[i + 1] - offsets[i]; std::vector masters_offsets(num_dofs_local + 1); masters_offsets[0] = 0; std::inclusive_scan(_num_masters.begin(), _num_masters.end(), masters_offsets.begin() + 1); // Reuse num masters as fill position array std::ranges::fill(_num_masters, 0); std::vector _master_data(masters.size()); std::vector _coeff_data(masters.size()); std::vector _owner_data(masters.size()); /// Create adjacency lists spanning all local dofs mapping to master dofs, /// its owner and the corresponding coefficient for (std::size_t i = 0; i < slaves.size(); i++) { for (std::int32_t j = 0; j < offsets[i + 1] - offsets[i]; j++) { _master_data[masters_offsets[slaves[i]] + _num_masters[slaves[i]]] = masters[offsets[i] + j]; _coeff_data[masters_offsets[slaves[i]] + _num_masters[slaves[i]]] = coeffs[offsets[i] + j]; _owner_data[masters_offsets[slaves[i]] + _num_masters[slaves[i]]] = owners[offsets[i] + j]; _num_masters[slaves[i]]++; } } _coeff_map = std::make_shared>( _coeff_data, masters_offsets); _owner_map = std::make_shared>( _owner_data, masters_offsets); // Create a vector containing all the slave dofs (sorted) std::vector sorted_slaves(slaves.size()); std::int32_t c = 0; for (std::size_t i = 0; i < _is_slave.size(); i++) if (_is_slave[i]) sorted_slaves[c++] = i; _slaves = std::move(sorted_slaves); const std::int32_t num_local = dofmap.index_map_bs() * dofmap.index_map->size_local(); auto it = std::ranges::lower_bound(_slaves, num_local); _num_local_slaves = std::distance(_slaves.begin(), it); // Create new function space with extended index map _V = std::make_shared>( create_extended_functionspace(*V, _master_data, _owner_data)); // Map global masters to local index in extended function space std::vector masters_local = map_dofs_global_to_local(*_V, _master_data); _master_map = std::make_shared>( masters_local, masters_offsets); } //----------------------------------------------------------------------------- /// Backsubstitute slave/master constraint for a given function void backsubstitution(std::span vector) { for (auto slave : _slaves) { // Zero out intial data in slave dof vector[slave] = 0.0; // Accumulate master contributions auto masters = _master_map->links(slave); auto coeffs = _coeff_map->links(slave); assert(masters.size() == coeffs.size()); for (std::size_t k = 0; k < masters.size(); ++k) vector[slave] += coeffs[k] * vector[masters[k]]; //+ _mpc_constants[slave]; } }; /// Homogenize slave DoFs (particularly useful for nonlinear problems) void homogenize(std::span vector) const { for (auto slave : _slaves) vector[slave] = 0.0; }; /// Return map from cell to slaves contained in that cell std::shared_ptr> cell_to_slaves() const { return _cell_to_slaves_map; } /// Return map from slave to masters (local_index) std::shared_ptr> masters() const { return _master_map; } /// Return map from slave to coefficients std::shared_ptr> coefficients() const { return _coeff_map; } /// Return map from slave to masters (global index) std::shared_ptr> owners() const { return _owner_map; } /// Return of local dofs + num ghosts indicating if a dof is a slave std::span is_slave() const { return std::span(_is_slave); } /// Return the constant values for the constraint const std::vector& constant_values() const { return _mpc_constants; } /// Return an array of all slave indices (sorted and local to process) const std::vector& slaves() const { return _slaves; } /// Return number of slaves owned by process const std::int32_t num_local_slaves() const { return _num_local_slaves; } /// Return the MPC FunctionSpace std::shared_ptr> function_space() const { return _V; } private: // MPC function space std::shared_ptr> _V; // Array including all slaves (local + ghosts) std::vector _slaves; std::vector _is_slave; std::vector _mpc_constants; // Map from slave cell to index in _slaves for a given slave cell std::shared_ptr> _cell_to_slaves_map; // Number of slaves owned by the process std::int32_t _num_local_slaves; // Map from slave (local to process) to masters (local to process) std::shared_ptr> _master_map; // Map from slave (local to process)to coefficients std::shared_ptr> _coeff_map; // Map from slave( local to process) to rank of process owning master std::shared_ptr> _owner_map; }; } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/PeriodicConstraint.h000066400000000000000000000753411476141270300206000ustar00rootroot00000000000000// Copyright (C) 2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "ContactConstraint.h" #include namespace impl { /// Create a periodic MPC condition given a set of slave degrees of freedom /// (blocked wrt. the input function space) /// @param[in] V The input function space (possibly a collapsed sub space) /// @param[in] relation Function relating coordinates of the slave surface to /// the master surface /// @param[in] scale Scaling of the periodic condition /// @param[in] parent_map Map from collapsed space to parent space (Identity if /// not collapsed) /// @param[in] parent_space The parent space (The same space as V if not /// collapsed) /// @returns The multi point constraint template dolfinx_mpc::mpc_data _create_periodic_condition( const dolfinx::fem::FunctionSpace& V, std::span slave_blocks, const std::function(std::span)>& relation, T scale, const std::function& parent_map, const dolfinx::fem::FunctionSpace& parent_space) { // Map a list of indices in collapsed space back to the parent space auto sub_to_parent = [&parent_map](const std::vector& sub_dofs) { std::vector parent_dofs(sub_dofs.size()); std::ranges::transform(sub_dofs, parent_dofs.begin(), [&parent_map](auto& dof) { return parent_map(dof); }); return parent_dofs; }; // Map a list of parent dofs (local to process) to its global dof auto parent_dofmap = parent_space.dofmap(); auto parent_to_global = [&parent_dofmap](const std::vector& dofs) { std::vector parent_blocks; std::vector parent_rems; parent_blocks.reserve(dofs.size()); parent_rems.reserve(dofs.size()); const auto bs = parent_dofmap->index_map_bs(); // Split parent dofs into blocks and rems std::ranges::for_each(dofs, [&parent_blocks, &parent_rems, &bs](auto dof) { std::div_t div = std::div(dof, bs); parent_blocks.push_back(div.quot); parent_rems.push_back(div.rem); }); // Map blocks to global index std::vector parents_glob(dofs.size()); parent_dofmap->index_map->local_to_global(parent_blocks, parents_glob); // Unroll block size for (std::size_t i = 0; i < dofs.size(); i++) parents_glob[i] = parents_glob[i] * bs + parent_rems[i]; return parents_glob; }; if (const std::size_t value_size = V.value_size() / V.element()->block_size(); value_size > 1) throw std::runtime_error( "Periodic conditions for vector valued spaces are not " "implemented"); // Tolerance for adding scaled basis values to MPC. Any scaled basis // value with lower absolute value than the tolerance is ignored const U tol = 500 * std::numeric_limits::epsilon(); auto mesh = V.mesh(); auto dofmap = V.dofmap(); auto imap = dofmap->index_map; const int bs = dofmap->index_map_bs(); const int size_local = imap->size_local(); /// Compute which rank (relative to neighbourhood) to send each ghost to std::span ghost_owners = imap->owners(); // Only work with local blocks std::vector local_blocks; local_blocks.reserve(slave_blocks.size()); std::ranges::for_each(slave_blocks, [&local_blocks, size_local](auto block) { if (block < size_local) local_blocks.push_back(block); }); // Create map from slave dof blocks to a cell containing them std::vector slave_cells = dolfinx_mpc::create_block_to_cell_map( *V.mesh()->topology(), *V.dofmap(), local_blocks); // Compute relation(slave_blocks) std::vector mapped_T_b(local_blocks.size() * 3); const std::array T_shape = {local_blocks.size(), 3}; { MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent, 3>> mapped_T(mapped_T_b.data(), T_shape); // Tabulate dof coordinates for each dof auto [x, x_shape] = dolfinx_mpc::tabulate_dof_coordinates( V, local_blocks, slave_cells, true); // Map all slave coordinates using the relation std::vector mapped_x = relation(x); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>> x_(mapped_x.data(), x_shape); for (std::size_t i = 0; i < mapped_T.extent(0); ++i) for (std::size_t j = 0; j < mapped_T.extent(1); ++j) mapped_T(i, j) = x_(j, i); } // Get mesh info const int tdim = mesh->topology()->dim(); auto cell_imap = mesh->topology()->index_map(tdim); const int num_cells_local = cell_imap->size_local(); // Create bounding-box tree over owned cells std::vector r(num_cells_local); std::iota(r.begin(), r.end(), 0); dolfinx::geometry::BoundingBoxTree tree(*mesh.get(), tdim, r, tol); auto process_tree = tree.create_global_tree(mesh->comm()); auto colliding_bbox_processes = dolfinx::geometry::compute_collisions(process_tree, mapped_T_b); std::vector local_cell_collisions = dolfinx_mpc::find_local_collisions(*mesh, tree, mapped_T_b, tol); dolfinx::common::Timer t0("~~Periodic: Local cell and eval basis"); auto [basis_values, basis_shape] = dolfinx_mpc::evaluate_basis_functions( V, mapped_T_b, local_cell_collisions); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> tabulated_basis_values(basis_values.data(), basis_shape); t0.stop(); // Create output arrays std::vector slaves; slaves.reserve(slave_blocks.size() * bs); std::vector masters; masters.reserve(slave_blocks.size() * bs); std::vector owners; owners.reserve(slave_blocks.size() * bs); // FIXME: This should really be templated // Requires changes all over the place for mpc_data std::vector coeffs; coeffs.reserve(slave_blocks.size() * bs); std::vector num_masters_per_slave; num_masters_per_slave.reserve(slave_blocks.size() * bs); // Temporary array holding global indices std::vector sub_dofs; const int rank = dolfinx::MPI::rank(mesh->comm()); // Create counter for blocks that will be sent to other processes const int num_procs = dolfinx::MPI::size(mesh->comm()); std::vector off_process_counter(num_procs, 0); for (std::size_t i = 0; i < local_cell_collisions.size(); i++) { if (const std::int32_t cell = local_cell_collisions[i]; cell != -1) { // Map local dofs on master cell to global indices auto cell_blocks = dofmap->cell_dofs(cell); // Unroll local master dofs sub_dofs.resize(cell_blocks.size() * bs); for (std::size_t j = 0; j < cell_blocks.size(); j++) for (int b = 0; b < bs; b++) sub_dofs[j * bs + b] = cell_blocks[j] * bs + b; auto parent_dofs = sub_to_parent(sub_dofs); auto global_parent_dofs = parent_to_global(parent_dofs); // Check if basis values are not zero, and add master, coeff and // owner info for each dof in the block for (int b = 0; b < bs; b++) { slaves.push_back(parent_map(local_blocks[i] * bs + b)); int num_masters = 0; for (std::size_t j = 0; j < cell_blocks.size(); j++) { const std::int32_t cell_block = cell_blocks[j]; // NOTE: Assuming 0 value size if (const T val = scale * tabulated_basis_values(i, j, 0); std::abs(val) > tol) { num_masters++; masters.push_back(global_parent_dofs[j * bs + b]); coeffs.push_back(val); // NOTE: Assuming same ownership of dofs in collapsed sub space if (cell_block < size_local) owners.push_back(rank); else owners.push_back(ghost_owners[cell_block - size_local]); } } num_masters_per_slave.push_back(num_masters); } } else { // Count number of incoming blocks from other processes auto procs = colliding_bbox_processes.links((int)i); for (auto proc : procs) { if (rank == proc) continue; off_process_counter[proc]++; } } } // Communicate s_to_m std::vector s_to_m_ranks; std::vector s_to_m_indicator(num_procs, 0); std::vector num_out_slaves; for (int i = 0; i < num_procs; i++) { if (off_process_counter[i] > 0 && i != rank) { s_to_m_indicator[i] = 1; s_to_m_ranks.push_back(i); num_out_slaves.push_back(off_process_counter[i]); } } // Push back to avoid null_ptr num_out_slaves.push_back(0); std::vector indicator(num_procs); MPI_Alltoall(s_to_m_indicator.data(), 1, MPI_INT8_T, indicator.data(), 1, MPI_INT8_T, mesh->comm()); std::vector m_to_s_ranks; for (int i = 0; i < num_procs; i++) if (indicator[i]) m_to_s_ranks.push_back(i); // Create neighborhood communicator // Slave block owners -> Process with possible masters std::vector s_to_m_weights(s_to_m_ranks.size(), 1); std::vector m_to_s_weights(m_to_s_ranks.size(), 1); auto slave_to_master = MPI_COMM_NULL; MPI_Dist_graph_create_adjacent( mesh->comm(), (int)m_to_s_ranks.size(), m_to_s_ranks.data(), m_to_s_weights.data(), (int)s_to_m_ranks.size(), s_to_m_ranks.data(), s_to_m_weights.data(), MPI_INFO_NULL, false, &slave_to_master); int indegree(-1); int outdegree(-2); int weighted(-1); MPI_Dist_graph_neighbors_count(slave_to_master, &indegree, &outdegree, &weighted); assert(indegree == (int)m_to_s_ranks.size()); assert(outdegree == (int)s_to_m_ranks.size()); // Compute number of receiving slaves std::vector num_recv_slaves(indegree + 1); MPI_Neighbor_alltoall( num_out_slaves.data(), 1, dolfinx::MPI::mpi_type(), num_recv_slaves.data(), 1, dolfinx::MPI::mpi_type(), slave_to_master); num_out_slaves.pop_back(); num_recv_slaves.pop_back(); // Prepare data structures for sending information std::vector disp_out(outdegree + 1, 0); std::partial_sum(num_out_slaves.begin(), num_out_slaves.end(), disp_out.begin() + 1); // Prepare data structures for receiving information std::vector disp_in(indegree + 1, 0); std::partial_sum(num_recv_slaves.begin(), num_recv_slaves.end(), disp_in.begin() + 1); // Array holding all dofs (index local to process) for coordinates sent // out std::vector searching_dofs(bs * disp_out.back()); // Communicate coordinates of slave blocks std::vector out_placement(outdegree, 0); std::vector coords_out(disp_out.back() * 3); for (std::size_t i = 0; i < local_cell_collisions.size(); i++) { if (local_cell_collisions[i] == -1) { auto procs = colliding_bbox_processes.links((int)i); for (auto proc : procs) { if (rank == proc) continue; // Find position in neighborhood communicator auto it = std::ranges::find(s_to_m_ranks, proc); assert(it != s_to_m_ranks.end()); auto dist = std::distance(s_to_m_ranks.begin(), it); const std::int32_t insert_location = disp_out[dist] + out_placement[dist]++; // Copy coordinates and dofs to output arrays std::ranges::copy_n(std::next(mapped_T_b.begin(), 3 * i), 3, std::next(coords_out.begin(), 3 * insert_location)); for (int b = 0; b < bs; b++) { searching_dofs[insert_location * bs + b] = parent_map(local_blocks[i] * bs + b); } } } } // Communciate coordinates with other process const std::array cr_shape = {(std::size_t)disp_in.back(), 3}; std::vector coords_recvb(cr_shape.front() * cr_shape.back()); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent, 3>> coords_recv(coords_recvb.data(), cr_shape); // Take into account that we send three values per slave auto m_3 = [](auto& num) { num *= 3; }; std::ranges::for_each(disp_out, m_3); std::ranges::for_each(num_out_slaves, m_3); std::ranges::for_each(disp_in, m_3); std::ranges::for_each(num_recv_slaves, m_3); // Communicate coordinates MPI_Neighbor_alltoallv( coords_out.data(), num_out_slaves.data(), disp_out.data(), dolfinx::MPI::mpi_type(), coords_recvb.data(), num_recv_slaves.data(), disp_in.data(), dolfinx::MPI::mpi_type(), slave_to_master); // Reset in_displacements to be per block for later usage auto d_3 = [](auto& num) { num /= 3; }; std::ranges::for_each(disp_in, d_3); // Reset out_displacments to be for every slave auto m_bs_d_3 = [bs](auto& num) { num = num * bs / 3; }; std::ranges::for_each(disp_out, m_bs_d_3); std::ranges::for_each(num_out_slaves, m_bs_d_3); // Create remote arrays std::vector masters_remote; masters_remote.reserve(coords_recvb.size()); std::vector owners_remote; owners_remote.reserve(coords_recvb.size()); std::vector coeffs_remote; coeffs_remote.reserve(coords_recvb.size()); std::vector num_masters_per_slave_remote; num_masters_per_slave_remote.reserve(bs * coords_recvb.size() / 3); std::vector remote_cell_collisions = dolfinx_mpc::find_local_collisions(*mesh, tree, coords_recvb, tol); auto [remote_basis_valuesb, r_basis_shape] = dolfinx_mpc::evaluate_basis_functions(V, coords_recvb, remote_cell_collisions); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> remote_basis_values(remote_basis_valuesb.data(), r_basis_shape); // Find remote masters and count how many to send to each process std::vector num_remote_masters(indegree, 0); std::vector num_remote_slaves(indegree); for (int i = 0; i < indegree; i++) { // Count number of masters added and number of slaves for output std::int32_t r_masters = 0; num_remote_slaves[i] = bs * (disp_in[i + 1] - disp_in[i]); for (std::int32_t j = disp_in[i]; j < disp_in[i + 1]; j++) { if (const std::int32_t cell = remote_cell_collisions[j]; cell == -1) { for (int b = 0; b < bs; b++) num_masters_per_slave_remote.push_back(0); } else { // Compute basis functions at mapped point // Check if basis values are not zero, and add master, coeff and // owner info for each dof in the block auto cell_blocks = dofmap->cell_dofs(cell); // Unroll local master dofs sub_dofs.resize(cell_blocks.size() * bs); for (std::size_t k = 0; k < cell_blocks.size(); k++) for (int b = 0; b < bs; b++) sub_dofs[k * bs + b] = cell_blocks[k] * bs + b; auto parent_dofs = sub_to_parent(sub_dofs); auto global_parent_dofs = parent_to_global(parent_dofs); for (int b = 0; b < bs; b++) { int num_masters = 0; for (std::size_t k = 0; k < cell_blocks.size(); k++) { // NOTE: Assuming value_size 0 if (const T val = scale * remote_basis_values(j, k, 0); std::abs(val) > tol) { num_masters++; masters_remote.push_back(global_parent_dofs[k * bs + b]); coeffs_remote.push_back(val); // NOTE assuming same ownership in collapsed sub as parent space if (cell_blocks[k] < size_local) owners_remote.push_back(rank); else { owners_remote.push_back( ghost_owners[cell_blocks[k] - size_local]); } } } r_masters += num_masters; num_masters_per_slave_remote.push_back(num_masters); } } } num_remote_masters[i] += r_masters; } // Create inverse communicator // Procs with possible masters -> slave block owners auto master_to_slave = MPI_COMM_NULL; MPI_Dist_graph_create_adjacent( mesh->comm(), (int)s_to_m_ranks.size(), s_to_m_ranks.data(), s_to_m_weights.data(), (int)m_to_s_ranks.size(), m_to_s_ranks.data(), m_to_s_weights.data(), MPI_INFO_NULL, false, &master_to_slave); // Send data back to owning process dolfinx_mpc::recv_data recv_data = dolfinx_mpc::send_master_data_to_owner( master_to_slave, num_remote_masters, num_remote_slaves, num_out_slaves, num_masters_per_slave_remote, masters_remote, coeffs_remote, owners_remote); // Append found slaves/master pairs dolfinx_mpc::append_master_data( recv_data, searching_dofs, slaves, masters, coeffs, owners, num_masters_per_slave, parent_space.dofmap()->index_map->size_local(), parent_space.dofmap()->index_map_bs()); // Distribute ghost data dolfinx_mpc::mpc_data ghost_data = dolfinx_mpc::distribute_ghost_data( slaves, masters, coeffs, owners, num_masters_per_slave, *parent_space.dofmap()->index_map, parent_space.dofmap()->index_map_bs()); // Add ghost data to existing arrays std::vector& ghost_slaves = ghost_data.slaves; slaves.insert(std::end(slaves), std::begin(ghost_slaves), std::end(ghost_slaves)); std::vector& ghost_masters = ghost_data.masters; masters.insert(std::end(masters), std::begin(ghost_masters), std::end(ghost_masters)); std::vector& ghost_num = ghost_data.offsets; num_masters_per_slave.insert(std::end(num_masters_per_slave), std::begin(ghost_num), std::end(ghost_num)); std::vector& ghost_coeffs = ghost_data.coeffs; coeffs.insert(std::end(coeffs), std::begin(ghost_coeffs), std::end(ghost_coeffs)); std::vector& ghost_owner_ranks = ghost_data.owners; owners.insert(std::end(owners), std::begin(ghost_owner_ranks), std::end(ghost_owner_ranks)); // Compute offsets std::vector offsets(num_masters_per_slave.size() + 1, 0); std::partial_sum(num_masters_per_slave.begin(), num_masters_per_slave.end(), offsets.begin() + 1); dolfinx_mpc::mpc_data output; output.offsets = offsets; output.masters = masters; output.coeffs = coeffs; output.owners = owners; output.slaves = slaves; return output; } /// Create a periodic MPC condition given a geometrical relation between the /// slave and master surface /// @param[in] V The input function space (possibly a sub space) /// @param[in] indicator Function marking tabulated degrees of freedom /// @param[in] relation Function relating coordinates of the slave surface to /// the master surface /// @param[in] bcs List of Dirichlet BCs on the input space /// @param[in] scale Scaling of the periodic condition /// @param[in] collapse If true, the list of marked dofs is in the collapsed /// input space /// @returns The multi point constraint template dolfinx_mpc::mpc_data geometrical_condition( const std::shared_ptr> V, const std::function( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>>)>& indicator, const std::function(std::span)>& relation, const std::vector>>& bcs, T scale, bool collapse) { std::vector reduced_blocks; if (collapse) { // Locate dofs in sub and parent space std::pair, std::vector> sub_space = V->collapse(); const dolfinx::fem::FunctionSpace& V_sub = sub_space.first; const std::vector& parent_map = sub_space.second; std::array, 2> slave_blocks = dolfinx::fem::locate_dofs_geometrical({*V.get(), V_sub}, indicator); reduced_blocks.reserve(slave_blocks[0].size()); // Remove blocks in Dirichlet bcs std::vector bc_marker = dolfinx_mpc::is_bc(*V, slave_blocks[0], bcs); for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) reduced_blocks.push_back(slave_blocks[1][i]); // Create sub space to parent map auto sub_map = [&parent_map](const std::int32_t& i) { return parent_map[i]; }; return _create_periodic_condition(V_sub, std::span(reduced_blocks), relation, scale, sub_map, *V); } else { std::vector slave_blocks = dolfinx::fem::locate_dofs_geometrical(*V, indicator); reduced_blocks.reserve(slave_blocks.size()); // Remove blocks in Dirichlet bcs std::vector bc_marker = dolfinx_mpc::is_bc(*V, slave_blocks, bcs); for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) reduced_blocks.push_back(slave_blocks[i]); auto sub_map = [](const std::int32_t& dof) { return dof; }; return _create_periodic_condition(*V, std::span(reduced_blocks), relation, scale, sub_map, *V); } } /// Create a periodic MPC on a given set of mesh entities, mapped to the /// master surface by a relation function. /// @param[in] V The input function space (possibly a sub space) /// @param[in] meshtag Meshtag with set of entities /// @param[in] tag The value of the mesh tag entities that should bec /// considered as entities /// @param[in] relation Function relating coordinates of the slave surface to /// the master surface /// @param[in] bcs List of Dirichlet BCs on the input space /// @param[in] scale Scaling of the periodic condition /// @param[in] collapse If true, the list of marked dofs is in the collapsed /// input space /// @returns The multi point constraint template dolfinx_mpc::mpc_data topological_condition( const std::shared_ptr> V, const std::shared_ptr> meshtag, const std::int32_t tag, const std::function(std::span)>& relation, const std::vector>>& bcs, T scale, bool collapse) { std::vector entities = meshtag->find(tag); V->mesh()->topology_mutable()->create_connectivity( meshtag->dim(), V->mesh()->topology()->dim()); if (collapse) { // Locate dofs in sub and parent space std::pair, std::vector> sub_space = V->collapse(); const dolfinx::fem::FunctionSpace& V_sub = sub_space.first; const std::vector& parent_map = sub_space.second; std::array, 2> slave_blocks = dolfinx::fem::locate_dofs_topological(*V->mesh()->topology_mutable(), {*V->dofmap(), *V_sub.dofmap()}, meshtag->dim(), entities); // Remove DirichletBC dofs from sub space std::vector bc_marker = dolfinx_mpc::is_bc(*V, slave_blocks[0], bcs); std::vector reduced_blocks; for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) reduced_blocks.push_back(slave_blocks[1][i]); // Create sub space to parent map const auto sub_map = [&parent_map](const std::int32_t& i) { return parent_map[i]; }; // Create mpc on sub space dolfinx_mpc::mpc_data sub_data = _create_periodic_condition( V_sub, std::span(reduced_blocks), relation, scale, sub_map, *V); return sub_data; } else { std::vector slave_blocks = dolfinx::fem::locate_dofs_topological(*V->mesh()->topology_mutable(), *V->dofmap(), meshtag->dim(), entities); const std::vector bc_marker = dolfinx_mpc::is_bc(*V, slave_blocks, bcs); std::vector reduced_blocks; for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) reduced_blocks.push_back(slave_blocks[i]); // Identity map const auto sub_map = [](const std::int32_t& dof) { return dof; }; return _create_periodic_condition(*V, std::span(reduced_blocks), relation, scale, sub_map, *V); } }; } // namespace impl namespace dolfinx_mpc { mpc_data create_periodic_condition_geometrical( const std::shared_ptr> V, const std::function( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const double, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>>)>& indicator, const std::function(std::span)>& relation, const std::vector>>& bcs, double scale, bool collapse) { return impl::geometrical_condition(V, indicator, relation, bcs, scale, collapse); } mpc_data> create_periodic_condition_geometrical( const std::shared_ptr> V, const std::function( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const double, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>>)>& indicator, const std::function(std::span)>& relation, const std::vector< std::shared_ptr>>>& bcs, std::complex scale, bool collapse) { return impl::geometrical_condition, double>( V, indicator, relation, bcs, scale, collapse); } mpc_data create_periodic_condition_topological( const std::shared_ptr> V, const std::shared_ptr> meshtag, const std::int32_t tag, const std::function(std::span)>& relation, const std::vector>>& bcs, double scale, bool collapse) { return impl::topological_condition(V, meshtag, tag, relation, bcs, scale, collapse); } mpc_data> create_periodic_condition_topological( const std::shared_ptr> V, const std::shared_ptr> meshtag, const std::int32_t tag, const std::function(std::span)>& relation, const std::vector< std::shared_ptr>>>& bcs, std::complex scale, bool collapse) { return impl::topological_condition, double>( V, meshtag, tag, relation, bcs, scale, collapse); } mpc_data create_periodic_condition_geometrical( const std::shared_ptr> V, const std::function( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const float, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>>)>& indicator, const std::function(std::span)>& relation, const std::vector>>& bcs, float scale, bool collapse) { return impl::geometrical_condition(V, indicator, relation, bcs, scale, collapse); } mpc_data> create_periodic_condition_geometrical( const std::shared_ptr> V, const std::function( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const float, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>>)>& indicator, const std::function(std::span)>& relation, const std::vector< std::shared_ptr>>>& bcs, std::complex scale, bool collapse) { return impl::geometrical_condition, float>( V, indicator, relation, bcs, scale, collapse); } mpc_data create_periodic_condition_topological( const std::shared_ptr> V, const std::shared_ptr> meshtag, const std::int32_t tag, const std::function(std::span)>& relation, const std::vector>>& bcs, float scale, bool collapse) { return impl::topological_condition(V, meshtag, tag, relation, bcs, scale, collapse); } mpc_data> create_periodic_condition_topological( const std::shared_ptr> V, const std::shared_ptr> meshtag, const std::int32_t tag, const std::function(std::span)>& relation, const std::vector< std::shared_ptr>>>& bcs, std::complex scale, bool collapse) { return impl::topological_condition, float>( V, meshtag, tag, relation, bcs, scale, collapse); } } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/README.md000066400000000000000000000000311476141270300160630ustar00rootroot00000000000000# C++ part of DolfinX-MPCdolfinx_mpc-0.9.1/cpp/SlipConstraint.h000066400000000000000000000142021476141270300177360ustar00rootroot00000000000000// Copyright (C) 2019-2021 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "ContactConstraint.h" #include #include #include namespace dolfinx_mpc { // template mpc_data create_slip_condition( std::shared_ptr>& space, const dolfinx::mesh::MeshTags& meshtags, std::int32_t marker, const dolfinx::fem::Function& v, std::vector>> bcs, const bool sub_space) { // Map from collapsed sub space to parent space std::function parent_map; // Interpolate input function into suitable space std::shared_ptr> n; if (sub_space) { std::pair, std::vector> collapsed_space = space->collapse(); auto V_ptr = std::make_shared>( std::move(collapsed_space.first)); n = std::make_shared>(V_ptr); n->interpolate(v); parent_map = [sub_map = collapsed_space.second](const std::int32_t dof) { return sub_map[dof]; }; } else { parent_map = [](const std::int32_t dof) { return dof; }; n = std::make_shared>(space); n->interpolate(v); } // Info from parent space auto W_imap = space->dofmap()->index_map; std::span W_ghost_owners = W_imap->owners(); const int W_bs = space->dofmap()->index_map_bs(); const int W_local_size = W_imap->size_local(); const std::vector slave_facets = meshtags.find(marker); auto mesh = space->mesh(); MPI_Comm comm = mesh->comm(); const int rank = dolfinx::MPI::rank(comm); // Array containing blocks of the MPC slaves std::vector slave_blocks; std::int32_t num_normal_components; // Find blocks in collapsed space and remove DirichletBC dofs if (sub_space) { // Get all degrees of freedom in the sub-space on the given facets std::array, 2> entity_dofs = dolfinx::fem::locate_dofs_topological( *space->mesh()->topology_mutable(), {*space->dofmap(), *n->function_space()->dofmap()}, meshtags.dim(), slave_facets); // Remove Dirichlet BC dofs const std::vector bc_marker = dolfinx_mpc::is_bc(*space, entity_dofs[0], bcs); num_normal_components = n->function_space()->dofmap()->index_map_bs(); slave_blocks.reserve(entity_dofs[0].size()); for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) slave_blocks.push_back(entity_dofs[1][i] / num_normal_components); // Erase blocks duplicate blocks dolfinx::radix_sort(std::span(slave_blocks)); slave_blocks.erase(std::unique(slave_blocks.begin(), slave_blocks.end()), slave_blocks.end()); } else { num_normal_components = W_bs; std::vector all_slave_blocks = dolfinx::fem::locate_dofs_topological( *space->mesh()->topology_mutable(), *space->dofmap(), meshtags.dim(), slave_facets); // Remove Dirichlet BC dofs const std::vector bc_marker = dolfinx_mpc::is_bc(*space, all_slave_blocks, bcs); for (std::size_t i = 0; i < bc_marker.size(); i++) if (!bc_marker[i]) slave_blocks.push_back(all_slave_blocks[i]); } const std::span& n_vec = n->x()->array(); // Arrays holding MPC data std::vector slaves; std::vector masters; std::vector coeffs; std::vector owners; std::vector offsets(1, 0); // Temporary arrays used to hold information about masters std::vector pair_m; for (auto block : slave_blocks) { std::span normal( std::next(n_vec.begin(), block * num_normal_components), num_normal_components); // Determine slave dof by component with biggest normal vector (to avoid // issues with grids aligned with coordiante system) auto max_el = std::ranges::max_element( normal, [](T a, T b) { return std::norm(a) < std::norm(b); }); auto slave_index = std::distance(normal.begin(), max_el); assert(slave_index < num_normal_components); std::int32_t parent_slave = parent_map(block * num_normal_components + slave_index); slaves.push_back(parent_slave); std::vector parent_masters; std::vector pair_c; std::vector pair_o; for (std::int32_t i = 0; i < num_normal_components; ++i) { if (i != slave_index) { const std::int32_t parent_dof = parent_map(block * num_normal_components + i); T coeff = -normal[i] / normal[slave_index]; parent_masters.push_back(parent_dof); pair_c.push_back(coeff); const std::int32_t m_rank = parent_dof < W_local_size * W_bs ? rank : W_ghost_owners[parent_dof / W_bs - W_local_size]; pair_o.push_back(m_rank); } } // Convert local parent dof to local parent block std::vector parent_blocks = parent_masters; std::ranges::for_each(parent_blocks, [W_bs](auto& b) { b /= W_bs; }); // Map blocks from local to global pair_m.resize(parent_masters.size()); W_imap->local_to_global(parent_blocks, pair_m); // Convert global parent block to the dofs for (std::size_t i = 0; i < parent_masters.size(); ++i) { std::div_t div = std::div(parent_masters[i], W_bs); pair_m[i] = pair_m[i] * W_bs + div.rem; } masters.insert(masters.end(), pair_m.begin(), pair_m.end()); coeffs.insert(coeffs.end(), pair_c.begin(), pair_c.end()); offsets.push_back((std::int32_t)masters.size()); owners.insert(owners.end(), pair_o.begin(), pair_o.end()); } mpc_data data; data.slaves = slaves; data.masters = masters; data.offsets = offsets; data.owners = owners; data.coeffs = coeffs; return data; } } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/assemble_matrix.cpp000066400000000000000000000775501476141270300205130ustar00rootroot00000000000000// Copyright (C) 2020-2022 Jorgen S. Dokken, Nathan Sime, and Connor D. Pierce // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "assemble_matrix.h" #include #include #include #include #include using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; namespace { /// Given an assembled element matrix Ae, remove all entries (i,j) where both i /// and j corresponds to a slave degree of freedom /// @param[in,out] Ae_stripped The matrix Ae stripped of all other entries /// @param[in] Ae The element matrix /// @param[in] num_dofs The number of degrees of freedom in each row and column /// (blocked) /// @param[in] bs The block size for the rows and columns /// @param[in] is_slave Marker indicating if a dof (local to process) is a slave /// degree of freedom /// @param[in] dofs Map from index local to cell to index local to process for /// rows rows and columns /// @returns The matrix stripped of slave contributions template void fill_stripped_matrix( MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae_stripped, MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae, const std::array& num_dofs, const std::array& bs, const std::array, 2>& is_slave, const std::array, 2>& dofs) { const auto& [row_bs, col_bs] = bs; const auto& [num_row_dofs, num_col_dofs] = num_dofs; const auto& [row_dofs, col_dofs] = dofs; const auto& [slave_rows, slave_cols] = is_slave; assert(Ae_stripped.extent(0) == Ae.extent(0)); assert(Ae_stripped.extent(1) == Ae.extent(1)); // Strip Ae of all entries where both i and j are slaves bool slave_row; bool slave_col; for (std::uint32_t i = 0; i < num_row_dofs; i++) { const int row_block = row_dofs[i] * row_bs; for (int row = 0; row < row_bs; row++) { slave_row = slave_rows[row_block + row]; const int l_row = i * row_bs + row; for (std::uint32_t j = 0; j < num_col_dofs; j++) { const int col_block = col_dofs[j] * col_bs; for (int col = 0; col < col_bs; col++) { slave_col = slave_cols[col_block + col]; const int l_col = j * col_bs + col; Ae_stripped(l_row, l_col) = (slave_row && slave_col) ? T(0.0) : Ae(l_row, l_col); } } } } }; /// Modify local element matrix Ae with MPC contributions, and insert non-local /// contributions in the correct places /// /// @param[in] mat_set Function that sets a local matrix into specified /// positions of the global matrix A /// @param[in] num_dofs The number of degrees of freedom in each row and column /// (blocked) /// @param[in, out] Ae The local element matrix /// @param[in] dofs The local indices of the row and column dofs (blocked) /// @param[in] bs The row and column block size /// @param[in] slaves The row and column slave indices (local to process) /// @param[in] masters Row and column map from the slave indices (local to /// process) to the master dofs (local to process) /// @param[in] coefficients row and column map from the slave indices (local to /// process) to the corresponding coefficients /// @param[in] is_slave Marker indicating if a dof (local to process) is a slave /// dof /// @param[in] scratch_memory Memory used in computations of additional element /// matrices and rows. Should be at least 2 * num_rows(Ae) * num_cols(Ae) + /// num_cols(Ae) + num_rows(Ae) template void modify_mpc_cell( const std::function, std::span, std::span)>& mat_set, const std::array& num_dofs, MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae, const std::array, 2>& dofs, const std::array& bs, const std::array, 2>& slaves, const std::array< std::shared_ptr>, 2>& masters, const std::array>, 2>& coeffs, const std::array, 2>& is_slave, std::span scratch_memory) { std::array num_flattened_masters = {0, 0}; std::array, 2> local_index; for (int axis = 0; axis < 2; ++axis) { // NOTE: Should this be moved into the MPC constructor? // Locate which local dofs are slave dofs and compute the local index of the // slave local_index[axis] = dolfinx_mpc::compute_local_slave_index( slaves[axis], num_dofs[axis], bs[axis], dofs[axis], is_slave[axis]); // Count number of masters in flattened structure for the rows and columns for (std::uint32_t i = 0; i < num_dofs[axis]; i++) { for (int j = 0; j < bs[axis]; j++) { const std::int32_t dof = dofs[axis][i] * bs[axis] + j; if (is_slave[axis][dof]) num_flattened_masters[axis] += masters[axis]->links(dof).size(); } } } const int ndim0 = bs[0] * num_dofs[0]; const int ndim1 = bs[1] * num_dofs[1]; assert(scratch_memory.size() >= std::size_t(2 * ndim0 * ndim1 + ndim0 + ndim1)); std::ranges::fill(scratch_memory, T(0)); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae_original(scratch_memory.data(), ndim0, ndim1); // Copy Ae into new matrix for distirbution of master dofs std::ranges::copy_n(Ae.data_handle(), ndim0 * ndim1, Ae_original.data_handle()); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae_stripped(std::next(scratch_memory.data(), ndim0 * ndim1), ndim0, ndim1); // Build matrix where all slave-slave entries are 0 for usage to row and // column addition fill_stripped_matrix(Ae_stripped, Ae, num_dofs, bs, is_slave, dofs); // Zero out slave entries in element matrix // Zero slave row std::ranges::for_each(local_index[0], [&Ae, ndim1](const auto dof) { std::ranges::fill_n( std::next(Ae.data_handle(), ndim1 * dof), ndim1, 0.0); }); // Zero slave column std::ranges::for_each(local_index[1], [&Ae, ndim0](const auto dof) { for (int row = 0; row < ndim0; ++row) Ae(row, dof) = 0.0; }); // Flatten slaves, masters and coeffs for efficient // modification of the matrices std::array, 2> flattened_masters; std::array, 2> flattened_slaves; std::array, 2> flattened_coeffs; for (std::int8_t axis = 0; axis < 2; axis++) { flattened_masters[axis].reserve(num_flattened_masters[axis]); flattened_slaves[axis].reserve(num_flattened_masters[axis]); flattened_coeffs[axis].reserve(num_flattened_masters[axis]); for (std::size_t i = 0; i < slaves[axis].size(); i++) { auto _masters = masters[axis]->links(slaves[axis][i]); auto _coeffs = coeffs[axis]->links(slaves[axis][i]); for (std::size_t j = 0; j < _masters.size(); j++) { flattened_slaves[axis].push_back(local_index[axis][i]); flattened_masters[axis].push_back(_masters[j]); flattened_coeffs[axis].push_back(_coeffs[j]); } } } for (std::int8_t axis = 0; axis < 2; ++axis) assert(num_flattened_masters[axis] == flattened_masters[axis].size()); // Data structures used for insertion of master contributions std::array row; std::array col; std::array A0; auto Arow = scratch_memory.subspan(2 * ndim0 * ndim1, ndim0); auto Acol = scratch_memory.subspan(2 * ndim0 * ndim1 + ndim0, ndim1); // Loop over all masters for the MPC applied to rows. // Insert contributions in columns std::vector unrolled_dofs(ndim1); for (std::size_t i = 0; i < num_flattened_masters[0]; ++i) { // Use the standard transpose for type double, Hermitian transpose // for type std::complex. Do this outside the j-loop so // std::conj() is only computed once per entry in flattened_masters. T coeff_i; if constexpr (std::is_scalar_v) coeff_i = flattened_coeffs[0][i]; else coeff_i = std::conj(flattened_coeffs[0][i]); // Unroll dof blocks and add column contribution for (std::uint32_t j = 0; j < num_dofs[1]; ++j) for (int k = 0; k < bs[1]; ++k) { Acol[j * bs[1] + k] = coeff_i * Ae_stripped(flattened_slaves[0][i], j * bs[1] + k); unrolled_dofs[j * bs[1] + k] = dofs[1][j] * bs[1] + k; } // Insert modified entries row[0] = flattened_masters[0][i]; mat_set(row, unrolled_dofs, Acol); // Loop through other masters on the same cell and add in contribution for (std::size_t j = 0; j < num_flattened_masters[1]; ++j) { col[0] = flattened_masters[1][j]; A0[0] = coeff_i * flattened_coeffs[1][j] * Ae_original(flattened_slaves[0][i], flattened_slaves[1][j]); mat_set(row, col, A0); } } // Loop over all masters for the MPC applied to columns. // Insert contributions in rows unrolled_dofs.resize(ndim0); for (std::size_t i = 0; i < num_flattened_masters[1]; ++i) { // Unroll dof blocks and compute row contribution for (std::uint32_t j = 0; j < num_dofs[0]; ++j) for (int k = 0; k < bs[0]; ++k) { Arow[j * bs[0] + k] = flattened_coeffs[1][i] * Ae_stripped(j * bs[0] + k, flattened_slaves[1][i]); unrolled_dofs[j * bs[0] + k] = dofs[0][j] * bs[0] + k; } // Insert modified entries col[0] = flattened_masters[1][i]; mat_set(unrolled_dofs, col, Arow); } } // namespace //----------------------------------------------------------------------------- template void assemble_exterior_facets( const std::function, std::span, const std::span)>& mat_add_block_values, const std::function, std::span, const std::span)>& mat_add_values, const dolfinx::mesh::Mesh& mesh, std::span facets, const std::function&, const std::span&, std::int32_t, int)>& apply_dof_transformation, const dolfinx::fem::DofMap& dofmap0, const std::function< void(const std::span&, const std::span&, std::int32_t, int)>& apply_dof_transformation_to_transpose, const dolfinx::fem::DofMap& dofmap1, const std::vector& bc0, const std::vector& bc1, const std::function& kernel, const std::span coeffs, int cstride, const std::vector& constants, const std::span& cell_info, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1) { // Get MPC data const std::array< std::shared_ptr>, 2> masters = {mpc0->masters(), mpc1->masters()}; const std::array>, 2> coefficients = {mpc0->coefficients(), mpc1->coefficients()}; const std::array, 2> is_slave = {mpc0->is_slave(), mpc1->is_slave()}; const std::array< std::shared_ptr>, 2> cell_to_slaves = {mpc0->cell_to_slaves(), mpc1->cell_to_slaves()}; // Get mesh data MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = mesh.geometry().dofmap(); const int num_dofs_g = x_dofmap.extent(1); std::span x_g = mesh.geometry().x(); // Iterate over all facets std::vector coordinate_dofs(3 * num_dofs_g); const auto num_dofs0 = (std::uint32_t)dofmap0.map().extent(1); const auto num_dofs1 = (std::uint32_t)dofmap1.map().extent(1); int bs0 = dofmap0.bs(); int bs1 = dofmap1.bs(); const std::uint32_t ndim0 = bs0 * num_dofs0; const std::uint32_t ndim1 = bs1 * num_dofs1; const std::array num_dofs = {num_dofs0, num_dofs1}; const std::array bs = {bs0, bs1}; std::vector Aeb(ndim0 * ndim1); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae(Aeb.data(), ndim0, ndim1); const std::span _Ae(Aeb); std::vector scratch_memory(2 * ndim0 * ndim1 + ndim0 + ndim1); for (std::size_t l = 0; l < facets.size(); l += 2) { const std::int32_t cell = facets[l]; const int local_facet = facets[l + 1]; // Get cell vertex coordinates auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(Aeb, 0); kernel(Aeb.data(), coeffs.data() + l / 2 * cstride, constants.data(), coordinate_dofs.data(), &local_facet, nullptr); apply_dof_transformation(_Ae, cell_info, cell, ndim1); apply_dof_transformation_to_transpose(_Ae, cell_info, cell, ndim0); // Zero rows/columns for essential bcs auto dmap0 = dofmap0.cell_dofs(cell); auto dmap1 = dofmap1.cell_dofs(cell); if (!bc0.empty()) { for (std::uint32_t i = 0; i < num_dofs0; ++i) { for (int k = 0; k < bs0; ++k) { if (bc0[bs0 * dmap0[i] + k]) { // Zero row bs0 * i + k const int row = bs0 * i + k; std::ranges::fill_n(std::next(Ae.data_handle(), ndim1 * row), ndim1, 0.0); } } } } if (!bc1.empty()) { for (std::size_t j = 0; j < num_dofs1; ++j) { for (int k = 0; k < bs1; ++k) { if (bc1[bs1 * dmap1[j] + k]) { // Zero column bs1 * j + k const int col = bs1 * j + k; for (std::uint32_t row = 0; row < ndim0; ++row) Aeb[row * ndim1 + col] = 0.0; } } } } // Modify local element matrix Ae and insert contributions into master // locations if ((cell_to_slaves[0]->num_links(cell) > 0) || (cell_to_slaves[1]->num_links(cell) > 0)) { const std::array, 2> slaves = {cell_to_slaves[0]->links(cell), cell_to_slaves[1]->links(cell)}; const std::array, 2> dofs = {dmap0, dmap1}; modify_mpc_cell(mat_add_values, num_dofs, Ae, dofs, bs, slaves, masters, coefficients, is_slave, scratch_memory); } mat_add_block_values(dmap0, dmap1, Aeb); } } // namespace //----------------------------------------------------------------------------- template void assemble_cells_impl( const std::function, std::span, const std::span)>& mat_add_block_values, const std::function, std::span, const std::span)>& mat_add_values, const dolfinx::mesh::Geometry& geometry, std::span active_cells, std::function, const std::span, const std::int32_t, const int)> apply_dof_transformation, const dolfinx::fem::DofMap& dofmap0, std::function, const std::span, const std::int32_t, const int)> apply_dof_transformation_to_transpose, const dolfinx::fem::DofMap& dofmap1, const std::vector& bc0, const std::vector& bc1, const std::function& kernel, const std::span& coeffs, int cstride, const std::vector& constants, const std::span& cell_info, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1) { // Get MPC data const std::array< std::shared_ptr>, 2> masters = {mpc0->masters(), mpc1->masters()}; const std::array>, 2> coefficients = {mpc0->coefficients(), mpc1->coefficients()}; const std::array, 2> is_slave = {mpc0->is_slave(), mpc1->is_slave()}; const std::array< const std::shared_ptr>, 2> cell_to_slaves = {mpc0->cell_to_slaves(), mpc1->cell_to_slaves()}; // Prepare cell geometry MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = geometry.dofmap(); const std::size_t num_dofs_g = x_dofmap.extent(1); std::span x_g = geometry.x(); // Iterate over active cells std::vector coordinate_dofs(3 * num_dofs_g); const auto num_dofs0 = (std::uint32_t)dofmap0.map().extent(1); const auto num_dofs1 = (std::uint32_t)dofmap1.map().extent(1); const std::array bs = {dofmap0.bs(), dofmap1.bs()}; const std::uint32_t ndim0 = num_dofs0 * bs.front(); const std::uint32_t ndim1 = num_dofs1 * bs.back(); const std::array num_dofs = {num_dofs0, num_dofs1}; std::vector Aeb(ndim0 * ndim1); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< T, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> Ae(Aeb.data(), ndim0, ndim1); const std::span _Ae(Aeb); std::vector scratch_memory(2 * ndim0 * ndim1 + ndim0 + ndim1); for (std::size_t c = 0; c < active_cells.size(); c++) { const std::int32_t cell = active_cells[c]; // Get cell coordinates/geometry auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(Aeb, 0); kernel(Aeb.data(), coeffs.data() + c * cstride, constants.data(), coordinate_dofs.data(), nullptr, nullptr); apply_dof_transformation(_Ae, cell_info, cell, ndim1); apply_dof_transformation_to_transpose(_Ae, cell_info, cell, ndim0); // Zero rows/columns for essential bcs std::span dofs0 = dofmap0.cell_dofs(cell); std::span dofs1 = dofmap1.cell_dofs(cell); if (!bc0.empty()) { for (std::uint32_t i = 0; i < num_dofs0; ++i) { for (std::int32_t k = 0; k < bs.front(); ++k) { if (bc0[bs.front() * dofs0[i] + k]) std::ranges::fill_n( std::next(Aeb.begin(), ndim1 * (bs.front() * i + k)), ndim1, T(0)); } } } if (!bc1.empty()) { for (std::uint32_t j = 0; j < num_dofs1; ++j) for (std::int32_t k = 0; k < bs.back(); ++k) if (bc1[bs.back() * dofs1[j] + k]) for (std::size_t l = 0; l < ndim0; ++l) Aeb[l * ndim1 + bs.back() * j + k] = 0; } // Modify local element matrix Ae and insert contributions into master // locations if ((cell_to_slaves[0]->num_links(cell) > 0) || (cell_to_slaves[1]->num_links(cell) > 0)) { const std::array, 2> slaves = {cell_to_slaves[0]->links(cell), cell_to_slaves[1]->links(cell)}; const std::array, 2> dofs = {dofs0, dofs1}; modify_mpc_cell(mat_add_values, num_dofs, Ae, dofs, bs, slaves, masters, coefficients, is_slave, scratch_memory); } mat_add_block_values(dofs0, dofs1, _Ae); } } //----------------------------------------------------------------------------- template void assemble_matrix_impl( const std::function, std::span, const std::span)>& mat_add_block_values, const std::function, std::span, const std::span)>& mat_add_values, const dolfinx::fem::Form& a, const std::vector& bc0, const std::vector& bc1, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1) { auto mesh = a.mesh(); assert(mesh); // Get dofmap data std::shared_ptr dofmap0 = a.function_spaces().at(0)->dofmap(); std::shared_ptr dofmap1 = a.function_spaces().at(1)->dofmap(); assert(dofmap0); assert(dofmap1); // Prepare constants const std::vector constants = pack_constants(a); // Prepare coefficients auto coeff_vec = dolfinx::fem::allocate_coefficient_storage(a); dolfinx::fem::pack_coefficients(a, coeff_vec); auto coefficients = dolfinx::fem::make_coefficients_span(coeff_vec); auto element0 = a.function_spaces().at(0)->element(); auto element1 = a.function_spaces().at(1)->element(); std::function, const std::span, const std::int32_t, const int)> apply_dof_transformation = element0->template dof_transformation_fn( dolfinx::fem::doftransform::standard); std::function, const std::span, const std::int32_t, const int)> apply_dof_transformation_to_transpose = element1->template dof_transformation_right_fn( dolfinx::fem::doftransform::transpose); const bool needs_transformation_data = element0->needs_dof_transformations() or element1->needs_dof_transformations() or a.needs_facet_permutations(); std::span cell_info; if (needs_transformation_data) { mesh->topology_mutable()->create_entity_permutations(); cell_info = std::span(mesh->topology()->get_cell_permutation_info()); } for (int i : a.integral_ids(dolfinx::fem::IntegralType::cell)) { const auto& fn = a.kernel(dolfinx::fem::IntegralType::cell, i); const auto& [coeffs, cstride] = coefficients.at({dolfinx::fem::IntegralType::cell, i}); std::span active_cells = a.domain(dolfinx::fem::IntegralType::cell, i); assemble_cells_impl( mat_add_block_values, mat_add_values, mesh->geometry(), active_cells, apply_dof_transformation, *dofmap0, apply_dof_transformation_to_transpose, *dofmap1, bc0, bc1, fn, coeffs, cstride, constants, cell_info, mpc0, mpc1); } for (int i : a.integral_ids(dolfinx::fem::IntegralType::exterior_facet)) { const auto& fn = a.kernel(dolfinx::fem::IntegralType::exterior_facet, i); const auto& [coeffs, cstride] = coefficients.at({dolfinx::fem::IntegralType::exterior_facet, i}); std::span facets = a.domain(dolfinx::fem::IntegralType::exterior_facet, i); assemble_exterior_facets(mat_add_block_values, mat_add_values, *mesh, facets, apply_dof_transformation, *dofmap0, apply_dof_transformation_to_transpose, *dofmap1, bc0, bc1, fn, coeffs, cstride, constants, cell_info, mpc0, mpc1); } // if (a.num_integrals(dolfinx::fem::IntegralType::interior_facet) > 0) // { // throw std::runtime_error("Not implemented yet"); // // const int tdim = mesh->topology()->dim(); // // mesh->topology_mutable().create_connectivity(tdim - 1, tdim); // // mesh->topology_mutable().create_entity_permutations(); // // std::function get_perm; // // if (a.needs_facet_permutations()) // // { // // mesh->topology_mutable().create_entity_permutations(); // // const std::vector& perms // // = mesh->topology()->get_facet_permutations(); // // get_perm = [&perms](std::size_t i) { return perms[i]; }; // // } // // else // // get_perm = [](std::size_t) { return 0; }; // } } //----------------------------------------------------------------------------- template void _assemble_matrix( const std::function, std::span, const std::span&)>& mat_add_block, const std::function, std::span, const std::span&)>& mat_add, const dolfinx::fem::Form& a, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1, const std::vector>>& bcs, const T diagval) { dolfinx::common::Timer timer("~MPC: Assemble matrix (C++)"); // Index maps for dof ranges std::shared_ptr map0 = a.function_spaces().at(0)->dofmap()->index_map; std::shared_ptr map1 = a.function_spaces().at(1)->dofmap()->index_map; int bs0 = a.function_spaces().at(0)->dofmap()->index_map_bs(); int bs1 = a.function_spaces().at(1)->dofmap()->index_map_bs(); // Build dof markers std::vector dof_marker0, dof_marker1; std::int32_t dim0 = bs0 * (map0->size_local() + map0->num_ghosts()); std::int32_t dim1 = bs1 * (map1->size_local() + map1->num_ghosts()); for (std::size_t k = 0; k < bcs.size(); ++k) { assert(bcs[k]); assert(bcs[k]->function_space()); if (a.function_spaces().at(0)->contains(*bcs[k]->function_space())) { dof_marker0.resize(dim0, false); bcs[k]->mark_dofs(dof_marker0); } if (a.function_spaces().at(1)->contains(*bcs[k]->function_space())) { dof_marker1.resize(dim1, false); bcs[k]->mark_dofs(dof_marker1); } } // Assemble assemble_matrix_impl(mat_add_block, mat_add, a, dof_marker0, dof_marker1, mpc0, mpc1); // Add diagval on diagonal for slave dofs if (mpc0->function_space() == mpc1->function_space()) { const std::vector& slaves = mpc0->slaves(); const std::int32_t num_local_slaves = mpc0->num_local_slaves(); std::vector diag_dof(1); std::vector diag_value(1); diag_value[0] = diagval; for (std::int32_t i = 0; i < num_local_slaves; ++i) { diag_dof[0] = slaves[i]; mat_add(diag_dof, diag_dof, diag_value); } } timer.stop(); } } // namespace //----------------------------------------------------------------------------- void dolfinx_mpc::assemble_matrix( const std::function, std::span, const std::span&)>& mat_add_block, const std::function, std::span, const std::span&)>& mat_add, const dolfinx::fem::Form& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc1, const std::vector>>& bcs, const double diagval) { _assemble_matrix(mat_add_block, mat_add, a, mpc0, mpc1, bcs, diagval); } //----------------------------------------------------------------------------- void dolfinx_mpc::assemble_matrix( const std::function< int(std::span, std::span, const std::span>&)>& mat_add_block, const std::function< int(std::span, std::span, const std::span>&)>& mat_add, const dolfinx::fem::Form>& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc1, const std::vector< std::shared_ptr>>>& bcs, const std::complex diagval) { _assemble_matrix(mat_add_block, mat_add, a, mpc0, mpc1, bcs, diagval); } //----------------------------------------------------------------------------- void dolfinx_mpc::assemble_matrix( const std::function, std::span, const std::span&)>& mat_add_block, const std::function, std::span, const std::span&)>& mat_add, const dolfinx::fem::Form& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc1, const std::vector>>& bcs, const float diagval) { _assemble_matrix(mat_add_block, mat_add, a, mpc0, mpc1, bcs, diagval); } //----------------------------------------------------------------------------- void dolfinx_mpc::assemble_matrix( const std::function< int(std::span, std::span, const std::span>&)>& mat_add_block, const std::function< int(std::span, std::span, const std::span>&)>& mat_add, const dolfinx::fem::Form>& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc1, const std::vector< std::shared_ptr>>>& bcs, const std::complex diagval) { _assemble_matrix(mat_add_block, mat_add, a, mpc0, mpc1, bcs, diagval); } dolfinx_mpc-0.9.1/cpp/assemble_matrix.h000066400000000000000000000131071476141270300201440ustar00rootroot00000000000000// Copyright (C) 2020-2021 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "MultiPointConstraint.h" #include #include #include namespace dolfinx_mpc { template class MultiPointConstraint; /// Assemble bilinear form into a matrix /// @param[in] mat_add_block The function for adding block values into the /// matrix /// @param[in] mat_add The function for adding values into the matrix /// @param[in] a The bilinear from to assemble /// @param[in] bcs Boundary conditions to apply. For boundary condition /// dofs the row and column are zeroed. The diagonal entry is not set. /// @param[in] diagval Value to set on diagonal of matrix for slave dofs and /// Dirichlet BC (default=1) void assemble_matrix( const std::function, std::span, const std::span&)>& mat_add_block, const std::function, std::span, const std::span&)>& mat_add, const dolfinx::fem::Form& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc1, const std::vector>>& bcs, const double diagval = 1.0); //----------------------------------------------------------------------------- /// Assemble bilinear form into a matrix /// @param[in] mat_add_block The function for adding block values into the /// matrix /// @param[in] mat_add The function for adding values into the matrix /// @param[in] a The bilinear from to assemble /// @param[in] bcs Boundary conditions to apply. For boundary condition /// dofs the row and column are zeroed. The diagonal entry is not set. /// @param[in] diagval Value to set on diagonal of matrix for slave dofs and /// Dirichlet BC (default=1) void assemble_matrix( const std::function< int(std::span, std::span, const std::span>&)>& mat_add_block, const std::function< int(std::span, std::span, const std::span>&)>& mat_add, const dolfinx::fem::Form>& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc1, const std::vector< std::shared_ptr>>>& bcs, const std::complex diagval = 1.0); /// Assemble bilinear form into a matrix /// @param[in] mat_add_block The function for adding block values into the /// matrix /// @param[in] mat_add The function for adding values into the matrix /// @param[in] a The bilinear from to assemble /// @param[in] bcs Boundary conditions to apply. For boundary condition /// dofs the row and column are zeroed. The diagonal entry is not set. /// @param[in] diagval Value to set on diagonal of matrix for slave dofs and /// Dirichlet BC (default=1) void assemble_matrix( const std::function, std::span, const std::span&)>& mat_add_block, const std::function, std::span, const std::span&)>& mat_add, const dolfinx::fem::Form& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc1, const std::vector>>& bcs, const float diagval = 1.0); //----------------------------------------------------------------------------- /// Assemble bilinear form into a matrix /// @param[in] mat_add_block The function for adding block values into the /// matrix /// @param[in] mat_add The function for adding values into the matrix /// @param[in] a The bilinear from to assemble /// @param[in] bcs Boundary conditions to apply. For boundary condition /// dofs the row and column are zeroed. The diagonal entry is not set. /// @param[in] diagval Value to set on diagonal of matrix for slave dofs and /// Dirichlet BC (default=1) void assemble_matrix( const std::function< int(std::span, std::span, const std::span>&)>& mat_add_block, const std::function< int(std::span, std::span, const std::span>&)>& mat_add, const dolfinx::fem::Form>& a, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc0, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc1, const std::vector< std::shared_ptr>>>& bcs, const std::complex diagval = 1.0); } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/assemble_utils.cpp000066400000000000000000000016201476141270300203300ustar00rootroot00000000000000// Copyright (C) 2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "assemble_utils.h" #include #include std::vector dolfinx_mpc::compute_local_slave_index( std::span slaves, const std::uint32_t num_dofs, const int bs, std::span cell_dofs, std::span is_slave) { std::vector local_index(slaves.size()); for (std::uint32_t i = 0; i < num_dofs; i++) for (int j = 0; j < bs; j++) { const std::int32_t dof = cell_dofs[i] * bs + j; assert((std::uint32_t)dof < is_slave.size()); if (is_slave[dof]) { auto it = std::ranges::find(slaves, dof); const auto slave_index = std::distance(slaves.begin(), it); local_index[slave_index] = i * bs + j; } } return local_index; };dolfinx_mpc-0.9.1/cpp/assemble_utils.h000066400000000000000000000017001476141270300177740ustar00rootroot00000000000000// Copyright (C) 2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include #include #include namespace dolfinx_mpc { /// For a set of unrolled dofs (slaves) compute the index (local to the cell /// dofs) /// @param[in] slaves List of unrolled dofs /// @param[in] num_dofs Number of dofs (blocked) /// @param[in] bs The block size /// @param[in] cell_dofs The cell dofs (blocked) /// @param[in] is_slave Array indicating if any dof (unrolled, local to process) /// is a slave /// @returns Map from position in slaves array to dof local to the cell std::vector compute_local_slave_index(std::span slaves, const std::uint32_t num_dofs, const int bs, std::span cell_dofs, std::span is_slave); } // namespace dolfinx_mpc dolfinx_mpc-0.9.1/cpp/assemble_vector.cpp000066400000000000000000000257321476141270300205040ustar00rootroot00000000000000// Copyright (C) 2021 Jorgen S. Dokken & Nathan Sime // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "assemble_vector.h" #include #include #include #include #include using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; namespace { /// Assemble an integration kernel over a set of active entities, described /// through into vector of type T, and apply the multipoint constraint /// @param[in, out] b The vector to assemble into /// @param[in] active_entities The set of active entities. /// @param[in] dofmap The dofmap /// @param[in] mpc The multipoint constraint /// @param[in] fetch_cells Function that fetches the cell index for an entity /// in active_entities /// @param[in] assemble_local_element_matrix Function f(be, index) that /// assembles into a local element matrix for a given entity /// @tparam T Scalar type for vector /// @tparam e stride Stride for each entity in active_entities template void _assemble_entities_impl( std::span b, std::span active_entities, const dolfinx::fem::DofMap& dofmap, const std::shared_ptr>& mpc, const std::function)> fetch_cells, const std::function, std::span, std::size_t)> assemble_local_element_vector) { // Get MPC data const std::shared_ptr> masters = mpc->masters(); const std::shared_ptr> coefficients = mpc->coefficients(); std::span is_slave = mpc->is_slave(); const std::shared_ptr> cell_to_slaves = mpc->cell_to_slaves(); // NOTE: Assertion that all links have the same size (no P refinement) const std::size_t num_dofs = dofmap.map().extent(1); int bs = dofmap.bs(); std::vector be(bs * num_dofs); const std::span _be(be); std::vector be_copy(bs * num_dofs); const std::span _be_copy(be_copy); // Assemble over all entities for (std::size_t e = 0; e < active_entities.size(); e += estride) { std::span entity = active_entities.subspan(e, estride); // Assemble into element vector assemble_local_element_vector(_be, entity, e / estride); const std::int32_t cell = fetch_cells(entity); auto dofs = dofmap.cell_dofs(cell); // Modify local element matrix if entity is connected to a slave cell std::span slaves = cell_to_slaves->links(cell); if (!slaves.empty()) { // Modify element vector for MPC and insert into b for non-local // contributions std::ranges::copy(be, be_copy.begin()); dolfinx_mpc::modify_mpc_vec(b, _be, _be_copy, dofs, num_dofs, bs, is_slave, slaves, masters, coefficients); } // Add local contribution to b for (std::size_t i = 0; i < num_dofs; ++i) for (int k = 0; k < bs; ++k) b[bs * dofs[i] + k] += be[bs * i + k]; } } template void _assemble_vector( std::span b, const dolfinx::fem::Form& L, const std::shared_ptr>& mpc) { const auto mesh = L.mesh(); assert(mesh); // Get dofmap data std::shared_ptr dofmap = L.function_spaces().at(0)->dofmap(); assert(dofmap); // Prepare constants & coefficients const std::vector constants = pack_constants(L); auto coeff_vec = dolfinx::fem::allocate_coefficient_storage(L); dolfinx::fem::pack_coefficients(L, coeff_vec); auto coefficients = dolfinx::fem::make_coefficients_span(coeff_vec); // Prepare cell geometry MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = mesh->geometry().dofmap(); std::span x_g = mesh->geometry().x(); // Prepare dof tranformation data auto element = L.function_spaces().at(0)->element(); const std::function&, const std::span&, std::int32_t, int)> dof_transform = element->template dof_transformation_fn( dolfinx::fem::doftransform::standard); const bool needs_transformation_data = element->needs_dof_transformations() or L.needs_facet_permutations(); std::span cell_info; if (needs_transformation_data) { mesh->topology_mutable()->create_entity_permutations(); cell_info = std::span(mesh->topology()->get_cell_permutation_info()); } const std::size_t num_dofs_g = x_dofmap.extent(1); std::vector coordinate_dofs(3 * num_dofs_g); if (L.num_integrals(dolfinx::fem::IntegralType::cell) > 0) { const auto fetch_cell = [&](std::span entity) { return entity.front(); }; for (int i : L.integral_ids(dolfinx::fem::IntegralType::cell)) { const auto& coeffs = coefficients.at({dolfinx::fem::IntegralType::cell, i}); const auto& fn = L.kernel(dolfinx::fem::IntegralType::cell, i); /// Assemble local cell kernels into a vector /// @param[in] be The local element vector /// @param[in] cell The cell index /// @param[in] index The index of the cell in the active_cells (To fetch /// the appropriate coefficients) const auto assemble_local_cell_vector = [&](std::span be, std::span entity, std::int32_t index) { auto cell = entity.front(); // Fetch the coordinates of the cell auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(be, 0); fn(be.data(), coeffs.first.data() + index * coeffs.second, constants.data(), coordinate_dofs.data(), nullptr, nullptr); // Apply any required transformations dof_transform(be, cell_info, cell, 1); }; // Assemble over all active cells std::span active_cells = L.domain(dolfinx::fem::IntegralType::cell, i); _assemble_entities_impl(b, active_cells, *dofmap, mpc, fetch_cell, assemble_local_cell_vector); } } // Prepare permutations for exterior and interior facet integrals if (L.num_integrals(dolfinx::fem::IntegralType::exterior_facet) > 0) { // Create lambda function fetching cell index from exterior facet entity auto fetch_cell = [](auto entity) { return entity.front(); }; // Assemble exterior facet integral kernels for (int i : L.integral_ids(dolfinx::fem::IntegralType::exterior_facet)) { const auto& fn = L.kernel(dolfinx::fem::IntegralType::exterior_facet, i); const auto& coeffs = coefficients.at({dolfinx::fem::IntegralType::exterior_facet, i}); /// Assemble local exterior facet kernels into a vector /// @param[in] be The local element vector /// @param[in] entity The entity, given as a cell index and the local /// index relative to the cell /// @param[in] index The index of entity in active_facets const auto assemble_local_exterior_facet_vector = [&](std::span be, std::span entity, std::size_t index) { // Fetch the coordinates of the cell const std::int32_t cell = entity[0]; const int local_facet = entity[1]; auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(be, 0); fn(be.data(), coeffs.first.data() + index * coeffs.second, constants.data(), coordinate_dofs.data(), &local_facet, nullptr); // Apply any required transformations dof_transform(be, cell_info, cell, 1); }; // Assemble over all active cells std::span active_facets = L.domain(dolfinx::fem::IntegralType::exterior_facet, i); _assemble_entities_impl(b, active_facets, *dofmap, mpc, fetch_cell, assemble_local_exterior_facet_vector); } } if (L.num_integrals(dolfinx::fem::IntegralType::interior_facet) > 0) { throw std::runtime_error( "Interior facet integrals currently not supported"); // std::function get_perm; // if (L.needs_facet_permutations()) // { // const int tdim = mesh->topology()->dim(); // mesh->topology_mutable().create_connectivity(tdim - 1, tdim); // mesh->topology_mutable().create_entity_permutations(); // const std::vector& perms // = mesh->topology()->get_facet_permutations(); // get_perm = [&perms](std::size_t i) { return perms[i]; }; // } // else // get_perm = [](std::size_t) { return 0; }; } } } // namespace //----------------------------------------------------------------------------- void dolfinx_mpc::assemble_vector( std::span b, const dolfinx::fem::Form& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc) { _assemble_vector(b, L, mpc); } void dolfinx_mpc::assemble_vector( std::span> b, const dolfinx::fem::Form>& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc) { _assemble_vector>(b, L, mpc); } void dolfinx_mpc::assemble_vector( std::span b, const dolfinx::fem::Form& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc) { _assemble_vector(b, L, mpc); } void dolfinx_mpc::assemble_vector( std::span> b, const dolfinx::fem::Form>& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc) { _assemble_vector>(b, L, mpc); } //----------------------------------------------------------------------------- dolfinx_mpc-0.9.1/cpp/assemble_vector.h000066400000000000000000000104761476141270300201500ustar00rootroot00000000000000// Copyright (C) 2021 Jorgen S. Dokken, Nathan Sime, and Connor D. Pierce // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "MultiPointConstraint.h" #include "assemble_utils.h" #include #include #include using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; namespace dolfinx_mpc { template class MultiPointConstraint; /// Given a local element vector, move all slave contributions to the global /// (local to process) vector. /// @param [in, out] b The global (local to process) vector /// @param [in, out] b_local The local element vector /// @param [in] b_local_copy Copy of the local element vector /// @param [in] dofs The cell dofs (blocked) /// @param [in] num_dofs The number of degrees of freedom in the local vector /// @param [in] bs The element block size /// @param [in] is_slave Vector indicating if a dof is a slave /// @param [in] slaves The slave dofs (local to process) /// @param [in] masters Adjacency list with master dofs /// @param [in] coeffs Adjacency list with the master coefficients template void modify_mpc_vec( const std::span& b, const std::span& b_local, const std::span& b_local_copy, std::span dofs, const int num_dofs, const int bs, std::span is_slave, std::span slaves, const std::shared_ptr>& masters, const std::shared_ptr>& coeffs) { // NOTE: Should this be moved into the MPC constructor? // Get local index of slaves in cell std::vector local_index = compute_local_slave_index(slaves, num_dofs, bs, dofs, is_slave); // Move contribution from each slave to corresponding master dof for (std::size_t i = 0; i < local_index.size(); i++) { auto masters_i = masters->links(slaves[i]); auto coeffs_i = coeffs->links(slaves[i]); assert(masters_i.size() == coeffs_i.size()); for (std::size_t j = 0; j < masters_i.size(); j++) { if constexpr (std::is_scalar_v) // Use standard transpose for type double b[masters_i[j]] += coeffs_i[j] * b_local_copy[local_index[i]]; else // Use Hermitian transpose for type std::complex b[masters_i[j]] += std::conj(coeffs_i[j]) * b_local_copy[local_index[i]]; b_local[local_index[i]] = 0; } } } /// Assemble a linear form into a vector /// @param[in] b The vector to be assembled. It will not be zeroed before /// assembly. /// @param[in] L The linear forms to assemble into b /// @param[in] mpc The multi-point constraint void assemble_vector( std::span b, const dolfinx::fem::Form& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc); /// Assemble a linear form into a vector /// @param[in] b The vector to be assembled. It will not be zeroed before /// assembly. /// @param[in] L The linear forms to assemble into b /// @param[in] mpc The multi-point constraint void assemble_vector( std::span> b, const dolfinx::fem::Form>& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc); /// Assemble a linear form into a vector /// @param[in] b The vector to be assembled. It will not be zeroed before /// assembly. /// @param[in] L The linear forms to assemble into b /// @param[in] mpc The multi-point constraint void assemble_vector( std::span b, const dolfinx::fem::Form& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc); /// Assemble a linear form into a vector /// @param[in] b The vector to be assembled. It will not be zeroed before /// assembly. /// @param[in] L The linear forms to assemble into b /// @param[in] mpc The multi-point constraint void assemble_vector( std::span> b, const dolfinx::fem::Form>& L, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc); } // namespace dolfinx_mpc dolfinx_mpc-0.9.1/cpp/dolfinx_mpc.h000066400000000000000000000003451476141270300172670ustar00rootroot00000000000000#pragma once // DOLFINX_MPC interface #include #include #include #include #include #include #include dolfinx_mpc-0.9.1/cpp/lifting.h000066400000000000000000000560351476141270300164300ustar00rootroot00000000000000// Copyright (C) 2021 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "MultiPointConstraint.h" #include "assemble_vector.h" #include #include #include #include #include #include #include namespace impl { /// Implementation of bc application (lifting) for an given a set of integration /// entities /// @tparam T The scalar type /// @tparam E_DESC Description of the set of entities /// @param[in, out] b The vector to apply lifting to /// @param[in] active_entities Set of active entities (either cells, exterior /// facets or interior facets in their specified format) /// @param[in] dofmap0 The dofmap for the rows of the matrix /// @param[in] dofmap0 The dofmap for the columns of the matrix /// @param[in] bc_values1 Array of Dirichlet condition values for dofs local to /// process /// @param[in] bc_markers1 Array indicating what dofs local to process is in a /// DirichletBC /// @param[in] mpc1 Multipoint constraints to apply to the rows of the vector /// @param[in] fetch_cells Function that fetches the cell index for each active /// entity /// @param[in] lift_local_vector Function that lift local matrix Ae into local /// vector be, i.e. be <- be - scale * (A (g - x0)) /// @tparam T Scalartype of local vector /// @tparam estride Stride in actiave entities template void lift_bc_entities( std::span b, std::span active_entities, const dolfinx::fem::DofMap& dofmap0, const dolfinx::fem::DofMap& dofmap1, std::span bc_values1, std::span bc_markers1, const dolfinx_mpc::MultiPointConstraint& mpc1, const std::function)> fetch_cells, const std::function, std::span, const int, const int, std::span, std::size_t)> lift_local_vector) { const int bs0 = dofmap0.bs(); const int bs1 = dofmap1.bs(); // Get MPC data const std::shared_ptr> masters = mpc1.masters(); const std::shared_ptr> coefficients = mpc1.coefficients(); std::span is_slave = mpc1.is_slave(); const std::shared_ptr> cell_to_slaves = mpc1.cell_to_slaves(); const int num_dofs0 = dofmap0.map().extent(1); std::vector be; std::vector be_copy; std::vector Ae; // Assemble over all entities for (std::size_t e = 0; e < active_entities.size(); e += estride) { auto entity = active_entities.subspan(e, estride); const std::int32_t cell = fetch_cells(entity); // Size data structure for assembly auto dmap0 = dofmap0.cell_dofs(cell); auto dmap1 = dofmap1.cell_dofs(cell); const int num_rows = bs0 * dmap0.size(); const int num_cols = bs1 * dmap1.size(); be.resize(num_rows); Ae.resize(num_rows * num_cols); // Check if bc is applied to entity bool has_bc = false; std::ranges::for_each(dmap1, [&bc_markers1, bs1, &has_bc](const auto dof) { for (int k = 0; k < bs1; ++k) { assert(bs1 * dof + k < (int)bc_markers1.size()); if (bc_markers1[bs1 * dof + k]) { has_bc = true; break; } } }); if (!has_bc) continue; // Lift into local element vector const std::span _be(be); const std::span _Ae(Ae); lift_local_vector(_be, _Ae, num_rows, num_cols, entity, e / estride); // Modify local element matrix if entity is connected to a slave cell std::span slaves = cell_to_slaves->links(cell); if (slaves.size() > 0) { // Modify element vector for MPC and insert into b for non-local // contributions be_copy.resize(num_rows); std::ranges::copy(be, be_copy.begin()); const std::span _be_copy(be_copy); dolfinx_mpc::modify_mpc_vec(b, _be, _be_copy, dmap0, dmap0.size(), bs0, is_slave, slaves, masters, coefficients); } // Add local contribution to b for (int i = 0; i < num_dofs0; ++i) for (int k = 0; k < bs0; ++k) b[bs0 * dmap0[i] + k] += be[bs0 * i + k]; } }; /// Modify b such that: /// /// b <- b - scale * K^T (A (g - x0)) /// /// The boundary conditions bcs are on the trial spaces V_j. /// The forms in [a] must have the same test space as L (from /// which b was built), but the trial space may differ /// @param[in,out] b The vector to be modified /// @param[in] a The bilinear forms, where a is the form that /// generates A /// @param[in] bcs List of boundary conditions /// @param[in] x0 The function to subtract /// @param[in] scale Scale of lifting /// @param[in] mpc1 The multi point constraints template void apply_lifting( std::span b, const std::shared_ptr> a, const std::vector>>& bcs, const std::span& x0, T scale, const std::shared_ptr>& mpc1) { const std::vector constants = pack_constants(*a); auto coeff_vec = dolfinx::fem::allocate_coefficient_storage(*a); dolfinx::fem::pack_coefficients(*a, coeff_vec); auto coefficients = dolfinx::fem::make_coefficients_span(coeff_vec); std::span is_slave = mpc1->is_slave(); // Create 1D arrays of bc values and bc indicator std::vector bc_markers1; std::vector bc_values1; assert(a->function_spaces().at(1)); auto V1 = a->function_spaces().at(1); auto map1 = V1->dofmap()->index_map; const int bs1 = V1->dofmap()->index_map_bs(); assert(map1); const int crange = bs1 * (map1->size_local() + map1->num_ghosts()); bc_markers1.assign(crange, false); bc_values1.assign(crange, 0.0); for (const std::shared_ptr>& bc : bcs) { bc->mark_dofs(bc_markers1); bc->set(bc_values1, std::nullopt, 1); } // Extract dofmaps for columns and rows of a assert(a->function_spaces().at(0)); auto dofmap1 = V1->dofmap(); auto element1 = V1->element(); auto dofmap0 = a->function_spaces()[0]->dofmap(); const int bs0 = a->function_spaces()[0]->dofmap()->bs(); auto element0 = a->function_spaces()[0]->element(); const bool needs_transformation_data = element0->needs_dof_transformations() or element1->needs_dof_transformations() or a->needs_facet_permutations(); auto mesh = a->function_spaces()[0]->mesh(); // Prepare cell geometry MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = mesh->geometry().dofmap(); std::span x_g = mesh->geometry().x(); const int tdim = mesh->topology()->dim(); const std::size_t num_dofs_g = x_dofmap.extent(1); std::vector coordinate_dofs(3 * num_dofs_g); std::span cell_info; if (needs_transformation_data) { mesh->topology_mutable()->create_entity_permutations(); cell_info = std::span(mesh->topology()->get_cell_permutation_info()); } // Get dof-transformations for the element matrix const std::function&, const std::span&, std::int32_t, int)> dof_transform = element0->template dof_transformation_fn( dolfinx::fem::doftransform::standard); const std::function&, const std::span&, std::int32_t, int)> dof_transform_to_transpose = element1->template dof_transformation_right_fn( dolfinx::fem::doftransform::transpose); // Loop over cell integrals and lift bc if (a->num_integrals(dolfinx::fem::IntegralType::cell) > 0) { const auto fetch_cells = [&](std::span entity) { return entity.front(); }; for (int i : a->integral_ids(dolfinx::fem::IntegralType::cell)) { const auto& coeffs = coefficients.at({dolfinx::fem::IntegralType::cell, i}); const auto& kernel = a->kernel(dolfinx::fem::IntegralType::cell, i); // Function that lift bcs for cell kernels const auto lift_bcs_cell = [&](std::span be, std::span Ae, std::int32_t num_rows, std::int32_t num_cols, std::span entity, std::size_t index) { auto cell = entity.front(); // Fetch the coordinates of the cell auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(Ae, 0); kernel(Ae.data(), coeffs.first.data() + index * coeffs.second, constants.data(), coordinate_dofs.data(), nullptr, nullptr); dof_transform(Ae, cell_info, cell, num_cols); dof_transform_to_transpose(Ae, cell_info, cell, num_rows); auto dmap1 = dofmap1->cell_dofs(cell); std::ranges::fill(be, 0); for (std::size_t j = 0; j < dmap1.size(); ++j) { for (std::int32_t k = 0; k < bs1; k++) { const std::int32_t jj = bs1 * dmap1[j] + k; assert(jj < (int)bc_markers1.size()); // Add this in once we have interface for rhs of MPCs // MPCs overwrite Dirichlet conditions // if (is_slave[jj]) // { // // Lift MPC values // const T val = mpc_consts[jj]; // for (int m = 0; m < num_rows; ++m) // be[m] -= Ae[m * num_cols + bs1 * j + k] * val; // } // else if (bc_markers1[jj]) { const T bc = bc_values1[jj]; const T _x0 = x0.empty() ? 0.0 : x0[jj]; for (int m = 0; m < num_rows; ++m) be[m] -= Ae[m * num_cols + bs1 * j + k] * scale * (bc - _x0); } } } }; // Assemble over all active cells std::span cells = a->domain(dolfinx::fem::IntegralType::cell, i); lift_bc_entities(b, cells, *dofmap0, *dofmap1, std::span(bc_values1), std::span(bc_markers1), *mpc1, fetch_cells, lift_bcs_cell); } } // Prepare permutations for exterior and interior facet integrals if (a->num_integrals(dolfinx::fem::IntegralType::exterior_facet) > 0) { // Create lambda function fetching cell index from exterior facet entity const auto fetch_cell = [&](std::span entity) { return entity.front(); }; // Get number of cells per facet to be able to get the facet permutation const int tdim = mesh->topology()->dim(); for (int i : a->integral_ids(dolfinx::fem::IntegralType::exterior_facet)) { const auto& coeffs = coefficients.at({dolfinx::fem::IntegralType::exterior_facet, i}); const auto& kernel = a->kernel(dolfinx::fem::IntegralType::exterior_facet, i); /// Assemble local exterior facet kernels into a vector /// @param[in] be The local element vector /// @param[in] entity The entity, given as a cell index and the local /// index relative to the cell /// @param[in] index The index of the facet in the active_facets (To fetch /// the appropriate coefficients) const auto lift_bc_exterior_facet = [&](std::span be, std::span Ae, int num_rows, int num_cols, std::span entity, std::size_t index) { // Fetch the coordiantes of the cell const std::int32_t cell = entity[0]; const int local_facet = entity[1]; // Fetch the coordinates of the cell auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < x_dofs.size(); ++i) { std::ranges::copy_n(std::next(x_g.begin(), 3 * x_dofs[i]), 3, std::next(coordinate_dofs.begin(), 3 * i)); } // Tabulate tensor std::ranges::fill(Ae, 0); kernel(Ae.data(), coeffs.first.data() + index * coeffs.second, constants.data(), coordinate_dofs.data(), &local_facet, nullptr); dof_transform(Ae, cell_info, cell, num_cols); dof_transform_to_transpose(Ae, cell_info, cell, num_rows); auto dmap1 = dofmap1->cell_dofs(cell); std::ranges::fill(be, 0); for (std::size_t j = 0; j < dmap1.size(); ++j) { for (std::int32_t k = 0; k < bs1; k++) { const std::int32_t jj = bs1 * dmap1[j] + k; assert(jj < (int)bc_markers1.size()); // Add this in once we have interface for rhs of MPCs // MPCs overwrite Dirichlet conditions // if (is_slave[jj]) // { // // Lift MPC values // const T val = mpc_consts[jj]; // for (int m = 0; m < num_rows; ++m) // be[m] -= Ae[m * num_cols + bs1 * j + k] * val; // } // else if (bc_markers1[jj]) { const T bc = bc_values1[jj]; const T _x0 = x0.empty() ? 0.0 : x0[jj]; for (int m = 0; m < num_rows; ++m) be[m] -= Ae[m * num_cols + bs1 * j + k] * scale * (bc - _x0); } } } }; // Assemble over all active cells std::span active_facets = a->domain(dolfinx::fem::IntegralType::exterior_facet, i); impl::lift_bc_entities(b, active_facets, *dofmap0, *dofmap1, bc_values1, bc_markers1, *mpc1, fetch_cell, lift_bc_exterior_facet); } } if (a->num_integrals(dolfinx::fem::IntegralType::interior_facet) > 0) { throw std::runtime_error( "Interior facet integrals currently not supported"); // std::function get_perm; // if (a->needs_facet_permutations()) // { // mesh->topology_mutable().create_connectivity(tdim - 1, tdim); // mesh->topology_mutable().create_entity_permutations(); // const std::vector& perms // = mesh->topology()->get_facet_permutations(); // get_perm = [&perms](std::size_t i) { return perms[i]; }; // } // else // get_perm = [](std::size_t) { return 0; }; } } } // namespace impl namespace dolfinx_mpc { /// Modify b such that: /// /// b <- b - scale * K^T (A_j (g_j 0 x0_j)) /// /// where j is a block (nest) row index and K^T is the reduction matrix stemming /// from the multi point constraint. For non - blocked problems j = 0. /// The boundary conditions bcs1 are on the trial spaces V_j. /// The forms in [a] must have the same test space as L (from /// which b was built), but the trial space may differ. If x0 is not /// supplied, then it is treated as 0. /// @param[in,out] b The vector to be modified /// @param[in] a The bilinear formss, where a[j] is the form that /// generates A[j] /// @param[in] bcs List of boundary conditions for each block, i.e. bcs1[2] /// are the boundary conditions applied to the columns of a[2] / x0[2] /// block. /// @param[in] x0 The vectors used in the lifitng. /// @param[in] scale Scaling to apply /// @param[in] mpc The multi point constraints void apply_lifting( std::span b, const std::vector>> a, const std::vector< std::vector>>>& bcs1, const std::vector>& x0, double scale, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc) { if (!x0.empty() and x0.size() != a.size()) { throw std::runtime_error( "Mismatch in size between x0 and bilinear form in assembler."); } if (a.size() != bcs1.size()) { throw std::runtime_error( "Mismatch in size between a and bcs in assembler."); } for (std::size_t j = 0; j < a.size(); ++j) { if (x0.empty()) { impl::apply_lifting(b, a[j], bcs1[j], std::span(), scale, mpc); } else { impl::apply_lifting(b, a[j], bcs1[j], x0[j], scale, mpc); } } } /// Modify b such that: /// /// b <- b - scale * K^T (A_j (g_j 0 x0_j)) /// /// where j is a block (nest) row index and K^T is the reduction matrix stemming /// from the multi point constraint. For non - blocked problems j = 0. /// The boundary conditions bcs1 are on the trial spaces V_j. /// The forms in [a] must have the same test space as L (from /// which b was built), but the trial space may differ. If x0 is not /// supplied, then it is treated as 0. /// @param[in,out] b The vector to be modified /// @param[in] a The bilinear formss, where a[j] is the form that /// generates A[j] /// @param[in] bcs List of boundary conditions for each block, i.e. bcs1[2] /// are the boundary conditions applied to the columns of a[2] / x0[2] /// block. /// @param[in] x0 The vectors used in the lifitng. /// @param[in] scale Scaling to apply /// @param[in] mpc The multi point constraints void apply_lifting( std::span> b, const std::vector< std::shared_ptr>>> a, const std::vector>>>>& bcs1, const std::vector>>& x0, std::complex scale, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, double>>& mpc) { if (!x0.empty() and x0.size() != a.size()) { throw std::runtime_error( "Mismatch in size between x0 and bilinear form in assembler."); } if (a.size() != bcs1.size()) { throw std::runtime_error( "Mismatch in size between a and bcs in assembler."); } for (std::size_t j = 0; j < a.size(); ++j) { if (x0.empty()) { impl::apply_lifting>( b, a[j], bcs1[j], std::span>(), scale, mpc); } else { impl::apply_lifting>(b, a[j], bcs1[j], x0[j], scale, mpc); } } } /// Modify b such that: /// /// b <- b - scale * K^T (A_j (g_j 0 x0_j)) /// /// where j is a block (nest) row index and K^T is the reduction matrix stemming /// from the multi point constraint. For non - blocked problems j = 0. /// The boundary conditions bcs1 are on the trial spaces V_j. /// The forms in [a] must have the same test space as L (from /// which b was built), but the trial space may differ. If x0 is not /// supplied, then it is treated as 0. /// @param[in,out] b The vector to be modified /// @param[in] a The bilinear formss, where a[j] is the form that /// generates A[j] /// @param[in] bcs List of boundary conditions for each block, i.e. bcs1[2] /// are the boundary conditions applied to the columns of a[2] / x0[2] /// block. /// @param[in] x0 The vectors used in the lifitng. /// @param[in] scale Scaling to apply /// @param[in] mpc The multi point constraints void apply_lifting( std::span b, const std::vector>> a, const std::vector< std::vector>>>& bcs1, const std::vector>& x0, float scale, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint>& mpc) { if (!x0.empty() and x0.size() != a.size()) { throw std::runtime_error( "Mismatch in size between x0 and bilinear form in assembler."); } if (a.size() != bcs1.size()) { throw std::runtime_error( "Mismatch in size between a and bcs in assembler."); } for (std::size_t j = 0; j < a.size(); ++j) { if (x0.empty()) { impl::apply_lifting(b, a[j], bcs1[j], std::span(), scale, mpc); } else { impl::apply_lifting(b, a[j], bcs1[j], x0[j], scale, mpc); } } } /// Modify b such that: /// /// b <- b - scale * K^T (A_j (g_j 0 x0_j)) /// /// where j is a block (nest) row index and K^T is the reduction matrix stemming /// from the multi point constraint. For non - blocked problems j = 0. /// The boundary conditions bcs1 are on the trial spaces V_j. /// The forms in [a] must have the same test space as L (from /// which b was built), but the trial space may differ. If x0 is not /// supplied, then it is treated as 0. /// @param[in,out] b The vector to be modified /// @param[in] a The bilinear formss, where a[j] is the form that /// generates A[j] /// @param[in] bcs List of boundary conditions for each block, i.e. bcs1[2] /// are the boundary conditions applied to the columns of a[2] / x0[2] /// block. /// @param[in] x0 The vectors used in the lifitng. /// @param[in] scale Scaling to apply /// @param[in] mpc The multi point constraints void apply_lifting( std::span> b, const std::vector< std::shared_ptr>>> a, const std::vector>>>>& bcs1, const std::vector>>& x0, std::complex scale, const std::shared_ptr< const dolfinx_mpc::MultiPointConstraint, float>>& mpc) { if (!x0.empty() and x0.size() != a.size()) { throw std::runtime_error( "Mismatch in size between x0 and bilinear form in assembler."); } if (a.size() != bcs1.size()) { throw std::runtime_error( "Mismatch in size between a and bcs in assembler."); } for (std::size_t j = 0; j < a.size(); ++j) { if (x0.empty()) { impl::apply_lifting>( b, a[j], bcs1[j], std::span>(), scale, mpc); } else { impl::apply_lifting>(b, a[j], bcs1[j], x0[j], scale, mpc); } } } } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/mpc_helpers.h000066400000000000000000000222311476141270300172640ustar00rootroot00000000000000// Copyright (C) 2021 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include #include namespace dolfinx_mpc { /// Create a map from cell to a set of dofs /// @param[in] The degrees of freedom (local to process) /// @tparam The floating type of the mesh /// @returns The map from cell index (local to process) to dofs (local to /// process) in the cell template std::shared_ptr> create_cell_to_dofs_map(const dolfinx::fem::FunctionSpace& V, std::span dofs) { const auto& mesh = *(V.mesh()); const dolfinx::fem::DofMap& dofmap = *(V.dofmap()); const int tdim = mesh.topology()->dim(); const int num_cells = mesh.topology()->index_map(tdim)->size_local(); const std::int32_t local_size = dofmap.index_map->size_local() + dofmap.index_map->num_ghosts(); const std::int32_t block_size = dofmap.index_map_bs(); // Create dof -> cells map where only slave dofs have entries std::shared_ptr> cell_map; { std::vector num_slave_cells(local_size * block_size, 0); std::vector in_num_cells(local_size * block_size, 0); // Loop through all cells and count number of cells a dof occurs in for (std::int32_t i = 0; i < num_cells; i++) for (auto block : dofmap.cell_dofs(i)) for (std::int32_t j = 0; j < block_size; j++) in_num_cells[block * block_size + j]++; // Count only number of slave cells for dofs for (auto dof : dofs) num_slave_cells[dof] = in_num_cells[dof]; std::vector insert_position(local_size * block_size, 0); std::vector cell_offsets(local_size * block_size + 1); cell_offsets[0] = 0; std::inclusive_scan(num_slave_cells.begin(), num_slave_cells.end(), cell_offsets.begin() + 1); std::vector cell_data(cell_offsets.back()); // Accumulate those cells whose contains a slave dof for (std::int32_t i = 0; i < num_cells; i++) { for (auto block : dofmap.cell_dofs(i)) { for (std::int32_t j = 0; j < block_size; j++) { if (const std::int32_t dof = block * block_size + j; num_slave_cells[dof] > 0) { cell_data[cell_offsets[dof] + insert_position[dof]++] = i; } } } } cell_map = std::make_shared>( cell_data, cell_offsets); } // Create inverse map (cells -> slave dofs) std::vector num_slaves(num_cells, 0); for (std::int32_t i = 0; i < cell_map->num_nodes(); i++) for (auto cell : cell_map->links(i)) num_slaves[cell]++; std::vector insert_position(num_cells, 0); std::vector dof_offsets(num_cells + 1); dof_offsets[0] = 0; std::inclusive_scan(num_slaves.begin(), num_slaves.end(), dof_offsets.begin() + 1); std::vector dof_data(dof_offsets.back()); for (std::int32_t i = 0; i < cell_map->num_nodes(); i++) for (auto cell : cell_map->links(i)) dof_data[dof_offsets[cell] + insert_position[cell]++] = i; return std::make_shared>( dof_data, dof_offsets); } /// Given a list of global degrees of freedom, map them to their local index /// @param[in] V The original function space /// @param[in] global_dofs The list of dofs (global index) /// @tparam The floating type of the mesh /// @returns List of local dofs template std::vector map_dofs_global_to_local(const dolfinx::fem::FunctionSpace& V, const std::vector& global_dofs) { const std::size_t num_dofs = global_dofs.size(); const std::int32_t& block_size = V.dofmap()->index_map_bs(); const std::shared_ptr imap = V.dofmap()->index_map; std::vector global_blocks; global_blocks.reserve(num_dofs); std::vector remainders; remainders.reserve(num_dofs); std::ranges::for_each( global_dofs, [block_size, &global_blocks, &remainders](const auto slave) { global_blocks.push_back(slave / block_size); remainders.push_back(slave % block_size); }); // Compute the new local index of the master blocks std::vector local_blocks(num_dofs); imap->global_to_local(global_blocks, local_blocks); // Go from blocks to actual local dof for (std::size_t i = 0; i < local_blocks.size(); i++) local_blocks[i] = local_blocks[i] * block_size + remainders[i]; return local_blocks; } /// Create an function space with an extended index map, where all input dofs /// (global index) is added to the local index map as ghosts. /// @param[in] V The original function space /// @param[in] global_dofs The list of master dofs (global index) /// @param[in] owners The owners of the master degrees of freedom /// @tparam The floating type of the mesh template dolfinx::fem::FunctionSpace create_extended_functionspace(const dolfinx::fem::FunctionSpace& V, const std::vector& global_dofs, const std::vector& owners) { dolfinx::common::Timer timer( "~MPC: Create new index map with additional ghosts"); MPI_Comm comm = V.mesh()->comm(); const dolfinx::fem::DofMap& old_dofmap = *(V.dofmap()); std::shared_ptr old_index_map = old_dofmap.index_map; const std::int32_t& block_size = V.dofmap()->index_map_bs(); // Compute local master block index. const std::size_t num_dofs = global_dofs.size(); std::vector global_blocks(num_dofs); std::vector local_blocks(num_dofs); std::ranges::transform(global_dofs, global_blocks.begin(), [block_size](const auto dof) { return dof / block_size; }); int mpi_size = -1; MPI_Comm_size(comm, &mpi_size); std::shared_ptr new_index_map; if (mpi_size == 1) { new_index_map = old_index_map; } else { // Map global master blocks to local blocks V.dofmap()->index_map->global_to_local(global_blocks, local_blocks); // Check which local masters that are not on the process already std::vector additional_ghosts; additional_ghosts.reserve(num_dofs); std::vector additional_owners; additional_owners.reserve(num_dofs); for (std::size_t i = 0; i < num_dofs; i++) { // Check if master block already has a local index and // if has has already been ghosted, which is the case // when we have multiple masters from the same block if ((local_blocks[i] == -1) and (std::ranges::find(additional_ghosts, global_blocks[i]) == additional_ghosts.end())) { additional_ghosts.push_back(global_blocks[i]); additional_owners.push_back(owners[i]); } } // Append new ghosts (and corresponding rank) at the end of the old set of // ghosts originating from the old index map std::span ghost_owners = old_index_map->owners(); std::span ghosts = old_index_map->ghosts(); const std::int32_t num_ghosts = ghosts.size(); assert(ghost_owners.size() == ghosts.size()); std::vector all_ghosts(num_ghosts + additional_ghosts.size()); std::ranges::copy(ghosts, all_ghosts.begin()); std::ranges::copy(additional_ghosts, all_ghosts.begin() + num_ghosts); std::vector all_owners(all_ghosts.size()); std::ranges::copy(ghost_owners, all_owners.begin()); std::ranges::copy(additional_owners, all_owners.begin() + num_ghosts); // Create new indexmap with ghosts for master blocks added new_index_map = std::make_shared( comm, old_index_map->size_local(), all_ghosts, all_owners); } // Extract information from the old dofmap to create a new one MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> dofmap_adj = old_dofmap.map(); // Copy dofmap std::vector flattened_dofmap; flattened_dofmap.reserve(dofmap_adj.extent(0) * dofmap_adj.extent(1)); for (std::size_t i = 0; i < dofmap_adj.extent(0); ++i) for (std::size_t j = 0; j < dofmap_adj.extent(1); ++j) flattened_dofmap.push_back(dofmap_adj(i, j)); auto element = V.element(); // Create the new dofmap based on the extended index map auto new_dofmap = std::make_shared( old_dofmap.element_dof_layout(), new_index_map, old_dofmap.bs(), std::move(flattened_dofmap), old_dofmap.bs()); return dolfinx::fem::FunctionSpace( V.mesh(), element, new_dofmap, dolfinx::fem::compute_value_shape(element, V.mesh()->topology()->dim(), V.mesh()->geometry().dim())); } } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/mpi_utils.cpp000066400000000000000000000033141476141270300173240ustar00rootroot00000000000000// Copyright (C) 2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "mpi_utils.h" MPI_Comm dolfinx_mpc::create_owner_to_ghost_comm(const dolfinx::common::IndexMap& map) { // Get source (owner of ghosts) and destination (processes that // ghost an owned index) ranks std::span src_ranks = map.src(); std::span dest_ranks = map.dest(); // Check that src and dest ranks are unique and sorted assert(std::ranges::is_sorted(src_ranks)); assert(std::ranges::is_sorted(dest_ranks)); // Create communicators with directed edges owner -> ghost, MPI_Comm comm; MPI_Dist_graph_create_adjacent(map.comm(), (int)src_ranks.size(), src_ranks.data(), MPI_UNWEIGHTED, (int)dest_ranks.size(), dest_ranks.data(), MPI_UNWEIGHTED, MPI_INFO_NULL, false, &comm); return comm; } //------------------------------------------------------------------------------- std::pair, std::vector> dolfinx_mpc::compute_neighborhood(const MPI_Comm& comm) { int status; MPI_Topo_test(comm, &status); assert(status != MPI_UNDEFINED); // Get list of neighbors int indegree(-1); int outdegree(-2); int weighted(-1); MPI_Dist_graph_neighbors_count(comm, &indegree, &outdegree, &weighted); std::vector src_ranks(indegree); std::vector dest_ranks(outdegree); MPI_Dist_graph_neighbors(comm, indegree, src_ranks.data(), MPI_UNWEIGHTED, outdegree, dest_ranks.data(), MPI_UNWEIGHTED); return {src_ranks, dest_ranks}; } //------------------------------------------------------------------------------- dolfinx_mpc-0.9.1/cpp/mpi_utils.h000066400000000000000000000010761476141270300167740ustar00rootroot00000000000000// Copyright (C) 2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include #include namespace dolfinx_mpc { /// @brief Create a MPI-communicator from owners in the index map to the /// processes with ghosts /// /// @param[in] map The index map /// @returns The mpi communicator MPI_Comm create_owner_to_ghost_comm(const dolfinx::common::IndexMap& map); std::pair, std::vector> compute_neighborhood(const MPI_Comm& comm); } // namespace dolfinx_mpcdolfinx_mpc-0.9.1/cpp/utils.cpp000066400000000000000000000153101476141270300164560ustar00rootroot00000000000000// Copyright (C) 2020 Jorgen S. Dokken and Nathan Sime // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include "utils.h" #include #include #include #include #include #include using namespace dolfinx_mpc; //----------------------------------------------------------------------------- std::array dolfinx_mpc::create_neighborhood_comms( MPI_Comm comm, const dolfinx::mesh::MeshTags& meshtags, const bool has_slave, std::int32_t& master_marker) { int mpi_size = -1; MPI_Comm_size(comm, &mpi_size); int rank = -1; MPI_Comm_rank(comm, &rank); std::uint8_t slave_val = has_slave ? 1 : 0; std::vector has_slaves(mpi_size, slave_val); // Check if entities if master entities are on this processor std::vector has_masters(mpi_size, 0); if (std::ranges::find(meshtags.values(), master_marker) != meshtags.values().end()) std::ranges::fill(has_masters, 1); // Get received data sizes from each rank std::vector procs_with_masters(mpi_size, -1); MPI_Alltoall(has_masters.data(), 1, MPI_UINT8_T, procs_with_masters.data(), 1, MPI_UINT8_T, comm); std::vector procs_with_slaves(mpi_size, -1); MPI_Alltoall(has_slaves.data(), 1, MPI_UINT8_T, procs_with_slaves.data(), 1, MPI_UINT8_T, comm); // Create communicator with edges slaves (sources) -> masters (destinations) std::vector source_edges; std::vector dest_edges; // If current rank owns masters add all slaves as source edges if (procs_with_masters[rank] == 1) for (int i = 0; i < mpi_size; ++i) if ((i != rank) && (procs_with_slaves[i] == 1)) source_edges.push_back(i); // If current rank owns a slave add all masters as destinations if (procs_with_slaves[rank] == 1) for (int i = 0; i < mpi_size; ++i) if ((i != rank) && (procs_with_masters[i] == 1)) dest_edges.push_back(i); std::array comms{MPI_COMM_NULL, MPI_COMM_NULL}; // Create communicator with edges slaves (sources) -> masters (destinations) { std::vector source_weights(source_edges.size(), 1); std::vector dest_weights(dest_edges.size(), 1); MPI_Dist_graph_create_adjacent( comm, source_edges.size(), source_edges.data(), source_weights.data(), dest_edges.size(), dest_edges.data(), dest_weights.data(), MPI_INFO_NULL, false, &comms[0]); } // Create communicator with edges masters (sources) -> slaves (destinations) { std::vector source_weights(dest_edges.size(), 1); std::vector dest_weights(source_edges.size(), 1); MPI_Dist_graph_create_adjacent(comm, dest_edges.size(), dest_edges.data(), source_weights.data(), source_edges.size(), source_edges.data(), dest_weights.data(), MPI_INFO_NULL, false, &comms[1]); } return comms; } //----------------------------------------------------------------------------- MPI_Comm dolfinx_mpc::create_owner_to_ghost_comm( std::vector& local_blocks, std::vector& ghost_blocks, std::shared_ptr index_map) { // Get data from IndexMap std::span ghost_owners = index_map->owners(); const std::int32_t size_local = index_map->size_local(); dolfinx::graph::AdjacencyList shared_indices = index_map->index_to_dest_ranks(); MPI_Comm comm = create_owner_to_ghost_comm(*index_map); // Array of processors sending to the ghost_dofs std::set src_edges; // Array of processors the local_dofs are sent to std::set dst_edges; int rank = -1; MPI_Comm_rank(comm, &rank); for (auto block : local_blocks) for (auto proc : shared_indices.links(block)) dst_edges.insert(proc); for (auto block : ghost_blocks) src_edges.insert(ghost_owners[block - size_local]); MPI_Comm comm_loc = MPI_COMM_NULL; // Create communicator with edges owners (sources) -> ghosts (destinations) std::vector source_edges; source_edges.assign(src_edges.begin(), src_edges.end()); std::vector dest_edges; dest_edges.assign(dst_edges.begin(), dst_edges.end()); std::vector source_weights(source_edges.size(), 1); std::vector dest_weights(dest_edges.size(), 1); MPI_Dist_graph_create_adjacent(comm, source_edges.size(), source_edges.data(), source_weights.data(), dest_edges.size(), dest_edges.data(), dest_weights.data(), MPI_INFO_NULL, false, &comm_loc); return comm_loc; } std::vector dolfinx_mpc::create_block_to_cell_map(const dolfinx::mesh::Topology& topology, const dolfinx::fem::DofMap& dofmap, std::span blocks) { std::vector cells; cells.reserve(blocks.size()); // Create block -> cells map // Compute number of cells each dof is in auto imap = dofmap.index_map; const int size_local = imap->size_local(); std::span ghost_owners = imap->owners(); std::vector num_cells_per_dof(size_local + ghost_owners.size()); const int tdim = topology.dim(); auto cell_imap = topology.index_map(tdim); const int num_cells_local = cell_imap->size_local(); const int num_ghost_cells = cell_imap->num_ghosts(); for (std::int32_t i = 0; i < num_cells_local + num_ghost_cells; i++) { auto dofs = dofmap.cell_dofs(i); for (auto dof : dofs) num_cells_per_dof[dof]++; } std::vector cell_dofs_disp(num_cells_per_dof.size() + 1, 0); std::partial_sum(num_cells_per_dof.begin(), num_cells_per_dof.end(), cell_dofs_disp.begin() + 1); std::vector cell_map(cell_dofs_disp.back()); // Reuse num_cells_per_dof for insertion std::ranges::fill(num_cells_per_dof, 0); // Create the block -> cells map for (std::int32_t i = 0; i < num_cells_local + num_ghost_cells; i++) { auto dofs = dofmap.cell_dofs(i); for (auto dof : dofs) cell_map[cell_dofs_disp[dof] + num_cells_per_dof[dof]++] = i; } // Populate map from slaves to corresponding cell (choose first cell in map) std::ranges::for_each(blocks, [&cell_dofs_disp, &cell_map, &cells](const auto dof) { cells.push_back(cell_map[cell_dofs_disp[dof]]); }); assert(cells.size() == blocks.size()); return cells; } //----------------------------------------------------------------------------- dolfinx_mpc-0.9.1/cpp/utils.h000066400000000000000000001656511476141270300161410ustar00rootroot00000000000000// Copyright (C) 2020-2022 Jorgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #pragma once #include "MultiPointConstraint.h" #include "mpi_utils.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace impl { /// Create a map from each dof (block) found on the set of facets /// topologically, /// to the connecting facets /// @param[in] V The function space /// @param[in] dim The dimension of the entities /// @param[in] entities The list of entities /// @returns The map from each block (local + ghost) to the set of facets dolfinx::graph::AdjacencyList create_block_to_facet_map(dolfinx::mesh::Topology& topology, const dolfinx::fem::DofMap& dofmap, std::int32_t dim, std::span entities) { std::shared_ptr imap = dofmap.index_map; const std::int32_t tdim = topology.dim(); // Locate all dofs for each facet topology.create_connectivity(dim, tdim); topology.create_connectivity(tdim, dim); auto e_to_c = topology.connectivity(dim, tdim); auto c_to_e = topology.connectivity(tdim, dim); const std::int32_t num_dofs = imap->size_local() + imap->num_ghosts(); std::vector num_facets_per_dof(num_dofs); // Count how many facets each dof on process relates to std::vector local_indices(entities.size()); std::vector cells(entities.size()); for (std::size_t i = 0; i < entities.size(); ++i) { auto cell = e_to_c->links(entities[i]); assert(cell.size() == 1); cells[i] = cell[0]; // Get local index of facet with respect to the cell auto cell_entities = c_to_e->links(cell[0]); const auto it = std::find(cell_entities.begin(), cell_entities.end(), entities[i]); assert(it != cell_entities.end()); const auto local_entity = std::distance(cell_entities.begin(), it); local_indices[i] = local_entity; auto cell_blocks = dofmap.cell_dofs(cell[0]); auto closure_blocks = dofmap.element_dof_layout().entity_closure_dofs(dim, local_entity); std::ranges::for_each(closure_blocks, [&num_facets_per_dof, &cell_blocks](auto block) { const int dof = cell_blocks[block]; num_facets_per_dof[dof]++; }); } // Compute offsets std::vector offsets(num_dofs + 1); offsets[0] = 0; std::partial_sum(num_facets_per_dof.begin(), num_facets_per_dof.end(), offsets.begin() + 1); // Reuse data structure for insertion std::ranges::fill(num_facets_per_dof, 0); // Create dof->entities map std::vector data(offsets.back()); for (std::size_t i = 0; i < entities.size(); ++i) { auto cell_blocks = dofmap.cell_dofs(cells[i]); auto closure_blocks = dofmap.element_dof_layout().entity_closure_dofs( dim, local_indices[i]); std::for_each(closure_blocks.begin(), closure_blocks.end(), [&num_facets_per_dof, &data, &cell_blocks, &offsets, entity = entities[i]](auto block) { const int dof = cell_blocks[block]; data[offsets[dof] + num_facets_per_dof[dof]++] = entity; }); } return dolfinx::graph::AdjacencyList(data, offsets); } } // namespace impl namespace dolfinx_mpc { /// Structure to hold data after mpi communication template struct recv_data { std::vector num_masters_per_slave; std::vector masters; std::vector owners; std::vector coeffs; }; template struct mpc_data { std::vector slaves; std::vector masters; std::vector coeffs; std::vector offsets; std::vector owners; }; template class MultiPointConstraint; /// Given a function space, compute its shared entities template dolfinx::graph::AdjacencyList compute_shared_indices(std::shared_ptr> V) { return V->dofmap()->index_map->index_to_dest_ranks(); } template dolfinx::la::petsc::Matrix create_matrix( const dolfinx::fem::Form& a, const std::shared_ptr> mpc0, const std::shared_ptr> mpc1, const std::string& type = std::string()) { dolfinx::common::Timer timer("~MPC: Create Matrix"); // Build sparsitypattern dolfinx::la::SparsityPattern pattern = create_sparsity_pattern(a, mpc0, mpc1); // Finalise communication dolfinx::common::Timer timer_s("~MPC: Assemble sparsity pattern"); pattern.finalize(); timer_s.stop(); // Initialize matrix dolfinx::la::petsc::Matrix A(a.mesh()->comm(), pattern, type); return A; } template dolfinx::la::petsc::Matrix create_matrix( const dolfinx::fem::Form& a, const std::shared_ptr> mpc, const std::string& type = std::string()) { return dolfinx_mpc::create_matrix(a, mpc, mpc, type); } /// Create neighborhood communicators from every processor with a slave dof on /// it, to the processors with a set of master facets. /// @param[in] comm The MPI communicator to base communications on /// @param[in] meshtags The meshtag /// @param[in] has_slaves Boolean saying if the processor owns slave dofs /// @param[in] master_marker Tag for the other interface std::array create_neighborhood_comms(MPI_Comm comm, const dolfinx::mesh::MeshTags& meshtags, const bool has_slave, std::int32_t& master_marker); /// Create neighbourhood communicators from a set of local indices to process /// who has these indices as ghosts. /// @param[in] local_dofs Vector of local blocks /// @param[in] ghost_dofs Vector of ghost blocks /// @param[in] index_map The index map relating procs and ghosts MPI_Comm create_owner_to_ghost_comm( std::vector& local_blocks, std::vector& ghost_blocks, std::shared_ptr index_map); /// Creates a normal approximation for the dofs in the closure of the attached /// facets, where the normal is an average if a dof belongs to multiple facets /// FIXME: Remove petsc dependency here template dolfinx::fem::Function create_normal_approximation(std::shared_ptr> V, std::int32_t dim, std::span entities) { dolfinx::graph::AdjacencyList block_to_entities = impl::create_block_to_facet_map(*V->mesh()->topology_mutable(), *V->dofmap(), dim, entities); // Create normal vector function and get local span dolfinx::fem::Function nh(V); Vec n_local; dolfinx::la::petsc::Vector n_vec( dolfinx::la::petsc::create_vector_wrap(*nh.x()), false); VecGhostGetLocalForm(n_vec.vec(), &n_local); PetscInt n = 0; VecGetSize(n_local, &n); PetscScalar* array = nullptr; VecGetArray(n_local, &array); std::span _n(array, n); const std::int32_t bs = V->dofmap()->index_map_bs(); std::array normal; std::array n_0; for (std::int32_t i = 0; i < block_to_entities.num_nodes(); i++) { auto ents = block_to_entities.links(i); if (ents.empty()) continue; // Sum all normal for entities std::vector normals = dolfinx::mesh::cell_normals(*V->mesh(), dim, ents); std::ranges::copy_n(normals.begin(), 3, n_0.begin()); std::ranges::copy_n(n_0.begin(), 3, normal.begin()); for (std::size_t j = 1; j < normals.size() / 3; ++j) { // Align direction of normal vectors n_0 and n_j U n_nj = std::transform_reduce( n_0.begin(), n_0.end(), std::next(normals.begin(), 3 * j), 0., std::plus{}, [](auto x, auto y) { return x * y; }); auto sign = n_nj / std::abs(n_nj); // NOTE: Could probably use std::transform for this operation for (std::size_t k = 0; k < 3; ++k) normal[k] += sign * normals[3 * j + k]; } std::ranges::copy_n(normal.begin(), bs, std::next(_n.begin(), i * bs)); } // Receive normals from other processes with dofs on the facets VecGhostUpdateBegin(n_vec.vec(), ADD_VALUES, SCATTER_REVERSE); VecGhostUpdateEnd(n_vec.vec(), ADD_VALUES, SCATTER_REVERSE); // Normalize nh auto imap = V->dofmap()->index_map; std::int32_t num_blocks = imap->size_local(); for (std::int32_t i = 0; i < num_blocks; i++) { PetscScalar acc = 0; for (std::int32_t j = 0; j < bs; j++) acc += _n[i * bs + j] * _n[i * bs + j]; if (U abs = std::abs(acc); abs > 1e-10) { for (std::int32_t j = 0; j < bs; j++) _n[i * bs + j] /= abs; } } VecGhostUpdateBegin(n_vec.vec(), INSERT_VALUES, SCATTER_FORWARD); VecGhostUpdateEnd(n_vec.vec(), INSERT_VALUES, SCATTER_FORWARD); return nh; } /// Append standard sparsity pattern for a given form to a pre-initialized /// pattern and a DofMap /// /// @note This function is almost a copy of /// dolfinx::fem::utils.h::create_sparsity_pattern /// @param[in] pattern The sparsity pattern /// @param[in] a The variational formulation template void build_standard_pattern(dolfinx::la::SparsityPattern& pattern, const dolfinx::fem::Form& a) { dolfinx::common::Timer timer("~MPC: Create sparsity pattern (Classic)"); if (a.rank() != 2) { throw std::runtime_error( "Cannot create sparsity pattern. Form is not a bilinear."); } // Get dof maps and mesh std::array, 2> dofmaps{ *a.function_spaces().at(0)->dofmap(), *a.function_spaces().at(1)->dofmap()}; std::shared_ptr mesh = a.mesh(); assert(mesh); std::shared_ptr mesh0 = a.function_spaces().at(0)->mesh(); assert(mesh0); std::shared_ptr mesh1 = a.function_spaces().at(1)->mesh(); assert(mesh1); const std::set types = a.integral_types(); if (types.find(dolfinx::fem::IntegralType::interior_facet) != types.end() or types.find(dolfinx::fem::IntegralType::exterior_facet) != types.end()) { // FIXME: cleanup these calls? Some of the happen internally again. int tdim = mesh->topology()->dim(); mesh->topology_mutable()->create_entities(tdim - 1); mesh->topology_mutable()->create_connectivity(tdim - 1, tdim); } auto extract_cells = [](std::span facets) { assert(facets.size() % 2 == 0); std::vector cells; cells.reserve(facets.size() / 2); for (std::size_t i = 0; i < facets.size(); i += 2) cells.push_back(facets[i]); return cells; }; for (auto type : types) { std::vector ids = a.integral_ids(type); switch (type) { case dolfinx::fem::IntegralType::cell: for (int id : ids) { dolfinx::fem::sparsitybuild::cells( pattern, {a.domain(type, id, *mesh0), a.domain(type, id, *mesh1)}, {{dofmaps[0], dofmaps[1]}}); } break; case dolfinx::fem::IntegralType::interior_facet: for (int id : ids) { dolfinx::fem::sparsitybuild::interior_facets( pattern, {extract_cells(a.domain(type, id, *mesh0)), extract_cells(a.domain(type, id, *mesh1))}, {{dofmaps[0], dofmaps[1]}}); } break; case dolfinx::fem::IntegralType::exterior_facet: for (int id : ids) { dolfinx::fem::sparsitybuild::cells( pattern, {extract_cells(a.domain(type, id, *mesh0)), extract_cells(a.domain(type, id, *mesh1))}, {{dofmaps[0], dofmaps[1]}}); } break; default: throw std::runtime_error("Unsupported integral type"); } } timer.stop(); } /// Create a map from dof blocks to one of the cells that contains the degree of /// freedom /// @param[in] topology The mesh topology /// @param[in] dofmap The dofmap /// @param[in] blocks The blocks (local to process) we want to map std::vector create_block_to_cell_map(const dolfinx::mesh::Topology& topology, const dolfinx::fem::DofMap& dofmap, std::span blocks); /// Create sparsity pattern with multi point constraint additions to the rows /// and the columns /// @param[in] a bi-linear form for the current variational problem /// (The one used to generate the standard sparsity-pattern) /// @param[in] mpc0 The multi point constraint to apply to the rows of the /// matrix. /// @param[in] mpc1 The multi point constraint to apply to the columns of the /// matrix. template dolfinx::la::SparsityPattern create_sparsity_pattern( const dolfinx::fem::Form& a, const std::shared_ptr> mpc0, const std::shared_ptr> mpc1) { { spdlog::info("Generating MPC sparsity pattern"); dolfinx::common::Timer timer("~MPC: Create sparsity pattern"); if (a.rank() != 2) { throw std::runtime_error( "Cannot create sparsity pattern. Form is not a bilinear form"); } // Extract function space and index map from mpc auto V0 = mpc0->function_space(); auto V1 = mpc1->function_space(); auto bs0 = V0->dofmap()->index_map_bs(); auto bs1 = V1->dofmap()->index_map_bs(); const auto& mesh = *(a.mesh()); std::array, 2> new_maps; new_maps[0] = V0->dofmap()->index_map; new_maps[1] = V1->dofmap()->index_map; std::array bs = {bs0, bs1}; dolfinx::la::SparsityPattern pattern(mesh.comm(), new_maps, bs); spdlog::debug("Build standard pattern"); /// Create and build sparsity pattern for original form. Should be /// equivalent to calling create_sparsity_pattern(Form a) build_standard_pattern(pattern, a); spdlog::debug("Build new pattern\n"); // Arrays replacing slave dof with master dof in sparsity pattern auto pattern_populator = [](dolfinx::la::SparsityPattern& pattern, const std::shared_ptr> mpc, const std::shared_ptr> mpc_off_axis, const auto& pattern_inserter, const auto& master_inserter) { const auto& V = mpc->function_space(); const auto& V_off_axis = mpc_off_axis->function_space(); // Data structures used for insert std::array master_block; std::array other_master_block; // Map from cell index (local to mpc) to slave indices in the cell const std::shared_ptr> cell_to_slaves = mpc->cell_to_slaves(); // For each cell (local to process) having a slave, get all slaves in main // constraint, and all dofs in off-axis constraint in the cell for (std::int32_t i = 0; i < cell_to_slaves->num_nodes(); ++i) { std::span slaves = cell_to_slaves->links(i); if (slaves.empty()) continue; std::span cell_dofs = V_off_axis->dofmap()->cell_dofs(i); // Arrays for flattened master slave data std::vector flattened_masters; flattened_masters.reserve(slaves.size()); // For each slave find all master degrees of freedom and flatten them for (auto slave : slaves) { for (auto master : mpc->masters()->links(slave)) { const std::div_t div = std::div(master, V->dofmap()->index_map_bs()); flattened_masters.push_back(div.quot); } } // Loop over all masters and insert all cell dofs for each master for (std::size_t j = 0; j < flattened_masters.size(); ++j) { master_block[0] = flattened_masters[j]; pattern_inserter(pattern, std::span(master_block), cell_dofs); // Add sparsity pattern for all master dofs of any slave on this cell for (std::size_t k = j + 1; k < flattened_masters.size(); ++k) { other_master_block[0] = flattened_masters[k]; master_inserter(pattern, std::span(other_master_block), std::span(master_block)); } } } }; if (mpc0 == mpc1) // TODO: should this be // mpc0.function_space().contains(mpc1.function_space()) ? { // Only need to loop through once const auto square_inserter = [](auto& pattern, const auto& dofs_m, const auto& dofs_s) { pattern.insert(dofs_m, dofs_s); pattern.insert(dofs_s, dofs_m); }; pattern_populator(pattern, mpc0, mpc1, square_inserter, square_inserter); } else { const auto do_nothing_inserter = []([[maybe_unused]] auto& pattern, [[maybe_unused]] const auto& dofs_m, [[maybe_unused]] const auto& dofs_s) {}; // Potentially rectangular pattern needs each axis inserted separately pattern_populator( pattern, mpc0, mpc1, [](auto& pattern, const auto& dofs_m, const auto& dofs_s) { pattern.insert(dofs_m, dofs_s); }, do_nothing_inserter); pattern_populator( pattern, mpc1, mpc0, [](auto& pattern, const auto& dofs_m, const auto& dofs_s) { pattern.insert(dofs_s, dofs_m); }, do_nothing_inserter); } return pattern; } } /// Compute the dot product u . vs /// @param u The first vector. It must has size 3. /// @param v The second vector. It must has size 3. /// @return The dot product `u . v`. The type will be the same as value size /// of u. template typename U::value_type dot(const U& u, const V& v) { assert(u.size() == 3); assert(v.size() == 3); return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]; } /// Send masters, coefficients, owners and offsets to the process owning the /// slave degree of freedom. /// @param[in] master_to_slave MPI neighborhood communicator from processes with /// master dofs to those owning the slave dofs /// @param[in] num_remote_masters Number of masters that will be sent to each /// destination of the neighboorhod communicator /// @param[in] num_remote_slaves Number of slaves that will be sent to each /// destination of the neighborhood communicator /// @param[in] num_incoming_slaves Number of slaves that wil be received from /// each source of the neighboorhod communicator /// @param[in] num_masters_per_slave The number of masters for each slave that /// will be sent to the owning rank /// @param[in] masters The masters to send to the owning process /// @param[in] coeffs The corresponding coefficients to send to the owning /// process /// @param[in] owners The owning rank of each master /// @returns Data structure with the number of masters per slave, the master /// dofs (global indices), the coefficients and owners template recv_data send_master_data_to_owner( MPI_Comm& master_to_slave, std::vector& num_remote_masters, const std::vector& num_remote_slaves, const std::vector& num_incoming_slaves, const std::vector& num_masters_per_slave, const std::vector& masters, const std::vector& coeffs, const std::vector& owners) { int indegree(-1); int outdegree(-2); int weighted(-1); MPI_Dist_graph_neighbors_count(master_to_slave, &indegree, &outdegree, &weighted); // Communicate how many masters has been found on the other process std::vector num_recv_masters(indegree + 1); num_remote_masters.push_back(0); MPI_Request request_m; MPI_Ineighbor_alltoall( num_remote_masters.data(), 1, dolfinx::MPI::mpi_type(), num_recv_masters.data(), 1, dolfinx::MPI::mpi_type(), master_to_slave, &request_m); num_recv_masters.pop_back(); num_remote_masters.pop_back(); std::vector remote_slave_disp_out(outdegree + 1, 0); std::partial_sum(num_remote_slaves.begin(), num_remote_slaves.end(), remote_slave_disp_out.begin() + 1); // Send num masters per slave std::vector slave_disp_in(indegree + 1, 0); std::partial_sum(num_incoming_slaves.begin(), num_incoming_slaves.end(), slave_disp_in.begin() + 1); std::vector recv_num_masters_per_slave(slave_disp_in.back()); MPI_Neighbor_alltoallv( num_masters_per_slave.data(), num_remote_slaves.data(), remote_slave_disp_out.data(), dolfinx::MPI::mpi_type(), recv_num_masters_per_slave.data(), num_incoming_slaves.data(), slave_disp_in.data(), dolfinx::MPI::mpi_type(), master_to_slave); // Wait for number of remote masters to be received MPI_Status status_m; MPI_Wait(&request_m, &status_m); // Compute in/out displacements for masters/coeffs/owners std::vector master_recv_disp(indegree + 1, 0); std::partial_sum(num_recv_masters.begin(), num_recv_masters.end(), master_recv_disp.begin() + 1); std::vector master_send_disp(outdegree + 1, 0); std::partial_sum(num_remote_masters.begin(), num_remote_masters.end(), master_send_disp.begin() + 1); // Send masters/coeffs/owners to slave process std::vector recv_masters(master_recv_disp.back()); std::vector recv_owners(master_recv_disp.back()); std::vector recv_coeffs(master_recv_disp.back()); std::array data_status; std::array data_request; MPI_Ineighbor_alltoallv( masters.data(), num_remote_masters.data(), master_send_disp.data(), dolfinx::MPI::mpi_type(), recv_masters.data(), num_recv_masters.data(), master_recv_disp.data(), dolfinx::MPI::mpi_type(), master_to_slave, &data_request[0]); MPI_Ineighbor_alltoallv(coeffs.data(), num_remote_masters.data(), master_send_disp.data(), dolfinx::MPI::mpi_type(), recv_coeffs.data(), num_recv_masters.data(), master_recv_disp.data(), dolfinx::MPI::mpi_type(), master_to_slave, &data_request[1]); MPI_Ineighbor_alltoallv( owners.data(), num_remote_masters.data(), master_send_disp.data(), dolfinx::MPI::mpi_type(), recv_owners.data(), num_recv_masters.data(), master_recv_disp.data(), dolfinx::MPI::mpi_type(), master_to_slave, &data_request[2]); /// Wait for all communication to finish MPI_Waitall(3, data_request.data(), data_status.data()); dolfinx_mpc::recv_data output; output.masters = recv_masters; output.coeffs = recv_coeffs; output.num_masters_per_slave = recv_num_masters_per_slave; output.owners = recv_owners; return output; } /// Append received slaves to arrays holding slave, master, coeffs and /// num_masters_per_slave received from other processes /// @param[in] in_data Structure holding incoming masters, coeffs, owners and /// number of masters per slave /// @param[in] local_slaves The slave dofs (local to process), where the ith /// entry corresponds to the ith entry of the vectors in in-data. /// @note local_slaves can have duplicates /// @param[in] masters Array to append the masters to /// @param[in] coeffs Array to append owner ranks to /// @param[in] owners Array to append owner ranks to /// @param[in] num_masters_per_slave Array to append num masters per slave to /// @param[in] size_local The local size of the index map /// @param[in] bs The block size of the index map template void append_master_data(recv_data in_data, const std::vector& local_slaves, std::vector& slaves, std::vector& masters, std::vector& coeffs, std::vector& owners, std::vector& num_masters_per_slave, std::int32_t size_local, std::int32_t bs) { std::vector slave_found(size_local * bs, false); std::vector& m_per_slave = in_data.num_masters_per_slave; std::vector& inc_masters = in_data.masters; std::vector& inc_coeffs = in_data.coeffs; std::vector& inc_owners = in_data.owners; assert(m_per_slave.size() == local_slaves.size()); // Compute accumulated position std::vector disp_m(m_per_slave.size() + 1, 0); std::partial_sum(m_per_slave.begin(), m_per_slave.end(), disp_m.begin() + 1); // NOTE: outdegree is really in degree as we are using the reverse comm for (std::size_t i = 0; i < m_per_slave.size(); i++) { // Only add slave to list if it hasn't been found on another proc and the // number of incoming masters is nonzero if (auto dof = local_slaves[i]; !slave_found[dof] && m_per_slave[i] > 0) { slaves.push_back(dof); for (std::int32_t j = disp_m[i]; j < disp_m[i + 1]; j++) { masters.push_back(inc_masters[j]); owners.push_back(inc_owners[j]); coeffs.push_back(inc_coeffs[j]); } num_masters_per_slave.push_back(m_per_slave[i]); slave_found[dof] = true; } } // Check that all local blocks has found its master [[maybe_unused]] const auto num_found = std::accumulate(std::begin(slave_found), std::end(slave_found), 0.0); [[maybe_unused]] const std::size_t num_unique = std::set(local_slaves.begin(), local_slaves.end()).size(); assert(num_found == num_unique); } /// Distribute local slave->master data from owning process to ghost processes /// @param[in] slaves List of local slaves indices (local to process, unrolled) /// @param[in] masters The corresponding master dofs (global indices, unrolled) /// @param[in] coeffs The master coefficients /// @param[in] owners The owners of the corresponding master dof /// @param[in] num_masters_per_slave The number of masters owned by each slave /// @param[in] imap The index map /// @param[in] bs The index map block size /// @returns Data structure holding the received slave->master data template dolfinx_mpc::mpc_data distribute_ghost_data( std::span slaves, std::span masters, std::span coeffs, std::span owners, std::span num_masters_per_slave, const dolfinx::common::IndexMap& imap, const int bs) { std::shared_ptr slave_to_ghost; std::vector parent_to_sub; parent_to_sub.reserve(slaves.size()); std::vector slave_blocks; slave_blocks.reserve(slaves.size()); std::vector slave_rems; slave_rems.reserve(slaves.size()); // Create new index map for each slave block { // Fill in owned blocks std::vector blocks; blocks.reserve(slaves.size()); std::ranges::transform(slaves, std::back_inserter(blocks), [bs](auto& dof) { return dof / bs; }); // Propagate local slave information to ghost processes std::vector indicator(imap.size_local(), 0); std::ranges::for_each(blocks, [&indicator](auto& block) { indicator[block] = 1; }); dolfinx::common::Scatterer ghost_scatterer(imap, 1); std::vector recieve_indicator(imap.num_ghosts()); ghost_scatterer.scatter_fwd(std::span(indicator), std::span(recieve_indicator)); // Insert ghosts blocks with constraints into blocks for (std::size_t i = 0; i < imap.num_ghosts(); ++i) { if (recieve_indicator[i] == 1) { blocks.push_back(imap.size_local() + i); } } // Sort and delete duplicates std::ranges::sort(blocks); blocks.erase(std::unique(blocks.begin(), blocks.end()), blocks.end()); // Create submap std::pair> compressed_map = dolfinx::common::create_sub_index_map( imap, blocks, dolfinx::common::IndexMapOrder::any, false); slave_to_ghost = std::make_shared( std::move(compressed_map.first)); // Build map from new index map to slave indices (unrolled) for (std::size_t i = 0; i < slaves.size(); i++) { std::div_t div = std::div(slaves[i], bs); slave_blocks[i] = div.quot; slave_rems[i] = div.rem; auto it = std::ranges::find(blocks, div.quot); assert(it != blocks.end()); auto index = std::distance(blocks.begin(), it); parent_to_sub.push_back((int)index); } } // Get communicator for owner->ghost MPI_Comm local_to_ghost = create_owner_to_ghost_comm(*slave_to_ghost); std::span src_ranks_ghosts = slave_to_ghost->src(); std::span dest_ranks_ghosts = slave_to_ghost->dest(); // Compute number of outgoing slaves and masters for each process dolfinx::graph::AdjacencyList shared_indices = slave_to_ghost->index_to_dest_ranks(); const std::size_t num_inc_proc = src_ranks_ghosts.size(); const std::size_t num_out_proc = dest_ranks_ghosts.size(); std::vector out_num_slaves(num_out_proc + 1, 0); std::vector out_num_masters(num_out_proc + 1, 0); for (std::size_t i = 0; i < slaves.size(); ++i) { for (auto proc : shared_indices.links(parent_to_sub[i])) { // Find index of process in local MPI communicator auto it = std::ranges::find(dest_ranks_ghosts, proc); const auto index = std::distance(dest_ranks_ghosts.begin(), it); out_num_masters[index] += num_masters_per_slave[i]; out_num_slaves[index]++; } } // Communicate number of incoming slaves and masters std::vector in_num_slaves(num_inc_proc + 1); std::vector in_num_masters(num_inc_proc + 1); std::array requests; std::array states; MPI_Ineighbor_alltoall(out_num_slaves.data(), 1, MPI_INT, in_num_slaves.data(), 1, MPI_INT, local_to_ghost, &requests[0]); out_num_slaves.pop_back(); in_num_slaves.pop_back(); MPI_Ineighbor_alltoall(out_num_masters.data(), 1, MPI_INT, in_num_masters.data(), 1, MPI_INT, local_to_ghost, &requests[1]); out_num_masters.pop_back(); in_num_masters.pop_back(); // Compute out displacements for slaves and masters std::vector disp_out_masters(num_out_proc + 1, 0); std::partial_sum(out_num_masters.begin(), out_num_masters.end(), disp_out_masters.begin() + 1); std::vector disp_out_slaves(num_out_proc + 1, 0); std::partial_sum(out_num_slaves.begin(), out_num_slaves.end(), disp_out_slaves.begin() + 1); // Compute displacement of masters to able to insert them correctly std::vector local_offsets(slaves.size() + 1, 0); std::partial_sum(num_masters_per_slave.begin(), num_masters_per_slave.end(), local_offsets.begin() + 1); // Insertion counter std::vector insert_slaves(num_out_proc, 0); std::vector insert_masters(num_out_proc, 0); // Prepare arrays for sending ghost information std::vector masters_out(disp_out_masters.back()); std::vector coeffs_out(disp_out_masters.back()); std::vector owners_out(disp_out_masters.back()); std::vector slaves_out_loc(disp_out_slaves.back()); std::vector slaves_out(disp_out_slaves.back()); std::vector masters_per_slave(disp_out_slaves.back()); for (std::size_t i = 0; i < slaves.size(); ++i) { // Find ghost processes for the ith local slave const std::int32_t master_start = local_offsets[i]; const std::int32_t master_end = local_offsets[i + 1]; for (auto proc : shared_indices.links(parent_to_sub[i])) { // Find index of process in local MPI communicator auto it = std::ranges::find(dest_ranks_ghosts, proc); const auto index = std::distance(dest_ranks_ghosts.begin(), it); // Insert slave and num masters per slave slaves_out_loc[disp_out_slaves[index] + insert_slaves[index]] = slaves[i]; masters_per_slave[disp_out_slaves[index] + insert_slaves[index]] = num_masters_per_slave[i]; insert_slaves[index]++; // Insert global master dofs to send std::ranges::copy(masters.begin() + master_start, masters.begin() + master_end, masters_out.begin() + disp_out_masters[index] + insert_masters[index]); // Insert owners to send std::ranges::copy( owners.begin() + master_start, owners.begin() + master_end, owners_out.begin() + disp_out_masters[index] + insert_masters[index]); // Insert coeffs to send std::ranges::copy( coeffs.begin() + master_start, coeffs.begin() + master_end, coeffs_out.begin() + disp_out_masters[index] + insert_masters[index]); insert_masters[index] += num_masters_per_slave[i]; } } // Map slaves to global index { std::vector blocks(slaves_out_loc.size()); std::vector rems(slaves_out_loc.size()); for (std::size_t i = 0; i < blocks.size(); ++i) { std::div_t pos = std::div(slaves_out_loc[i], bs); blocks[i] = pos.quot; rems[i] = pos.rem; } imap.local_to_global(blocks, slaves_out); std::ranges::transform(slaves_out, rems, slaves_out.begin(), [bs](auto dof, auto rem) { return dof * bs + rem; }); } // Create in displacements for slaves MPI_Wait(&requests[0], &states[0]); std::vector disp_in_slaves(num_inc_proc + 1, 0); std::partial_sum(in_num_slaves.begin(), in_num_slaves.end(), disp_in_slaves.begin() + 1); // Create in displacements for masters MPI_Wait(&requests[1], &states[1]); std::vector disp_in_masters(num_inc_proc + 1, 0); std::partial_sum(in_num_masters.begin(), in_num_masters.end(), disp_in_masters.begin() + 1); // Send data to ghost processes std::vector ghost_requests(5); std::vector ghost_status(5); // Receive slaves from owner std::vector recv_slaves(disp_in_slaves.back()); MPI_Ineighbor_alltoallv( slaves_out.data(), out_num_slaves.data(), disp_out_slaves.data(), dolfinx::MPI::mpi_type(), recv_slaves.data(), in_num_slaves.data(), disp_in_slaves.data(), dolfinx::MPI::mpi_type(), local_to_ghost, &ghost_requests[0]); // Receive number of masters from owner std::vector recv_num(disp_in_slaves.back()); MPI_Ineighbor_alltoallv( masters_per_slave.data(), out_num_slaves.data(), disp_out_slaves.data(), dolfinx::MPI::mpi_type(), recv_num.data(), in_num_slaves.data(), disp_in_slaves.data(), dolfinx::MPI::mpi_type(), local_to_ghost, &ghost_requests[1]); // Convert slaves to local index MPI_Wait(&ghost_requests[0], &ghost_status[0]); std::vector recv_block; recv_block.reserve(recv_slaves.size()); std::vector recv_rem; recv_rem.reserve(recv_slaves.size()); std::ranges::for_each(recv_slaves, [bs, &recv_rem, &recv_block](const auto dof) { recv_rem.push_back(dof % bs); recv_block.push_back(dof / bs); }); std::vector recv_local(recv_slaves.size()); imap.global_to_local(recv_block, recv_local); for (std::size_t i = 0; i < recv_local.size(); i++) recv_local[i] = recv_local[i] * bs + recv_rem[i]; MPI_Wait(&ghost_requests[1], &ghost_status[1]); // Receive masters, coeffs and owners from owning processes std::vector recv_masters(disp_in_masters.back()); MPI_Ineighbor_alltoallv( masters_out.data(), out_num_masters.data(), disp_out_masters.data(), dolfinx::MPI::mpi_type(), recv_masters.data(), in_num_masters.data(), disp_in_masters.data(), dolfinx::MPI::mpi_type(), local_to_ghost, &ghost_requests[2]); std::vector recv_owners(disp_in_masters.back()); MPI_Ineighbor_alltoallv( owners_out.data(), out_num_masters.data(), disp_out_masters.data(), dolfinx::MPI::mpi_type(), recv_owners.data(), in_num_masters.data(), disp_in_masters.data(), dolfinx::MPI::mpi_type(), local_to_ghost, &ghost_requests[3]); std::vector recv_coeffs(disp_in_masters.back()); MPI_Ineighbor_alltoallv(coeffs_out.data(), out_num_masters.data(), disp_out_masters.data(), dolfinx::MPI::mpi_type(), recv_coeffs.data(), in_num_masters.data(), disp_in_masters.data(), dolfinx::MPI::mpi_type(), local_to_ghost, &ghost_requests[4]); mpc_data ghost_data; ghost_data.slaves = recv_local; ghost_data.offsets = recv_num; MPI_Wait(&ghost_requests[2], &ghost_status[2]); ghost_data.masters = recv_masters; MPI_Wait(&ghost_requests[3], &ghost_status[3]); ghost_data.owners = recv_owners; MPI_Wait(&ghost_requests[4], &ghost_status[4]); ghost_data.coeffs = recv_coeffs; return ghost_data; } //----------------------------------------------------------------------------- /// Get basis values (not unrolled for block size) for a set of points and /// corresponding cells. /// @param[in] V The function space /// @param[in] x The coordinates of the points. It has shape /// (num_points, 3), flattened row major /// @param[in] cells An array of cell indices. cells[i] is the index /// of the cell that contains the point x(i). Negative cell indices /// can be passed, and the corresponding point will be ignored. /// @param[in,out] u The values at the points. Values are not computed /// for points with a negative cell index. This argument must be /// passed with the correct size. /// @returns basis values (not unrolled for block size) for each point. shape /// (num_points, number_of_dofs, value_size). Flattened row major template std::pair, std::array> evaluate_basis_functions(const dolfinx::fem::FunctionSpace& V, std::span x, std::span cells) { assert(x.size() % 3 == 0); const std::size_t num_points = x.size() / 3; if (num_points != cells.size()) { throw std::runtime_error( "Number of points and number of cells must be equal."); } // Get mesh auto mesh = V.mesh(); assert(mesh); const std::size_t gdim = mesh->geometry().dim(); const std::size_t tdim = mesh->topology()->dim(); auto map = mesh->topology()->index_map(tdim); // Get geometry data MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = mesh->geometry().dofmap(); const dolfinx::fem::CoordinateElement& cmap = mesh->geometry().cmap(); const std::size_t num_dofs_g = cmap.dim(); std::span x_g = mesh->geometry().x(); // Get element auto element = V.element(); assert(element); const int bs_element = element->block_size(); const std::size_t reference_value_size = element->reference_value_size() / bs_element; // If the space has sub elements, concatenate the evaluations on the // sub elements const int num_sub_elements = element->num_sub_elements(); if (num_sub_elements > 1 and num_sub_elements != bs_element) { throw std::runtime_error( "Evaluation of basis functions is not supported for mixed " "elements. Extract subspaces."); } // Return early if we have no points std::array basis_shape = element->basix_element().tabulate_shape(0, num_points); assert(basis_shape[2] == std::size_t(element->space_dimension() / bs_element)); assert(basis_shape[3] == std::size_t(V.value_size() / bs_element)); std::array reference_shape = {basis_shape[1], basis_shape[2], basis_shape[3]}; std::vector output_basis(std::reduce( reference_shape.begin(), reference_shape.end(), 1, std::multiplies{})); if (num_points == 0) return {output_basis, reference_shape}; std::span cell_info; if (element->needs_dof_transformations()) { mesh->topology_mutable()->create_entity_permutations(); cell_info = std::span(mesh->topology()->get_cell_permutation_info()); } using cmdspan4_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; using mdspan3_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; // Create buffer for coordinate dofs and point in physical space std::vector coord_dofs_b(num_dofs_g * gdim); mdspan2_t coord_dofs(coord_dofs_b.data(), num_dofs_g, gdim); std::vector xp_b(1 * gdim); mdspan2_t xp(xp_b.data(), 1, gdim); // Evaluate geometry basis at point (0, 0, 0) on the reference cell. // Used in affine case. std::array phi0_shape = cmap.tabulate_shape(1, 1); std::vector phi0_b( std::reduce(phi0_shape.begin(), phi0_shape.end(), 1, std::multiplies{})); cmdspan4_t phi0(phi0_b.data(), phi0_shape); cmap.tabulate(1, std::vector(tdim, 0), {1, tdim}, phi0_b); auto dphi0 = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( phi0, std::pair(1, tdim + 1), 0, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, 0); // Data structure for evaluating geometry basis at specific points. // Used in non-affine case. std::array phi_shape = cmap.tabulate_shape(1, 1); std::vector phi_b( std::reduce(phi_shape.begin(), phi_shape.end(), 1, std::multiplies{})); cmdspan4_t phi(phi_b.data(), phi_shape); auto dphi = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( phi, std::pair(1, tdim + 1), 0, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, 0); // Reference coordinates for each point std::vector Xb(num_points * tdim); mdspan2_t X(Xb.data(), num_points, tdim); // Geometry data at each point std::vector J_b(num_points * gdim * tdim); mdspan3_t J(J_b.data(), num_points, gdim, tdim); std::vector K_b(num_points * tdim * gdim); mdspan3_t K(K_b.data(), num_points, tdim, gdim); std::vector detJ(num_points); std::vector det_scratch(2 * gdim * tdim); // Prepare geometry data in each cell for (std::size_t p = 0; p < cells.size(); ++p) { const int cell_index = cells[p]; // Skip negative cell indices if (cell_index < 0) continue; // Get cell geometry (coordinate dofs) auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cell_index, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < num_dofs_g; ++i) { const int pos = 3 * x_dofs[i]; for (std::size_t j = 0; j < gdim; ++j) coord_dofs(i, j) = x_g[pos + j]; } for (std::size_t j = 0; j < gdim; ++j) xp(0, j) = x[3 * p + j]; auto _J = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( J, p, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); auto _K = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( K, p, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); std::array Xpb = {0, 0, 0}; MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 1, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>> Xp(Xpb.data(), 1, tdim); // Compute reference coordinates X, and J, detJ and K if (cmap.is_affine()) { dolfinx::fem::CoordinateElement::compute_jacobian(dphi0, coord_dofs, _J); dolfinx::fem::CoordinateElement::compute_jacobian_inverse(_J, _K); std::array x0 = {0, 0, 0}; for (std::size_t i = 0; i < coord_dofs.extent(1); ++i) x0[i] += coord_dofs(0, i); dolfinx::fem::CoordinateElement::pull_back_affine(Xp, _K, x0, xp); detJ[p] = dolfinx::fem::CoordinateElement::compute_jacobian_determinant( _J, det_scratch); } else { // Pull-back physical point xp to reference coordinate Xp cmap.pull_back_nonaffine(Xp, xp, coord_dofs, 5000 * std::numeric_limits::epsilon(), 15); cmap.tabulate(1, std::span(Xpb.data(), tdim), {1, tdim}, phi_b); dolfinx::fem::CoordinateElement::compute_jacobian(dphi, coord_dofs, _J); dolfinx::fem::CoordinateElement::compute_jacobian_inverse(_J, _K); detJ[p] = dolfinx::fem::CoordinateElement::compute_jacobian_determinant( _J, det_scratch); } for (std::size_t j = 0; j < X.extent(1); ++j) X(p, j) = Xpb[j]; } // Compute basis on reference element std::vector reference_basisb(std::reduce( basis_shape.begin(), basis_shape.end(), 1, std::multiplies{})); element->tabulate(reference_basisb, Xb, {X.extent(0), X.extent(1)}, 0); // Data structure to hold basis for transformation const std::size_t num_basis_values = basis_shape[2] * basis_shape[3]; std::vector basis_valuesb(num_basis_values); mdspan2_t basis_values(basis_valuesb.data(), basis_shape[2], basis_shape[3]); using xu_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; using xU_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; using xJ_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; using xK_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; auto push_forward_fn = element->basix_element().template map_fn(); auto apply_dof_transformation = element->template dof_transformation_fn( dolfinx::fem::doftransform::standard); mdspan3_t full_basis(output_basis.data(), reference_shape); for (std::size_t p = 0; p < cells.size(); ++p) { const int cell_index = cells[p]; // Skip negative cell indices if (cell_index < 0) continue; // Permute the reference values to account for the cell's orientation std::ranges::copy_n( std::next(reference_basisb.begin(), num_basis_values * p), num_basis_values, basis_valuesb.begin()); apply_dof_transformation(basis_valuesb, cell_info, cell_index, (int)reference_value_size); auto _U = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( full_basis, p, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); auto _J = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( J, p, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); auto _K = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( K, p, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); push_forward_fn(_U, basis_values, _J, detJ[p], _K); } return {output_basis, reference_shape}; } //----------------------------------------------------------------------------- /// Tabuilate dof coordinates (not unrolled for block size) for a set of points /// and corresponding cells. /// @param[in] V The function space /// @param[in] dofs Array of dofs (not unrolled with block size) /// @param[in] cells An array of cell indices. cells[i] is the index /// of a cell that contains dofs[i] /// @param[in] transposed If true return coordiantes in xxyyzz format. Else /// xyzxzyxzy /// @returns The dof coordinates flattened in the appropriate format template std::pair, std::array> tabulate_dof_coordinates( const dolfinx::fem::FunctionSpace& V, std::span dofs, std::span cells, bool transposed = false) { if (!V.component().empty()) { throw std::runtime_error("Cannot tabulate coordinates for a " "FunctionSpace that is a subspace."); } auto element = V.element(); assert(element); if (V.element()->is_mixed()) { throw std::runtime_error( "Cannot tabulate coordinates for a mixed FunctionSpace."); } auto mesh = V.mesh(); assert(mesh); const std::size_t gdim = mesh->geometry().dim(); // Get dofmap local size auto dofmap = V.dofmap(); assert(dofmap); std::shared_ptr index_map = V.dofmap()->index_map; assert(index_map); const int element_block_size = element->block_size(); const std::size_t space_dimension = element->space_dimension() / element_block_size; // Get the dof coordinates on the reference element if (!element->interpolation_ident()) { throw std::runtime_error("Cannot evaluate dof coordinates - this element " "does not have pointwise evaluation."); } auto [X_b, X_shape] = element->interpolation_points(); // Get coordinate map const dolfinx::fem::CoordinateElement& cmap = mesh->geometry().cmap(); // Prepare cell geometry MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const std::int32_t, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> x_dofmap = mesh->geometry().dofmap(); std::span x_g = mesh->geometry().x(); const std::size_t num_dofs_g = x_dofmap.extent(1); // Array to hold coordinates to return std::array coord_shape = {dofs.size(), 3}; if (transposed) coord_shape = {3, dofs.size()}; std::vector coordsb(std::reduce(coord_shape.cbegin(), coord_shape.cend(), 1, std::multiplies{})); using mdspan2_t = MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents>; // Loop over cells and tabulate dofs assert(space_dimension == X_shape[0]); std::vector xb(space_dimension * gdim); mdspan2_t x(xb.data(), space_dimension, gdim); // Create buffer for coordinate dofs and point in physical space std::vector coordinate_dofs_b(num_dofs_g * gdim); mdspan2_t coordinate_dofs(coordinate_dofs_b.data(), num_dofs_g, gdim); std::span cell_info; if (element->needs_dof_transformations()) { mesh->topology_mutable()->create_entity_permutations(); cell_info = std::span(mesh->topology()->get_cell_permutation_info()); } const auto apply_dof_transformation = element->template dof_transformation_fn( dolfinx::fem::doftransform::standard); const std::array bsize = cmap.tabulate_shape(0, X_shape[0]); std::vector phi_b( std::reduce(bsize.begin(), bsize.end(), 1, std::multiplies{})); cmap.tabulate(0, X_b, X_shape, phi_b); MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::dextents> phi_full(phi_b.data(), bsize); auto phi = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( phi_full, 0, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent, 0); // Create insertion function std::function inserter; if (transposed) { inserter = [&coordsb, &xb, gdim, coord_shape](std::size_t c, std::size_t j, std::ptrdiff_t loc) { coordsb[j * coord_shape[1] + c] = xb[loc * gdim + j]; }; } else { inserter = [&coordsb, &xb, gdim, coord_shape](std::size_t c, std::size_t j, std::ptrdiff_t loc) { coordsb[c * coord_shape[1] + j] = xb[loc * gdim + j]; }; } for (std::size_t c = 0; c < cells.size(); ++c) { // Fetch the coordinates of the cell auto x_dofs = MDSPAN_IMPL_STANDARD_NAMESPACE::submdspan( x_dofmap, cells[c], MDSPAN_IMPL_STANDARD_NAMESPACE::full_extent); for (std::size_t i = 0; i < num_dofs_g; ++i) { const int pos = 3 * x_dofs[i]; for (std::size_t j = 0; j < gdim; ++j) coordinate_dofs(i, j) = x_g[pos + j]; } // Tabulate dof coordinates on cell dolfinx::fem::CoordinateElement::push_forward(x, coordinate_dofs, phi); apply_dof_transformation(std::span(xb.data(), x.size()), std::span(cell_info.data(), cell_info.size()), (std::int32_t)c, (int)gdim); // Get cell dofmap auto cell_dofs = dofmap->cell_dofs(cells[c]); auto it = std::ranges::find(cell_dofs, dofs[c]); auto loc = std::distance(cell_dofs.begin(), it); // Copy dof coordinates into vector for (std::size_t j = 0; j < gdim; ++j) inserter(c, j, loc); } return {coordsb, coord_shape}; } /// From a Mesh, find which cells collide with a set of points. /// @note Uses the GJK algorithm, see dolfinx::geometry::compute_distance_gjk /// for details /// @param[in] mesh The mesh /// @param[in] candidate_cells List of candidate colliding cells for the /// ith point in `points` /// @param[in] points The points to check for collision, shape=(num_points, 3). /// Flattened row major. /// @param[in] eps2 The tolerance for the squared distance to be considered a /// collision /// @return Adjacency list where the ith node is the closest entity whose /// squared distance is within eps2 /// @note There may be nodes with no entries in the adjacency list template dolfinx::graph::AdjacencyList compute_colliding_cells( const dolfinx::mesh::Mesh& mesh, const dolfinx::graph::AdjacencyList& candidate_cells, std::span points, const U eps2) { std::vector offsets = {0}; offsets.reserve(candidate_cells.num_nodes() + 1); std::vector colliding_cells; const int tdim = mesh.topology()->dim(); std::vector result; for (std::int32_t i = 0; i < candidate_cells.num_nodes(); i++) { auto cells = candidate_cells.links(i); if (cells.empty()) { offsets.push_back((std::int32_t)colliding_cells.size()); continue; } // Create span of std::vector distances_sq(cells.size()); for (std::size_t j = 0; j < cells.size(); j++) { distances_sq[j] = dolfinx::geometry::squared_distance(mesh, tdim, cells.subspan(j, 1), points.subspan(3 * i, 3)) .front(); } // Only push back closest cell if (auto cell_idx = std::ranges::min_element(distances_sq); *cell_idx < eps2) { auto pos = std::distance(distances_sq.begin(), cell_idx); colliding_cells.push_back(cells[pos]); } offsets.push_back((std::int32_t)colliding_cells.size()); } return dolfinx::graph::AdjacencyList(std::move(colliding_cells), std::move(offsets)); } /// Given a mesh and corresponding bounding box tree and a set of points,check /// which cells (local to process) collide with each point. /// Return an array of the same size as the number of points, where the ith /// entry corresponds to the first cell colliding with the ith point. /// @note If no colliding point is found, the index -1 is returned. /// @param[in] mesh The mesh /// @param[in] tree The boundingbox tree of all cells (local to process) in /// the mesh /// @param[in] points The points to check collision with, shape (num_points, /// 3). Flattened row major. /// @param[in] eps2 The tolerance for the squared distance to be considered a /// collision template std::vector find_local_collisions(const dolfinx::mesh::Mesh& mesh, const dolfinx::geometry::BoundingBoxTree& tree, std::span points, const U eps2) { assert(points.size() % 3 == 0); // Compute collisions for each point with BoundingBoxTree dolfinx::graph::AdjacencyList bbox_collisions = dolfinx::geometry::compute_collisions(tree, points); // Compute exact collision auto cell_collisions = dolfinx_mpc::compute_colliding_cells( mesh, bbox_collisions, points, eps2); // Extract first collision std::vector collisions(points.size() / 3, -1); for (int i = 0; i < cell_collisions.num_nodes(); i++) { auto local_cells = cell_collisions.links(i); if (!local_cells.empty()) collisions[i] = local_cells[0]; } return collisions; } /// Given an input array of dofs from a function space, return an array with /// true/false if the degree of freedom is in a DirichletBC /// @param[in] V The function space /// @param[in] blocks The degrees of freedom (not unrolled for dofmap block /// size) /// @param[in] bcs List of Dirichlet BCs on V template std::vector is_bc( const dolfinx::fem::FunctionSpace& V, std::span blocks, const std::vector>>& bcs) { auto dofmap = V.dofmap(); assert(dofmap); auto imap = dofmap->index_map; assert(imap); const int bs = dofmap->index_map_bs(); std::int32_t dim = bs * (imap->size_local() + imap->num_ghosts()); std::vector dof_marker(dim, false); std::ranges::for_each(bcs, [&dof_marker, &V](auto bc) { assert(bc); assert(bc->function_space()); if (bc->function_space()->contains(V)) bc->mark_dofs(dof_marker); }); // Remove slave blocks contained in DirichletBC std::vector bc_marker(blocks.size(), 0); const int dofmap_bs = dofmap->bs(); for (std::size_t i = 0; i < blocks.size(); i++) { auto& block = blocks[i]; for (int j = 0; j < dofmap_bs; j++) { if (dof_marker[block * dofmap_bs + j]) { bc_marker[i] = 1; break; } } } return bc_marker; } } // namespace dolfinx_mpc dolfinx_mpc-0.9.1/docker/000077500000000000000000000000001476141270300152775ustar00rootroot00000000000000dolfinx_mpc-0.9.1/docker/Dockerfile000066400000000000000000000032561476141270300172770ustar00rootroot00000000000000FROM ghcr.io/fenics/dolfinx/dolfinx:stable WORKDIR /tmp # This argument should be the same as what-ever the python version of the dol ARG PYTHON_VERSION=3.12 # Set env variables ENV HDF5_MPI="ON" \ HDF5_DIR="/usr/local" RUN python3 -m pip install -U pip setuptools # Install h5py https://github.com/h5py/h5py/issues/2222 RUN python3 -m pip install --no-cache-dir --no-binary=h5py git+https://github.com/h5py/h5py.git RUN python3 -m pip install meshio # Copy DOLFINX_MPC source dir COPY . dolfinx_mpc RUN python3 -m pip install -U pip setuptools # Install real mode RUN . /usr/local/bin/dolfinx-real-mode && \ . /usr/local/dolfinx-real/lib/dolfinx/dolfinx.conf && \ cmake -G Ninja -DCMAKE_INSTALL_PREFIX=/usr/local/dolfinx-real -DCMAKE_BUILD_TYPE=Developer -B build-dir-real dolfinx_mpc/cpp/ && \ ninja install -j4 -C build-dir-real && \ python3 -m pip install -v --no-build-isolation --check-build-dependencies \ --target /usr/local/dolfinx-real/lib/python${PYTHON_VERSION}/dist-packages --no-dependencies --no-cache-dir ./dolfinx_mpc/python # Clean repo to remove build dir from pip RUN rm -rf dolfinx_mpc/python/build # Install complex mode RUN . /usr/local/bin/dolfinx-complex-mode && \ . /usr/local/dolfinx-complex/lib/dolfinx/dolfinx.conf && \ cmake -G Ninja -DCMAKE_INSTALL_PREFIX=/usr/local/dolfinx-complex -DCMAKE_BUILD_TYPE=Developer -B build-dir-complex dolfinx_mpc/cpp/ && \ ninja install -j4 -C build-dir-complex && \ python3 -m pip install -v --no-build-isolation --check-build-dependencies \ --target /usr/local/dolfinx-complex/lib/python${PYTHON_VERSION}/dist-packages --no-dependencies --no-cache-dir ./dolfinx_mpc/python WORKDIR /root dolfinx_mpc-0.9.1/docs/000077500000000000000000000000001476141270300147605ustar00rootroot00000000000000dolfinx_mpc-0.9.1/docs/api.rst000066400000000000000000000001041476141270300162560ustar00rootroot00000000000000dolfinx_mpc =========== .. automodule:: dolfinx_mpc :members: dolfinx_mpc-0.9.1/docs/numba.rst000066400000000000000000000001261476141270300166130ustar00rootroot00000000000000dolfinx_mpc.numba ================= .. automodule:: dolfinx_mpc.numba :members: dolfinx_mpc-0.9.1/docs/utils.rst000066400000000000000000000001261476141270300166510ustar00rootroot00000000000000dolfinx_mpc.utils ================= .. automodule:: dolfinx_mpc.utils :members: dolfinx_mpc-0.9.1/index.md000066400000000000000000000001771476141270300154660ustar00rootroot00000000000000# DOLFINx-MPC Author: Jørgen S. Dokken Welcome to the DOLFINx MPC documentation. See the following pages for the Python API.dolfinx_mpc-0.9.1/python/000077500000000000000000000000001476141270300153515ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/CMakeLists.txt000066400000000000000000000047061476141270300201200ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.21) PROJECT(dolfinx_mpc_nanobind) find_package(Python COMPONENTS Interpreter Development REQUIRED) # Detect the installed nanobind package and import it into CMake execute_process( COMMAND "${Python_EXECUTABLE}" -m nanobind --cmake_dir OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE NB_DIR) list(APPEND CMAKE_PREFIX_PATH "${NB_DIR}") find_package(nanobind CONFIG REQUIRED) execute_process( COMMAND ${Python3_EXECUTABLE} -c "import os, sys, basix; sys.stdout.write(os.path.dirname(basix.__file__))" OUTPUT_VARIABLE BASIX_PY_DIR RESULT_VARIABLE BASIX_PY_COMMAND_RESULT ERROR_VARIABLE BASIX_ERROR_OUT OUTPUT_STRIP_TRAILING_WHITESPACE ) find_package(Basix REQUIRED CONFIG HINTS ${BASIX_PY_DIR}) if (Basix_FOUND) message(STATUS "Found Basix at ${Basix_DIR}") endif() find_package(DOLFINX REQUIRED CONFIG) if (DOLFINX_FOUND) message(STATUS "Found DOLFINx at ${DOLFINX_DIR}") endif() # Add DOLFINx_mpc libraries find_package(DOLFINX_MPC REQUIRED CONFIG) if (DOLFINX_MPC_FOUND) message(STATUS "Found DOLFINx_MPC at ${DOLFINX_MPC_DIR}") endif() # Create the binding library nanobind handles its own calls to # target_link_libraries nanobind_add_module( cpp NOMINSIZE MODULE dolfinx_mpc/dolfinx_mpc.cpp dolfinx_mpc/mpc.cpp ) target_compile_definitions(cpp PRIVATE cxx_std_20) target_link_libraries(cpp PRIVATE dolfinx_mpc) # Check for petsc4py execute_process( COMMAND ${Python_EXECUTABLE} -c "import petsc4py; print(petsc4py.get_include())" OUTPUT_VARIABLE PETSC4PY_INCLUDE_DIR RESULT_VARIABLE PETSC4PY_COMMAND_RESULT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE ) if(NOT PETSC4PY_COMMAND_RESULT) message(STATUS "Found petsc4py include directory at ${PETSC4PY_INCLUDE_DIR}") target_include_directories(cpp PRIVATE ${PETSC4PY_INCLUDE_DIR}) target_compile_definitions(cpp PRIVATE HAS_PETSC4PY) else() message(FATAL_ERROR "petsc4py not found.") endif() # Get python include-dirs execute_process( COMMAND ${Python3_EXECUTABLE} -c "import dolfinx.wrappers, sys; sys.stdout.write(str(dolfinx.wrappers.get_include_path()))" OUTPUT_VARIABLE DOLFINX_PY_DIR RESULT_VARIABLE DOLFINX_PY_COMMAND_RESULT OUTPUT_STRIP_TRAILING_WHITESPACE) if (DOLFINX_PY_DIR) message(STATUS "Adding ${DOLFINX_PY_DIR} to include directories") target_include_directories(cpp PRIVATE ${DOLFINX_PY_DIR}) endif() set_target_properties(cpp PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE) install(TARGETS cpp DESTINATION dolfinx_mpc) dolfinx_mpc-0.9.1/python/README.md000066400000000000000000000002451476141270300166310ustar00rootroot00000000000000# Python interface for DolfinX-MPC Can be installed with ```bash python3 -m pip -v install --config-settings=cmake.build-type="Release" --no-build-isolation . ```dolfinx_mpc-0.9.1/python/benchmarks/000077500000000000000000000000001476141270300174665ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/benchmarks/Makefile000066400000000000000000000006301476141270300211250ustar00rootroot00000000000000periodic: mpirun -n 23 python3 ref_periodic.py --nref 6 --tet --boomeramg --degree 2 mpirun -n 23 python3 bench_periodic.py --nref 6 --tet --boomeramg --degree 2 python3 visualize_iterations.py --periodic elasticity: mpirun -n 23 python3 ref_elasticity.py --nref 6 --gamg --xdmf mpirun -n 23 python3 bench_elasticity_edge.py --nref 6 --gamg --xdmf --info python3 visualize_iterations.py --elasticity dolfinx_mpc-0.9.1/python/benchmarks/bench_contact_3D.py000066400000000000000000000354751476141270300231760ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # Multi point constraint problem for linear elasticity with slip conditions # between two cubes. from __future__ import annotations import warnings from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import basix.ufl import numpy as np from basix.ufl import element from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings, timing from dolfinx.cpp.mesh import entities_to_geometry from dolfinx.fem import ( Constant, Function, dirichletbc, form, functionspace, locate_dofs_topological, set_bc, ) from dolfinx.io import XDMFFile from dolfinx.mesh import ( CellType, compute_midpoints, create_mesh, create_unit_cube, locate_entities_boundary, meshtags, refine, ) from ufl import Identity, Mesh, TestFunction, TrialFunction, dx, grad, inner, sym, tr from dolfinx_mpc import MultiPointConstraint, apply_lifting, assemble_matrix, assemble_vector from dolfinx_mpc.utils import ( create_normal_approximation, log_info, rigid_motions_nullspace, rotation_matrix, ) comm = MPI.COMM_WORLD if default_real_type == np.float32: warnings.warn( "Demo not supported in single precision as reading from XDMF only support" + "double precision meshes" ) exit(0) def mesh_3D_dolfin(theta=0, ct=CellType.tetrahedron, ext="tetrahedron", num_refinements=0, N0=5): timer = Timer("Create mesh") def find_plane_function(p0, p1, p2): """ Find plane function given three points: http://www.nabla.hr/CG-LinesPlanesIn3DA3.htm """ v1 = np.array(p1) - np.array(p0) v2 = np.array(p2) - np.array(p0) n = np.cross(v1, v2) D = -(n[0] * p0[0] + n[1] * p0[1] + n[2] * p0[2]) return lambda x: np.isclose(0, np.dot(n, x) + D) def over_plane(p0, p1, p2): """ Returns function that checks if a point is over a plane defined by the points p0, p1 and p2. """ v1 = np.array(p1) - np.array(p0) v2 = np.array(p2) - np.array(p0) n = np.cross(v1, v2) D = -(n[0] * p0[0] + n[1] * p0[1] + n[2] * p0[2]) return lambda x: n[0] * x[0] + n[1] * x[1] + D > -n[2] * x[2] tmp_mesh_name = Path("tmp_mesh.xdmf").absolute() tmp_mesh_name.parent.mkdir(exist_ok=True) r_matrix = rotation_matrix([1 / np.sqrt(2), 1 / np.sqrt(2), 0], -theta) if MPI.COMM_WORLD.rank == 0: # Create two coarse meshes and merge them mesh0 = create_unit_cube(MPI.COMM_SELF, N0, N0, N0, ct) mesh0.geometry.x[:, 2] += 1 mesh1 = create_unit_cube(MPI.COMM_SELF, 2 * N0, 2 * N0, 2 * N0, ct) tdim0 = mesh0.topology.dim num_cells0 = mesh0.topology.index_map(tdim0).size_local mesh0.topology.create_connectivity(tdim0, tdim0) cells0 = entities_to_geometry(mesh0._cpp_object, tdim0, np.arange(num_cells0, dtype=np.int32), False) tdim1 = mesh1.topology.dim num_cells1 = mesh1.topology.index_map(tdim1).size_local mesh1.topology.create_connectivity(tdim1, tdim1) cells1 = entities_to_geometry(mesh1._cpp_object, tdim1, np.arange(num_cells1, dtype=np.int32), False) cells1 += mesh0.geometry.x.shape[0] # Concatenate points and cells points = np.vstack([mesh0.geometry.x, mesh1.geometry.x]) cells = np.vstack([cells0, cells1]) domain = Mesh(element("Lagrange", ct.name, 1, shape=(points.shape[1],))) # Rotate mesh points = np.dot(r_matrix, points.T).T.astype(default_real_type) mesh = create_mesh(MPI.COMM_SELF, cells, points, domain) with XDMFFile(MPI.COMM_SELF, tmp_mesh_name, "w") as xdmf: xdmf.write_mesh(mesh) MPI.COMM_WORLD.barrier() with XDMFFile(MPI.COMM_WORLD, tmp_mesh_name, "r") as xdmf: mesh = xdmf.read_mesh() # Refine coarse mesh for i in range(num_refinements): mesh.topology.create_entities(mesh.topology.dim - 2) mesh = refine(mesh, redistribute=True) tdim = mesh.topology.dim fdim = tdim - 1 # Find information about facets to be used in meshtags bottom_points = np.dot(r_matrix, np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]).T) bottom = find_plane_function(bottom_points[:, 0], bottom_points[:, 1], bottom_points[:, 2]) bottom_facets = locate_entities_boundary(mesh, fdim, bottom) top_points = np.dot(r_matrix, np.array([[0, 0, 2], [1, 0, 2], [0, 1, 2], [1, 1, 2]]).T) top = find_plane_function(top_points[:, 0], top_points[:, 1], top_points[:, 2]) top_facets = locate_entities_boundary(mesh, fdim, top) # Determine interface facets if_points = np.dot(r_matrix, np.array([[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]).T) interface = find_plane_function(if_points[:, 0], if_points[:, 1], if_points[:, 2]) i_facets = locate_entities_boundary(mesh, fdim, interface) mesh.topology.create_connectivity(fdim, tdim) top_interface = [] bottom_interface = [] facet_to_cell = mesh.topology.connectivity(fdim, tdim) num_cells = mesh.topology.index_map(tdim).size_local # Find top and bottom interface facets cells = np.arange(num_cells, dtype=np.int32) mesh.topology.create_connectivity(tdim, tdim) cell_midpoints = compute_midpoints(mesh, tdim, cells) top_cube = over_plane(if_points[:, 0], if_points[:, 1], if_points[:, 2]) for facet in i_facets: i_cells = facet_to_cell.links(facet) assert len(i_cells == 1) i_cell = i_cells[0] if top_cube(cell_midpoints[i_cell]): top_interface.append(facet) else: bottom_interface.append(facet) # Create cell tags top_cube_marker = 2 indices = [] values = [] for cell_index in range(num_cells): if top_cube(cell_midpoints[cell_index]): indices.append(cell_index) values.append(top_cube_marker) ct = meshtags(mesh, tdim, np.array(indices, dtype=np.intc), np.array(values, dtype=np.intc)) # Create meshtags for facet data markers = { 3: top_facets, 4: bottom_interface, 9: top_interface, 5: bottom_facets, } # , 6: left_facets, 7: right_facets} indices = np.array([], dtype=np.intc) values = np.array([], dtype=np.intc) for key in markers.keys(): indices = np.append(indices, markers[key]) values = np.append(values, np.full(len(markers[key]), key, dtype=np.intc)) sorted_indices = np.argsort(indices) mt = meshtags(mesh, fdim, indices[sorted_indices], values[sorted_indices]) mt.name = "facet_tags" mesh_dir = Path("meshes").absolute() mesh_dir.mkdir(exist_ok=True) fname = mesh_dir / f"mesh_{ext}_{theta:.2f}.xdmf" with XDMFFile(mesh.comm, fname, "w") as o_f: o_f.write_mesh(mesh) o_f.write_meshtags(ct, x=mesh.geometry) o_f.write_meshtags(mt, x=mesh.geometry) timer.stop() def demo_stacked_cubes(theta, ct, noslip, num_refinements, N0, timings=False): celltype = "hexahedron" if ct == CellType.hexahedron else "tetrahedron" type_ext = "no_slip" if noslip else "slip" log_info(f"Run theta: {theta:.2f}, Cell: {celltype:s}, Noslip: {noslip:b}") # Read in mesh mesh_3D_dolfin(theta=theta, ct=ct, ext=celltype, num_refinements=num_refinements, N0=N0) comm.barrier() mesh_dir = Path("meshes").absolute() with XDMFFile(comm, mesh_dir / f"mesh_{celltype}_{theta:.2f}.xdmf", "r") as xdmf: mesh = xdmf.read_mesh(name="mesh") tdim = mesh.topology.dim fdim = tdim - 1 mesh.topology.create_connectivity(tdim, tdim) mesh.topology.create_connectivity(fdim, tdim) mt = xdmf.read_meshtags(mesh, "facet_tags") mesh.name = f"mesh_{celltype}_{theta:.2f}{type_ext:s}" # Create functionspaces el = basix.ufl.element( "Lagrange", mesh.topology.cell_name(), 1, shape=(mesh.geometry.dim,), dtype=default_real_type ) V = functionspace(mesh, el) # Define boundary conditions # Bottom boundary is fixed in all directions u_bc = Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(0.0) u_bc.x.petsc_vec.destroy() bottom_dofs = locate_dofs_topological(V, fdim, mt.find(5)) bc_bottom = dirichletbc(u_bc, bottom_dofs) g_vec = [0, 0, -4.25e-1] if not noslip: # Helper for orienting traction r_matrix = rotation_matrix([1 / np.sqrt(2), 1 / np.sqrt(2), 0], -theta) # Top boundary has a given deformation normal to the interface g_vec = np.dot(r_matrix, [0, 0, -4.25e-1]) def top_v(x): values = np.empty((3, x.shape[1])) values[0] = g_vec[0] values[1] = g_vec[1] values[2] = g_vec[2] return values u_top = Function(V) u_top.interpolate(top_v) u_top.x.scatter_forward() top_dofs = locate_dofs_topological(V, fdim, mt.find(3)) bc_top = dirichletbc(u_top, top_dofs) bcs = [bc_bottom, bc_top] # Elasticity parameters E = default_scalar_type(1.0e3) nu = 0 mu = Constant(mesh, E / (2.0 * (1.0 + nu))) lmbda = Constant(mesh, E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(Constant(mesh, default_scalar_type((0, 0, 0))), v) * dx log_info("Create constraints") mpc = MultiPointConstraint(V) num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if noslip: with Timer(f"{num_dofs}: Contact-constraint"): mpc.create_contact_inelastic_condition(mt, 4, 9) else: with Timer(f"{num_dofs}: FacetNormal"): nh = create_normal_approximation(V, mt, 4) with Timer(f"{num_dofs}: Contact-constraint"): mpc.create_contact_slip_condition(mt, 4, 9, nh) with Timer(f"{num_dofs}: MPC-init"): mpc.finalize() null_space = rigid_motions_nullspace(mpc.function_space) log_info(f"Num dofs: {num_dofs}") log_info("Assemble matrix") bilinear_form = form(a) linear_form = form(rhs) with Timer(f"{num_dofs}: Assemble-matrix (C++)"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) with Timer(f"{num_dofs}: Assemble-vector (C++)"): b = assemble_vector(linear_form, mpc) apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(b, bcs) list_timings(MPI.COMM_WORLD, [TimingType.wall]) # Solve Linear problem opts = PETSc.Options() # type: ignore # opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" # opts["pc_gamg_type"] = "agg" # opts["pc_gamg_coarse_eq_limit"] = 1000 # opts["pc_gamg_sym_graph"] = True # opts["mg_levels_ksp_type"] = "chebyshev" # opts["mg_levels_pc_type"] = "jacobi" # opts["mg_levels_esteig_ksp_type"] = "cg" # opts["matptap_via"] = "scalable" # opts["pc_gamg_square_graph"] = 2 # opts["pc_gamg_threshold"] = 1e-2 # opts["help"] = None # List all available options if timings: opts["ksp_view"] = None # List progress of solver # Create functionspace and build near nullspace A.setNearNullSpace(null_space) solver = PETSc.KSP().create(comm) # type: ignore solver.setOperators(A) solver.setFromOptions() uh = Function(mpc.function_space) uh.x.array[:] = 0 log_info("Solve") with Timer(f"{num_dofs}: Solve"): solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() log_info("Backsub") with Timer(f"{num_dofs}: Backsubstitution"): mpc.backsubstitution(uh) it = solver.getIterationNumber() # Write solution to file results = Path("results").absolute() results.mkdir(exist_ok=True) with XDMFFile(comm, results / f"bench_contact_{num_dofs}.xdmf", "w") as outfile: outfile.write_mesh(mesh) outfile.write_function(uh, 0.0, f"Xdmf/Domain/Grid[@Name='{mesh.name}'][1]") # Write performance data to file if timings: log_info("Timings") num_slaves = MPI.COMM_WORLD.allreduce(mpc.num_local_slaves, op=MPI.SUM) results_file = None num_procs = comm.size if comm.rank == 0: results_file = open(results / f"results_bench_{num_dofs}.txt", "w") print(f"#Procs: {num_procs}", file=results_file) print(f"#Dofs: {num_dofs}", file=results_file) print(f"#Slaves: {num_slaves}", file=results_file) print(f"#Iterations: {it}", file=results_file) operations = [ "Solve", "Assemble-matrix (C++)", "MPC-init", "Contact-constraint", "FacetNormal", "Assemble-vector (C++)", "Backsubstitution", ] if comm.rank == 0: print("Operation #Calls Avg Min Max", file=results_file) for op in operations: op_timing = timing(f"{num_dofs}: {op}") num_calls = op_timing[0] wall_time = op_timing[1] avg_time = comm.allreduce(wall_time, op=MPI.SUM) / comm.size min_time = comm.allreduce(wall_time, op=MPI.MIN) max_time = comm.allreduce(wall_time, op=MPI.MAX) if comm.rank == 0: print(op, num_calls, avg_time, min_time, max_time, file=results_file) list_timings(MPI.COMM_WORLD, [TimingType.wall]) b.destroy() solver.destroy() if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument( "--theta", default=np.pi / 3, type=np.float64, dest="theta", help="Rotation angle around axis [1, 1, 0]", ) parser.add_argument("--ref", default=0, type=np.int32, dest="ref", help="Numer of mesh refinements") parser.add_argument("--N0", default=3, type=np.int32, dest="N0", help="Initial mesh resolution") hex = parser.add_mutually_exclusive_group(required=False) hex.add_argument("--hex", dest="hex", action="store_true", help="Use hexahedron mesh", default=False) slip = parser.add_mutually_exclusive_group(required=False) slip.add_argument( "--no-slip", dest="noslip", action="store_true", help="Use no-slip constraint", default=False, ) args = parser.parse_args() ct = CellType.hexahedron if args.hex else CellType.tetrahedron # Create cache demo_stacked_cubes(theta=args.theta, ct=ct, num_refinements=0, N0=3, noslip=args.noslip, timings=False) # Run benchmark demo_stacked_cubes( theta=args.theta, ct=ct, num_refinements=args.ref, N0=args.N0, noslip=args.noslip, timings=True, ) dolfinx_mpc-0.9.1/python/benchmarks/bench_elasticity.py000066400000000000000000000200371476141270300233530ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import resource from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from typing import Optional from mpi4py import MPI from petsc4py import PETSc import basix.ufl import h5py import numpy as np from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import ( Constant, Function, dirichletbc, form, functionspace, locate_dofs_topological, set_bc, ) from dolfinx.io import XDMFFile from dolfinx.mesh import create_unit_cube, locate_entities_boundary, meshtags, refine from ufl import Identity, TestFunction, TrialFunction, ds, dx, grad, inner, sym, tr from dolfinx_mpc import MultiPointConstraint, apply_lifting, assemble_matrix, assemble_vector from dolfinx_mpc.utils import log_info, rigid_motions_nullspace def bench_elasticity_one( r_lvl: int = 0, out_hdf5: Optional[h5py.File] = None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, ): N = 3 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N) for i in range(r_lvl): mesh.topology.create_entities(mesh.topology.dim - 2) mesh = refine(mesh, redistribute=True) fdim = mesh.topology.dim - 1 el = basix.ufl.element( "Lagrange", mesh.topology.cell_name(), 1, shape=(mesh.geometry.dim,), dtype=default_real_type ) V = functionspace(mesh, el) # Generate Dirichlet BC on lower boundary (Fixed) u_bc = Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(0.0) u_bc.x.petsc_vec.destroy() def boundaries(x): return np.isclose(x[0], np.finfo(float).eps) facets = locate_entities_boundary(mesh, fdim, boundaries) topological_dofs = locate_dofs_topological(V, fdim, facets) bc = dirichletbc(u_bc, topological_dofs) bcs = [bc] # Create traction meshtag def traction_boundary(x): return np.isclose(x[0], 1) t_facets = locate_entities_boundary(mesh, fdim, traction_boundary) facet_values = np.ones(len(t_facets), dtype=np.int32) arg_sort = np.argsort(t_facets) mt = meshtags(mesh, fdim, t_facets[arg_sort], facet_values[arg_sort]) # Define variational problem u = TrialFunction(V) v = TestFunction(V) # Elasticity parameters E = 1.0e4 nu = 0.1 mu = Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) g = Constant(mesh, (0, 0, -1e2)) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(g, v) * ds(domain=mesh, subdomain_data=mt, subdomain_id=1) # Create MPC with Timer("~Elasticity: Init constraint"): def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {l2b([1, 0, 0]): {l2b([1, 0, 1]): 0.5}} mpc = MultiPointConstraint(V) mpc.create_general_constraint(s_m_c, 2, 2) mpc.finalize() # Setup MPC system bilinear_form = form(a) linear_form = form(rhs) with Timer("~Elasticity: Assemble LHS and RHS"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) b = assemble_vector(linear_form, mpc) # Apply boundary conditions apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(b, bcs) # Create functionspace and function for mpc vector # Solve Linear problem solver = PETSc.KSP().create(mesh.comm) # type: ignore opts = PETSc.Options() # type: ignore if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts["pc_hypre_type"] = "boomeramg" opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_rtol"] = 1.0e-10 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = "scalable" # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver with Timer("~Elasticity: Solve problem") as timer: null_space = rigid_motions_nullspace(mpc.function_space) A.setNearNullSpace(null_space) solver.setFromOptions() solver.setOperators(A) # Solve linear problem uh = b.copy() uh.set(0) solver.solve(b, uh) uh.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # type: ignore mpc.backsubstitution(uh) solver_time = timer.elapsed() it = solver.getIterationNumber() if kspview: solver.view() # Print max usage of summary mem = sum(MPI.COMM_WORLD.allgather(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if MPI.COMM_WORLD.rank == 0: print(f"Rlvl {r_lvl}, Iterations {it}") print(f"Rlvl {r_lvl}, Max usage {mem} (kb), #dofs {num_dofs}") if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = solver_time[0] if xdmf: # Write solution to file u_h = Function(mpc.function_space) u_h.x.petsc_vec.setArray(uh.array) u_h.name = "u_mpc" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) fname = outdir / f"bench_elasticity_{r_lvl}.xdmf" with XDMFFile(mesh.comm, fname, "w") as outfile: outfile.write_mesh(mesh) outfile.write_function(u_h) if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--nref", default=1, type=int, dest="n_ref", help="Number of spatial refinements") parser.add_argument("--xdmf", action="store_true", dest="xdmf", help="XDMF-output of function (Default false)") parser.add_argument("--timings", action="store_true", dest="timings", help="List timings (Default false)") parser.add_argument("--kspview", action="store_true", dest="kspview", help="View PETSc progress") parser.add_argument("-o", default="elasticity_one.hdf5", dest="hdf5", help="Name of HDF5 output file") solver_parser = parser.add_mutually_exclusive_group(required=False) solver_parser.add_argument( "--boomeramg", dest="boomeramg", default=True, action="store_true", help="Use BoomerAMG preconditioner (Default)", ) solver_parser.add_argument("--gamg", dest="boomeramg", action="store_false", help="Use PETSc GAMG preconditioner") args = parser.parse_args() N = args.n_ref + 1 # Setup hd5f output file h5f = h5py.File(args.hdf5, "w", driver="mpio", comm=MPI.COMM_WORLD) h5f.create_dataset("its", (N,), dtype=np.int32) h5f.create_dataset("num_dofs", (N,), dtype=np.int32) sd = h5f.create_dataset("solve_time", (N, MPI.COMM_WORLD.size), dtype=np.float64) solver = "BoomerAMG" if args.boomeramg else "GAMG" sd.attrs["solver"] = np.bytes_(solver) # Loop over refinement levels for i in range(N): log_info(f"Run {i} in progress") bench_elasticity_one(r_lvl=i, out_hdf5=h5f, xdmf=args.xdmf, boomeramg=args.boomeramg, kspview=args.kspview) if args.timings and i == N - 1: list_timings(MPI.COMM_WORLD, [TimingType.wall]) h5f.close() dolfinx_mpc-0.9.1/python/benchmarks/bench_elasticity_edge.py000066400000000000000000000232221476141270300243360ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import resource from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import basix.ufl import h5py import numpy as np from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import ( Constant, Function, dirichletbc, form, functionspace, locate_dofs_topological, ) from dolfinx.fem.petsc import set_bc from dolfinx.io import XDMFFile from dolfinx.mesh import CellType, create_unit_cube, locate_entities_boundary, meshtags from ufl import ( Identity, SpatialCoordinate, TestFunction, TrialFunction, as_vector, ds, dx, grad, inner, sym, tr, ) from dolfinx_mpc import MultiPointConstraint, apply_lifting, assemble_matrix, assemble_vector from dolfinx_mpc.utils import log_info, rigid_motions_nullspace def bench_elasticity_edge( tetra: bool = True, r_lvl: int = 0, out_hdf5=None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, degree: int = 1, info: bool = False, ): N = 3 for i in range(r_lvl): N *= 2 ct = CellType.tetrahedron if tetra else CellType.hexahedron mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, ct) el = basix.ufl.element( "Lagrange", mesh.topology.cell_name(), int(degree), shape=(mesh.geometry.dim,), dtype=default_real_type ) V = functionspace(mesh, el) # Generate Dirichlet BC (Fixed) u_bc = Function(V) u_bc.x.array[:] = 0 def boundaries(x): return np.isclose(x[0], 0, 500 * np.finfo(x.dtype).resolution) fdim = mesh.topology.dim - 1 facets = locate_entities_boundary(mesh, fdim, boundaries) topological_dofs = locate_dofs_topological(V, fdim, facets) bc = dirichletbc(u_bc, topological_dofs) bcs = [bc] def PeriodicBoundary(x): return np.logical_and(np.isclose(x[0], 1), np.isclose(x[2], 0)) def periodic_relation(x): out_x = np.zeros(x.shape) out_x[0] = x[0] out_x[1] = x[1] out_x[2] = x[2] + 1 return out_x with Timer("~Elasticity: Initialize MPC"): edim = mesh.topology.dim - 2 edges = locate_entities_boundary(mesh, edim, PeriodicBoundary) arg_sort = np.argsort(edges) periodic_mt = meshtags(mesh, edim, edges[arg_sort], np.full(len(edges), 2, dtype=np.int32)) mpc = MultiPointConstraint(V) mpc.create_periodic_constraint_topological( V, periodic_mt, 2, periodic_relation, bcs, scale=default_scalar_type(0.5) ) mpc.finalize() # Create traction meshtag def traction_boundary(x): return np.isclose(x[0], 1) t_facets = locate_entities_boundary(mesh, fdim, traction_boundary) facet_values = np.ones(len(t_facets), dtype=np.int32) arg_sort = np.argsort(t_facets) mt = meshtags(mesh, fdim, t_facets[arg_sort], facet_values) # Elasticity parameters E = 1.0e4 nu = 0.1 mu = Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) g = Constant(mesh, default_scalar_type((0, 0, -1e2))) x = SpatialCoordinate(mesh) f = Constant(mesh, default_scalar_type(1e3)) * as_vector((0, -((x[2] - 0.5) ** 2), (x[1] - 0.5) ** 2)) # Stress computation def epsilon(v): return sym(grad(v)) def sigma(v): return 2.0 * mu * epsilon(v) + lmbda * tr(epsilon(v)) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(g, v) * ds(domain=mesh, subdomain_data=mt, subdomain_id=1) + inner(f, v) * dx # Setup MPC system if info: log_info(f"Run {r_lvl}: Assembling matrix and vector") bilinear_form = form(a) linear_form = form(rhs) with Timer("~Elasticity: Assemble LHS and RHS"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) b = assemble_vector(linear_form, mpc) # Create nullspace for elasticity problem and assign to matrix null_space = rigid_motions_nullspace(mpc.function_space) A.setNearNullSpace(null_space) # Apply boundary conditions apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(b, bcs) opts = PETSc.Options() # type: ignore if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts["pc_hypre_type"] = "boomeramg" opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = "scalable" opts["pc_gamg_square_graph"] = 2 opts["pc_gamg_threshold"] = 0.02 # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Setup PETSc solver solver = PETSc.KSP().create(mesh.comm) # type: ignore solver.setFromOptions() # type: ignore if info: log_info(f"Run {r_lvl}: Solving") with Timer("~Elasticity: Solve problem") as timer: solver.setOperators(A) uh = Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) solver_time = timer.elapsed() if kspview: solver.view() mem = sum(MPI.COMM_WORLD.allgather(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) it = solver.getIterationNumber() num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("num_slaves") d_set[r_lvl, MPI.COMM_WORLD.rank] = mpc.num_local_slaves d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = solver_time[0] if info: log_info(f"Lvl: {r_lvl}, Its: {it}, max Mem: {mem}, dim(V): {num_dofs}") if xdmf: # Write solution to file u_h = Function(mpc.function_space) u_h.x.petsc_vec.setArray(uh.array) u_h.name = "u_mpc" results = Path("results").absolute() results.mkdir(exist_ok=True) fname = results / f"bench_elasticity_edge_{r_lvl}.xdmf" with XDMFFile(mesh.comm, fname, "w") as outfile: outfile.write_mesh(mesh) outfile.write_function(u_h) if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--nref", default=1, type=np.int8, dest="n_ref", help="Number of spatial refinements") parser.add_argument("--degree", default=1, type=np.int8, dest="degree", help="CG Function space degree") parser.add_argument("--xdmf", action="store_true", dest="xdmf", help="XDMF-output of function (Default false)") parser.add_argument("--timings", action="store_true", dest="timings", help="List timings (Default false)") parser.add_argument( "--info", action="store_true", dest="info", help="Set loglevel to info (Default false)", default=False, ) parser.add_argument("--kspview", action="store_true", dest="kspview", help="View PETSc progress") ct_parser = parser.add_mutually_exclusive_group(required=False) ct_parser.add_argument("--tet", dest="tetra", action="store_true", help="Tetrahedron elements") ct_parser.add_argument("--hex", dest="tetra", action="store_false", help="Hexahedron elements") solver_parser = parser.add_mutually_exclusive_group(required=False) solver_parser.add_argument( "--boomeramg", dest="boomeramg", default=True, action="store_true", help="Use BoomerAMG preconditioner (Default)", ) solver_parser.add_argument("--gamg", dest="boomeramg", action="store_false", help="Use PETSc GAMG preconditioner") args = parser.parse_args() N = args.n_ref + 1 out_file = Path("output/ench_edge_output.hdf5").absolute() out_file.parent.mkdir(exist_ok=True) h5f = h5py.File(out_file, "w", driver="mpio", comm=MPI.COMM_WORLD) h5f.create_dataset("its", (N,), dtype=np.int32) h5f.create_dataset("num_dofs", (N,), dtype=np.int32) h5f.create_dataset("num_slaves", (N, MPI.COMM_WORLD.size), dtype=np.int32) sd = h5f.create_dataset("solve_time", (N, MPI.COMM_WORLD.size), dtype=np.float64) solver = "BoomerAMG" if args.boomeramg else "GAMG" ct = "Tet" if args.tetra else "Hex" sd.attrs["solver"] = np.bytes_(solver) sd.attrs["degree"] = np.bytes_(str(int(args.degree))) sd.attrs["ct"] = np.bytes_(ct) for i in range(N): log_info(f"Run {i} in progress") bench_elasticity_edge( tetra=args.tetra, r_lvl=i, out_hdf5=h5f, xdmf=args.xdmf, boomeramg=args.boomeramg, kspview=args.kspview, degree=args.degree, info=args.info, ) if args.timings and i == N - 1: list_timings(MPI.COMM_WORLD, [TimingType.wall]) h5f.close() dolfinx_mpc-0.9.1/python/benchmarks/bench_periodic.py000066400000000000000000000217631476141270300230060ustar00rootroot00000000000000# This demo program solves Poisson's equation # # - div grad u(x, y) = f(x, y) # # on the unit square with homogeneous Dirichlet boundary conditions # at y = 0, 1 and periodic boundary conditions at x = 0, 1. # # Copyright (C) Jørgen S. Dokken 2020. # # This file is part of DOLFINX_MPC. # # SPDX-License-Identifier: MIT from __future__ import annotations from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import h5py import numpy as np from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import Function, dirichletbc, form, functionspace, locate_dofs_geometrical from dolfinx.fem.petsc import set_bc from dolfinx.io import XDMFFile from dolfinx.mesh import CellType, create_unit_cube, locate_entities_boundary, meshtags from ufl import SpatialCoordinate, TestFunction, TrialFunction, dx, exp, grad, inner, pi, sin from dolfinx_mpc import MultiPointConstraint, apply_lifting, assemble_matrix, assemble_vector from dolfinx_mpc.utils import log_info def demo_periodic3D(tetra, r_lvl=0, out_hdf5=None, xdmf=False, boomeramg=False, kspview=False, degree=1): # Create mesh and function space log_info(f"Run {r_lvl}: Create mesh") ct = CellType.tetrahedron if tetra else CellType.hexahedron # Tet setup N = 3 for i in range(r_lvl): N *= 2 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, ct) V = functionspace(mesh, ("CG", degree)) # Create Dirichlet boundary condition def dirichletboundary(x): return np.logical_or( np.logical_or(np.isclose(x[1], 0), np.isclose(x[1], 1)), np.logical_or(np.isclose(x[2], 0), np.isclose(x[2], 1)), ) mesh.topology.create_connectivity(2, 1) geometrical_dofs = locate_dofs_geometrical(V, dirichletboundary) bc = dirichletbc(default_scalar_type(0), geometrical_dofs, V) bcs = [bc] def PeriodicBoundary(x): return np.isclose(x[0], 1) def periodic_relation(x): out_x = np.zeros(x.shape) out_x[0] = 1 - x[0] out_x[1] = x[1] out_x[2] = x[2] return out_x num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs log_info(f"Run {r_lvl}: Create MultiPoint Constraint {num_dofs}") with Timer("~Periodic: Initialize periodic constraint"): facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, PeriodicBoundary) arg_sort = np.argsort(facets) mt = meshtags(mesh, mesh.topology.dim - 1, facets[arg_sort], np.full(len(facets), 2, dtype=np.int32)) mpc = MultiPointConstraint(V) mpc.create_periodic_constraint_topological(V, mt, 2, periodic_relation, bcs) mpc.finalize() # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx x = SpatialCoordinate(mesh) dx_ = x[0] - 0.9 dy_ = x[1] - 0.5 dz_ = x[2] - 0.1 f = x[0] * sin(5.0 * pi * x[1]) + 1.0 * exp(-(dx_**2 + dy_**2 + dz_**2) / 0.02) rhs = inner(f, v) * dx # Assemble LHS and RHS with multi-point constraint log_info(f"Run {r_lvl}: Assemble matrix") bilinear_form = form(a) with Timer(f"~Periodic {r_lvl}: Assemble matrix (cached)"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) log_info(f"Run {r_lvl}: Assembling vector") linear_form = form(rhs) with Timer(f"~Periodic: {r_lvl} Assemble vector (Total time)"): b = assemble_vector(linear_form, mpc) # Apply boundary conditions log_info(f"Run {r_lvl}: Apply lifting") apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) set_bc(b, bcs) # Create nullspace nullspace = PETSc.NullSpace().create(constant=True) # Set PETSc solver options opts = PETSc.Options() if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts["pc_hypre_type"] = "boomeramg" opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-12 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_sym_graph"] = True # Use Chebyshev smoothing for multigrid opts["mg_levels_ksp_type"] = "richardson" opts["mg_levels_pc_type"] = "sor" # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Solve linear problem log_info(f"Run {r_lvl}: Solving") solver = PETSc.KSP().create(mesh.comm) with Timer("~Periodic: Solve") as timer: # Create solver, set operator and options PETSc.Mat.setNearNullSpace(A, nullspace) uh = Function(mpc.function_space) uh.x.array[:] = 0 solver.setFromOptions() solver.setOperators(A) solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) solver_time = timer.elapsed() if kspview: solver.view() # Output information to HDF5 it = solver.getIterationNumber() num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("num_slaves") d_set[r_lvl, MPI.COMM_WORLD.rank] = mpc.num_local_slaves d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = solver_time[0] if MPI.COMM_WORLD.rank == 0: print(f"Rlvl {r_lvl}, Iterations {it}") # Output solution to XDMF if xdmf: # Create function space with correct index map for MPC u_h = Function(mpc.function_space) u_h.x.petsc_vec.setArray(uh.array) # Name formatting of functions ext = "tet" if tetra else "hex" mesh.name = f"mesh_{ext}" u_h.name = f"u_{ext}" results = Path("results").absolute() results.mkdir(exist_ok=True) fname = results / f"bench_periodic3d_{r_lvl}_{ext}.xdmf" with XDMFFile(mesh.comm, fname, "w") as out_xdmf: out_xdmf.write_mesh(mesh) out_xdmf.write_function(u_h, 0.0, f"Xdmf/Domain/Grid[@Name='{mesh.name}'][1]") if __name__ == "__main__": # Set Argparser defaults parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--nref", default=1, type=np.int8, dest="n_ref", help="Number of spatial refinements") parser.add_argument("--degree", default=1, type=np.int8, dest="degree", help="CG Function space degree") parser.add_argument("--xdmf", action="store_true", dest="xdmf", help="XDMF-output of function (Default false)") parser.add_argument("--timings", action="store_true", dest="timings", help="List timings (Default false)") parser.add_argument("--kspview", action="store_true", dest="kspview", help="View PETSc progress") parser.add_argument("-o", default="periodic_output.hdf5", dest="hdf5", help="Name of HDF5 output file") ct_parser = parser.add_mutually_exclusive_group(required=False) ct_parser.add_argument("--tet", dest="tetra", action="store_true", help="Tetrahedron elements") ct_parser.add_argument("--hex", dest="tetra", action="store_false", help="Hexahedron elements") solver_parser = parser.add_mutually_exclusive_group(required=False) solver_parser.add_argument( "--boomeramg", dest="boomeramg", default=True, action="store_true", help="Use BoomerAMG preconditioner (Default)", ) solver_parser.add_argument("--gamg", dest="boomeramg", action="store_false", help="Use PETSc GAMG preconditioner") args = parser.parse_args() N = args.n_ref + 1 out_file = Path(args.hdf5).absolute() out_file.parent.mkdir(exist_ok=True) # Prepare output HDF5 file h5f = h5py.File(args.hdf5, "w", driver="mpio", comm=MPI.COMM_WORLD) h5f.create_dataset("its", (N,), dtype=np.int32) h5f.create_dataset("num_dofs", (N,), dtype=np.int32) h5f.create_dataset("num_slaves", (N, MPI.COMM_WORLD.size), dtype=np.int32) sd = h5f.create_dataset("solve_time", (N, MPI.COMM_WORLD.size), dtype=np.float64) solver = "BoomerAMG" if args.boomeramg else "GAMG" ct = "Tet" if args.tetra else "Hex" sd.attrs["solver"] = np.bytes_(solver) sd.attrs["degree"] = np.bytes_(str(int(args.degree))) sd.attrs["ct"] = np.bytes_(ct) # Loop over refinements for i in range(N): log_info(f"Run {i} in progress") demo_periodic3D( args.tetra, r_lvl=i, out_hdf5=h5f, xdmf=args.xdmf, boomeramg=args.boomeramg, kspview=args.kspview, degree=int(args.degree), ) # List_timings if args.timings and i == N - 1: list_timings(MPI.COMM_WORLD, [TimingType.wall]) h5f.close() dolfinx_mpc-0.9.1/python/benchmarks/post_proc.py000066400000000000000000000116331476141270300220540ustar00rootroot00000000000000from __future__ import annotations import matplotlib.pyplot as plt import numpy as np # Res 0.1 31776, 0.05 234546, 0.025 1801086, 0.02 3488856, 0.0175 5147961,0.015 7960200 dofs = [31776, 234546, 1801086, 3488856, 5147961, 7960200] def visualize_side_by_side(dofs): fig, ax = plt.subplots() plt.grid("on", zorder=1) procs = [] first = True slaves = [] totals = np.zeros(len(dofs)) for i, dof in enumerate(dofs): infile = open("results_bench_{0:d}.txt".format(dof), "r") # Read problem info procs.append(int(infile.readline().split(": ")[-1].strip("\n"))) # Skip num dofs infile.readline() slaves.append(int(infile.readline().split(": ")[-1].strip("\n"))) solve_iterations = int(infile.readline().split(": ")[-1].strip("\n")) # Skip info line infile.readline() # Read timings operations = infile.readlines() colors = [ "tab:blue", "tab:brown", "tab:orange", "tab:green", "tab:red", "tab:purple", "tab:cyan", "tab:olive", ] total_time = 0 for j, line in enumerate(operations): data = line.strip("\n").split(" ") if data[0] == "Backsubstitution": continue if first: plt.bar( i, float(data[2]), 0.5, bottom=total_time, label=data[0], color=colors[j], zorder=2, ) else: plt.bar(i, float(data[2]), 0.5, bottom=total_time, color=colors[j], zorder=2) if data[0] == "Solve": ax.annotate( "{0:d}".format(solve_iterations), xy=(i, total_time + float(data[2]) / 2), xytext=(0, 0), textcoords="offset points", ha="center", va="bottom", ) total_time += float(data[2]) first = False totals[i] = total_time ax.set_xticks(range(len(dofs))) labels = dofs ax.set_xticklabels(labels) assert np.allclose(procs, procs[0]) # Shrink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ax.legend(bbox_to_anchor=(0.95, 0.5)) plt.xlabel("Degrees of freedom") plt.title(f"Average runtime of operations with {procs[0]} MPI ranks") plt.ylabel("Runtime (s)") plt.savefig("comparison_bars.png") # Second figure plt.figure() plt.ylabel("Runtime (s)") plt.xlabel("Degrees of freedom") power_min = int(np.log10(min(dofs))) power_max = int(np.log10(max(dofs)) + 1) power_tmin = int(np.log10(min(totals)) - 1) power_tmax = int(np.log10(max(totals)) + 1) plt.axis((10**power_min, 10**power_max, 10**power_tmin, 10**power_tmax)) plt.plot(dofs, totals, "-ro", label="Simulations") # 7 plt.plot([3.1 * 10**4, 3.1 * 10**7], [4.3 * 10**-1, 4.3 * 10**2], "--g", label="Order 1") # 6 procs # plt.plot([3.1 * 10**4, 3.1 * 10**7], [4.8 * 10**-1, 4.8 * 10**2], "--g", label="Order 1") # 4 procs # plt.plot([3.1 * 10**4, 3.1 * 10**7], [5.7 * 10**-1, 5.7 * 10**2], "--g", label="Order 1") plt.title(f"Total runtime of core operations with {procs[0]} MPI ranks") plt.legend() plt.grid() plt.xscale("log") plt.yscale("log") plt.savefig("comparison.png") def visualize_single(dof): infile = open("results_bench_{0:d}.txt".format(dof), "r") # Read problem info procs = infile.readline().split(": ")[-1].strip("\n") # Skip num dofs infile.readline() slaves = int(infile.readline().split(": ")[-1].strip("\n")) infile.readline() # solve_iterations = int(infile.readline().split(": ")[-1].strip("\n")) # Skip info line infile.readline() # Read timings operations = infile.readlines() colors = [ "tab:blue", "tab:brown", "tab:orange", "tab:green", "tab:red", "tab:purple", "tab:cyan", "tab:olive", ] fig, ax = plt.subplots() plt.grid("on", zorder=1) for j, line in enumerate(operations): data = line.strip("\n").split(" ") if data[0] == "Backsubstitution": continue plt.bar(j, float(data[2]), 0.5, label=data[0], color=colors[j], zorder=2) ax.set_xticks([]) # ax.set_yscale("log") plt.legend() plt.ylabel("Runtime (s)") # Shrink current axis by 20% # box = ax.get_position() # ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # ax.legend(bbox_to_anchor=(0.95, 0.5)) plt.title(f"Average runtime of operations with {dof} ({slaves} slaves) on {procs} MPI ranks") plt.savefig(f"comparison_{slaves}.png") visualize_single(max(dofs)) visualize_side_by_side(dofs) dolfinx_mpc-0.9.1/python/benchmarks/ref_elasticity.py000066400000000000000000000220731476141270300230520ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import resource from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from time import perf_counter from typing import Optional from mpi4py import MPI from petsc4py import PETSc import basix.ufl import h5py import numpy as np from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import ( Constant, Function, dirichletbc, form, functionspace, locate_dofs_topological, ) from dolfinx.fem.petsc import apply_lifting, assemble_matrix, assemble_vector, set_bc from dolfinx.io import XDMFFile from dolfinx.log import LogLevel, log, set_log_level from dolfinx.mesh import CellType, create_unit_cube, locate_entities_boundary, meshtags, refine from ufl import ( Identity, SpatialCoordinate, TestFunction, TrialFunction, as_vector, ds, dx, grad, inner, sym, tr, ) from dolfinx_mpc.utils import rigid_motions_nullspace def ref_elasticity( tetra: bool = True, r_lvl: int = 0, out_hdf5: Optional[h5py.File] = None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, degree: int = 1, ): if tetra: N = 3 if degree == 1 else 2 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N) else: N = 3 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) for i in range(r_lvl): # set_log_level(LogLevel.INFO) N *= 2 if tetra: mesh = refine(mesh, redistribute=True) else: mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) # set_log_level(LogLevel.ERROR) N = degree * N fdim = mesh.topology.dim - 1 el = basix.ufl.element( "Lagrange", mesh.topology.cell_name(), 1, shape=(mesh.geometry.dim,), dtype=default_real_type ) V = functionspace(mesh, el) # Generate Dirichlet BC on lower boundary (Fixed) u_bc = Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(0.0) u_bc.x.petsc_vec.destroy() def boundaries(x): return np.isclose(x[0], np.finfo(float).eps) facets = locate_entities_boundary(mesh, fdim, boundaries) topological_dofs = locate_dofs_topological(V, fdim, facets) bc = dirichletbc(u_bc, topological_dofs) bcs = [bc] # Create traction meshtag def traction_boundary(x): return np.isclose(x[0], 1) t_facets = locate_entities_boundary(mesh, fdim, traction_boundary) facet_values = np.ones(len(t_facets), dtype=np.int32) arg_sort = np.argsort(t_facets) mt = meshtags(mesh, fdim, t_facets[arg_sort], facet_values[arg_sort]) # Elasticity parameters E = default_scalar_type(1.0e4) nu = 0.1 mu = Constant(mesh, E / (2.0 * (1.0 + nu))) lmbda = Constant(mesh, E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))) g = Constant(mesh, default_scalar_type((0, 0, -1e2))) x = SpatialCoordinate(mesh) f = Constant(mesh, default_scalar_type(1e4)) * as_vector((0, -((x[2] - 0.5) ** 2), (x[1] - 0.5) ** 2)) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(g, v) * ds(domain=mesh, subdomain_data=mt, subdomain_id=1) + inner(f, v) * dx num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if MPI.COMM_WORLD.rank == 0: print("Problem size {0:d} ".format(num_dofs)) # Generate reference matrices and unconstrained solution bilinear_form = form(a) A_org = assemble_matrix(bilinear_form, bcs) A_org.assemble() null_space_org = rigid_motions_nullspace(V) A_org.setNearNullSpace(null_space_org) linear_form = form(rhs) L_org = assemble_vector(linear_form) apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(L_org, bcs) opts = PETSc.Options() # type: ignore if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts["pc_hypre_type"] = "boomeramg" opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = "scalable" opts["pc_gamg_square_graph"] = 2 opts["pc_gamg_threshold"] = 0.02 # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Create solver, set operator and options solver = PETSc.KSP().create(mesh.comm) # type: ignore solver.setFromOptions() solver.setOperators(A_org) # Solve linear problem u_ = Function(V) start = perf_counter() with Timer("Ref solve"): solver.solve(L_org, u_.x.petsc_vec) end = perf_counter() u_.x.scatter_forward() if kspview: solver.view() it = solver.getIterationNumber() if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = end - start if MPI.COMM_WORLD.rank == 0: print("Refinement level {0:d}, Iterations {1:d}".format(r_lvl, it)) # List memory usage mem = sum(MPI.COMM_WORLD.allgather(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) if MPI.COMM_WORLD.rank == 0: print("{1:d}: Max usage after trad. solve {0:d} (kb)".format(mem, r_lvl)) if xdmf: # Name formatting of functions u_.name = "u_unconstrained" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) fname = outdir / "ref_elasticity_{0:d}.xdmf".format(r_lvl) with XDMFFile(mesh.comm, fname, "w") as out_xdmf: out_xdmf.write_mesh(mesh) out_xdmf.write_function(u_, 0.0, "Xdmf/Domain/Grid[@Name='{0:s}'][1]".format(mesh.name)) if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--nref", default=1, type=np.int8, dest="n_ref", help="Number of spatial refinements") parser.add_argument("--degree", default=1, type=np.int8, dest="degree", help="CG Function space degree") parser.add_argument("--xdmf", action="store_true", dest="xdmf", help="XDMF-output of function (Default false)") parser.add_argument("--timings", action="store_true", dest="timings", help="List timings (Default false)") parser.add_argument("--kspview", action="store_true", dest="kspview", help="View PETSc progress") parser.add_argument("-o", default="elasticity_ref.hdf5", dest="hdf5", help="Name of HDF5 output file") ct_parser = parser.add_mutually_exclusive_group(required=False) ct_parser.add_argument("--tet", dest="tetra", action="store_true", help="Tetrahedron elements") ct_parser.add_argument("--hex", dest="tetra", action="store_false", help="Hexahedron elements") solver_parser = parser.add_mutually_exclusive_group(required=False) solver_parser.add_argument( "--boomeramg", dest="boomeramg", default=True, action="store_true", help="Use BoomerAMG preconditioner (Default)", ) solver_parser.add_argument("--gamg", dest="boomeramg", action="store_false", help="Use PETSc GAMG preconditioner") args = parser.parse_args() N = args.n_ref + 1 # Setup hd5f output file h5f = h5py.File(args.hdf5, "w", driver="mpio", comm=MPI.COMM_WORLD) h5f.create_dataset("its", (N,), dtype=np.int32) h5f.create_dataset("num_dofs", (N,), dtype=np.int32) sd = h5f.create_dataset("solve_time", (N, MPI.COMM_WORLD.size), dtype=np.float64) solver = "BoomerAMG" if args.boomeramg else "GAMG" ct = "Tet" if args.tetra else "Hex" sd.attrs["solver"] = np.bytes_(solver) sd.attrs["degree"] = np.bytes_(str(int(args.degree))) sd.attrs["ct"] = np.bytes_(ct) # Loop over refinement levels for i in range(N): if MPI.COMM_WORLD.rank == 0: set_log_level(LogLevel.INFO) log(LogLevel.INFO, "Run {0:1d} in progress".format(i)) set_log_level(LogLevel.ERROR) ref_elasticity( tetra=args.tetra, r_lvl=i, out_hdf5=h5f, xdmf=args.xdmf, boomeramg=args.boomeramg, kspview=args.kspview, degree=args.degree, ) if args.timings and i == N - 1: list_timings(MPI.COMM_WORLD, [TimingType.wall]) h5f.close() dolfinx_mpc-0.9.1/python/benchmarks/ref_periodic.py000066400000000000000000000201741476141270300224760ustar00rootroot00000000000000# This demo program solves Poisson's equation # # - div grad u(x, y) = f(x, y) # # on the unit square with homogeneous Dirichlet boundary conditions # at y = 0, 1. # # Original implementation in DOLFIN by Kristian B. Oelgaard and Anders Logg # This implementation can be found at: # https://bitbucket.org/fenics-project/dolfin/src/master/python/demo/documented/periodic/demo_periodic.py # # Copyright (C) Jørgen S. Dokken 2020. # # This file is part of DOLFINX_MPC. # # SPDX-License-Identifier: MIT from __future__ import annotations from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from time import perf_counter from typing import Optional from mpi4py import MPI from petsc4py import PETSc import h5py import numpy as np from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import Function, dirichletbc, form, functionspace, locate_dofs_geometrical from dolfinx.fem.petsc import apply_lifting, assemble_matrix, assemble_vector, set_bc from dolfinx.io import XDMFFile from dolfinx.log import LogLevel, log, set_log_level from dolfinx.mesh import CellType, create_unit_cube, refine from ufl import SpatialCoordinate, TestFunction, TrialFunction, dx, exp, grad, inner, pi, sin def reference_periodic( tetra: bool, r_lvl: int = 0, out_hdf5: Optional[h5py.File] = None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, degree: int = 1, ): # Create mesh and finite element if tetra: # Tet setup N = 3 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N) for i in range(r_lvl): mesh.topology.create_entities(mesh.topology.dim - 2) mesh = refine(mesh, redistribute=True) N *= 2 else: # Hex setup N = 3 for i in range(r_lvl): N *= 2 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) V = functionspace(mesh, ("CG", degree)) # Create Dirichlet boundary condition def dirichletboundary(x): return np.logical_or( np.logical_or(np.isclose(x[1], 0), np.isclose(x[1], 1)), np.logical_or(np.isclose(x[2], 0), np.isclose(x[2], 1)), ) mesh.topology.create_connectivity(2, 1) geometrical_dofs = locate_dofs_geometrical(V, dirichletboundary) bc = dirichletbc(default_scalar_type(0), geometrical_dofs, V) bcs = [bc] # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx x = SpatialCoordinate(mesh) dx_ = x[0] - 0.9 dy_ = x[1] - 0.5 dz_ = x[2] - 0.1 f = x[0] * sin(5.0 * pi * x[1]) + 1.0 * exp(-(dx_ * dx_ + dy_ * dy_ + dz_ * dz_) / 0.02) rhs = inner(f, v) * dx # Assemble rhs, RHS and apply lifting bilinear_form = form(a) linear_form = form(rhs) A_org = assemble_matrix(bilinear_form, bcs) A_org.assemble() L_org = assemble_vector(linear_form) apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(L_org, bcs) # Create PETSc nullspace nullspace = PETSc.NullSpace().create(constant=True) # type: ignore PETSc.Mat.setNearNullSpace(A_org, nullspace) # type: ignore # Set PETSc options opts = PETSc.Options() # type: ignore if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts["pc_hypre_type"] = "boomeramg" opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-12 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_sym_graph"] = True # Use Chebyshev smoothing for multigrid opts["mg_levels_ksp_type"] = "richardson" opts["mg_levels_pc_type"] = "sor" # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Initialize PETSc solver, set options and operator solver = PETSc.KSP().create(mesh.comm) # type: ignore solver.setFromOptions() solver.setOperators(A_org) # Solve linear problem u_ = Function(V) start = perf_counter() with Timer("Solve"): solver.solve(L_org, u_.x.petsc_vec) end = perf_counter() u_.x.petsc_vec.ghostUpdate( addv=PETSc.InsertMode.INSERT, # type: ignore mode=PETSc.ScatterMode.FORWARD, # type: ignore ) # type: ignore if kspview: solver.view() it = solver.getIterationNumber() num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = end - start if MPI.COMM_WORLD.rank == 0: print("Rlvl {0:d}, Iterations {1:d}".format(r_lvl, it)) # Output solution to XDMF if xdmf: ext = "tet" if tetra else "hex" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) fname = outdir / "reference_periodic_{0:d}_{1:s}.xdmf".format(r_lvl, ext) u_.name = "u_" + ext + "_unconstrained" with XDMFFile(mesh.comm, fname, "w") as out_periodic: out_periodic.write_mesh(mesh) out_periodic.write_function(u_, 0.0, "Xdmf/Domain/" + "Grid[@Name='{0:s}'][1]".format(mesh.name)) if __name__ == "__main__": # Set Argparser defaults parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--nref", default=1, type=np.int8, dest="n_ref", help="Number of spatial refinements") parser.add_argument("--degree", default=1, type=np.int8, dest="degree", help="CG Function space degree") parser.add_argument("--xdmf", action="store_true", dest="xdmf", help="XDMF-output of function (Default false)") parser.add_argument("--timings", action="store_true", dest="timings", help="List timings (Default false)") parser.add_argument("--kspview", action="store_true", dest="kspview", help="View PETSc progress") parser.add_argument("-o", default="periodic_ref_output.hdf5", dest="hdf5", help="Name of HDF5 output file") ct_parser = parser.add_mutually_exclusive_group(required=False) ct_parser.add_argument("--tet", dest="tetra", action="store_true", help="Tetrahedron elements", default=True) ct_parser.add_argument("--hex", dest="tetra", action="store_false", help="Hexahedron elements") solver_parser = parser.add_mutually_exclusive_group(required=False) solver_parser.add_argument( "--boomeramg", dest="boomeramg", default=True, action="store_true", help="Use BoomerAMG preconditioner (Default)", ) solver_parser.add_argument("--gamg", dest="boomeramg", action="store_false", help="Use PETSc GAMG preconditioner") args = parser.parse_args() N = args.n_ref + 1 h5f = h5py.File("periodic_ref_output.hdf5", "w", driver="mpio", comm=MPI.COMM_WORLD) h5f.create_dataset("its", (N,), dtype=np.int32) h5f.create_dataset("num_dofs", (N,), dtype=np.int32) sd = h5f.create_dataset("solve_time", (N, MPI.COMM_WORLD.size), dtype=np.float64) solver = "BoomerAMG" if args.boomeramg else "GAMG" ct = "Tet" if args.tetra else "Hex" sd.attrs["solver"] = np.bytes_(solver) sd.attrs["degree"] = np.bytes_(str(int(args.degree))) sd.attrs["ct"] = np.bytes_(ct) for i in range(N): if MPI.COMM_WORLD.rank == 0: set_log_level(LogLevel.INFO) log(LogLevel.INFO, "Run {0:1d} in progress".format(i)) set_log_level(LogLevel.ERROR) reference_periodic( args.tetra, r_lvl=i, out_hdf5=h5f, xdmf=args.xdmf, boomeramg=args.boomeramg, kspview=args.kspview, degree=args.degree, ) if args.timings and i == N - 1: list_timings(MPI.COMM_WORLD, [TimingType.wall]) h5f.close() dolfinx_mpc-0.9.1/python/benchmarks/visualize_iterations.py000066400000000000000000000115621476141270300243210ustar00rootroot00000000000000from __future__ import annotations import argparse import sys import mpi4py import h5py import matplotlib.pyplot as plt import matplotlib.transforms as mtransforms import numpy as np from matplotlib.ticker import LogLocator, MaxNLocator, NullFormatter def visualize_elasticity(): f = h5py.File("bench_edge_output.hdf5", "r", driver="mpio", comm=mpi4py.MPI.COMM_WORLD) iterations = f.get("its")[:] dofs = f.get("num_dofs")[:] slaves = np.sum(f.get("num_slaves")[:], axis=1) solver = f.get("solve_time").attrs["solver"].decode("utf-8") ct = f.get("solve_time").attrs["ct"].decode("utf-8") degree = f.get("solve_time").attrs["degree"].decode("utf-8") f.close() fig = plt.figure() ax = fig.add_subplot(1, 1, 1) plt.plot(dofs, iterations, "-ro", label="MPC", markersize=12) f_ref = h5py.File("elasticity_ref.hdf5", "r", driver="mpio", comm=mpi4py.MPI.COMM_WORLD) iterations_ref = f_ref.get("its")[:] dofs_ref = f_ref.get("num_dofs")[:] f_ref.close() plt.plot(dofs_ref, iterations_ref, "-bs", label="Unconstrained") ax.tick_params(axis="both", which="major", labelsize=20) ax.set_xscale("log") ax.yaxis.set_major_locator(MaxNLocator(integer=True)) plt.xlabel("# DOFS", fontsize=20) plt.ylabel("# Iterations", fontsize=20) trans_offset = mtransforms.offset_copy(ax.transData, fig=fig, x=0.025, y=-0.1, units="inches") for i in range(len(iterations)): plt.text(dofs[i], iterations[i], slaves[i], transform=trans_offset, fontsize=20) plt.title("Linear elasticity (CG{0:s}, {1:s}) with {2:s}".format(degree, ct, solver), fontsize=25) plt.legend(fontsize=15) ax.minorticks_on() ax.set_ylim([0, max([max(iterations), max(iterations_ref)]) + 1]) ax.set_xlim([1e2, 1e8]) locmax = LogLocator(base=10.0, numticks=8) ax.xaxis.set_major_locator(locmax) locmin = LogLocator(base=10.0, subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9), numticks=9) ax.xaxis.set_minor_locator(locmin) ax.xaxis.set_minor_formatter(NullFormatter()) plt.grid(True, which="both", axis="both") plt.savefig("elasticity_iterations_CG{0:s}_{1:s}.png".format(degree, ct), bbox_inches="tight") def visualize_periodic(): f = h5py.File("periodic_output.hdf5", "r", driver="mpio", comm=mpi4py.MPI.COMM_WORLD) iterations = f.get("its")[:] dofs = f.get("num_dofs")[:] slaves = np.sum(f.get("num_slaves")[:], axis=1) solver = f.get("solve_time").attrs["solver"].decode("utf-8") ct = f.get("solve_time").attrs["ct"].decode("utf-8") degree = f.get("solve_time").attrs["degree"].decode("utf-8") f.close() fig = plt.figure() ax = fig.add_subplot(1, 1, 1) plt.plot(dofs, iterations, "-ro", label="MPC", markersize=12) f_ref = h5py.File("periodic_ref_output.hdf5", "r", driver="mpio", comm=mpi4py.MPI.COMM_WORLD) iterations_ref = f_ref.get("its")[:] dofs_ref = f_ref.get("num_dofs")[:] f_ref.close() plt.plot(dofs_ref, iterations_ref, "-bs", label="Unconstrained") ax.tick_params(axis="both", which="major", labelsize=20) ax.set_xscale("log") ax.yaxis.set_major_locator(MaxNLocator(integer=True)) ax.set_ylim([0, max(iterations) + 1]) ax.set_xlim([1e2, max(dofs) + 1]) plt.xlabel("# DOFS", fontsize=20) plt.ylabel("# Iterations", fontsize=20) trans_offset = mtransforms.offset_copy(ax.transData, fig=fig, x=0.025, y=0.025, units="inches") for i in range(len(iterations)): plt.text(dofs[i], iterations[i], slaves[i], transform=trans_offset, fontsize=20) plt.title("Periodic Poisson (CG {0:s}, {1:s}) with {2:s}".format(degree, ct, solver), fontsize=25) plt.legend(fontsize=15) ax.minorticks_on() ax.set_ylim([0, max(iterations) + 1]) ax.set_xlim([1e2, 1e8]) locmax = LogLocator(base=10.0, numticks=8) ax.xaxis.set_major_locator(locmax) locmin = LogLocator(base=10.0, subs=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9), numticks=9) ax.xaxis.set_minor_locator(locmin) ax.xaxis.set_minor_formatter(NullFormatter()) plt.grid(True, which="both", axis="both") plt.savefig("periodic_iterations_CG{0:s}_{1:s}.png".format(degree, ct), bbox_inches="tight") if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--elasticity", action="store_true", dest="elasticity", default=False, help="Visualize iterations for elasticity", ) parser.add_argument( "--periodic", action="store_true", dest="periodic", default=False, help="Visualize iterations for periodic", ) args = parser.parse_args() thismodule = sys.modules[__name__] periodic = elasticity = None for key in vars(args): setattr(thismodule, key, getattr(args, key)) if elasticity: visualize_elasticity() if periodic: visualize_periodic() dolfinx_mpc-0.9.1/python/demos/000077500000000000000000000000001476141270300164605ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/demos/Makefile000066400000000000000000000035251476141270300201250ustar00rootroot00000000000000contact3D-serial: python3 demo_contact_3D.py --theta 0 --timing python3 demo_contact_3D.py --theta 1.05 --timing python3 demo_contact_3D.py --gmsh --theta 0 --timing python3 demo_contact_3D.py --gmsh --theta 1.05 --timing python3 demo_contact_3D.py --gmsh --no-slip --theta 0 --timing python3 demo_contact_3D.py --gmsh --no-slip --theta 1.05 --timing python3 demo_contact_3D.py --gmsh --no-slip --hex --theta 0 --timing python3 demo_contact_3D.py --gmsh --no-slip --hex --theta 1.05 --timing contact3D-parallel: mpirun -n 4 python3 demo_contact_3D.py --theta 0 --timing mpirun -n 4 python3 demo_contact_3D.py --theta 1.05 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --theta 0 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --theta 1.05 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --no-slip --theta 0 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --no-slip --theta 1.05 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --no-slip --hex --theta 0 --timing mpirun -n 4 python3 demo_contact_3D.py --gmsh --no-slip --hex --theta 1.05 --timing contact2D-serial: python3 demo_contact_2D.py --theta 0 --timing python3 demo_contact_2D.py --theta 1.05 --timing python3 demo_contact_2D.py --gmsh --theta 0 --timing python3 demo_contact_2D.py --gmsh --theta 1.05 --timing python3 demo_contact_2D.py --quad --gmsh --theta 0 --timing python3 demo_contact_2D.py --quad --gmsh --theta 1.05 --timing contact2D-parallel: mpirun -n 4 python3 demo_contact_2D.py --theta 0 --timing mpirun -n 4 python3 demo_contact_2D.py --theta 1.05 --timing mpirun -n 4 python3 demo_contact_2D.py --gmsh --theta 0 --timing mpirun -n 4 python3 demo_contact_2D.py --gmsh --theta 1.05 --timing mpirun -n 4 python3 demo_contact_2D.py --quad --gmsh --theta 0 --timing mpirun -n 4 python3 demo_contact_2D.py --quad --gmsh --theta 1.05 --timingdolfinx_mpc-0.9.1/python/demos/create_and_export_mesh.py000066400000000000000000000700751476141270300235450ustar00rootroot00000000000000from __future__ import annotations from pathlib import Path from typing import Dict, List, Sequence, Tuple, Union from mpi4py import MPI import dolfinx.common as _common import dolfinx.cpp as _cpp import dolfinx.io as _io import dolfinx.mesh as _mesh import gmsh import numpy as np import ufl from basix.ufl import element from dolfinx.io import gmshio import dolfinx_mpc.utils as _utils def gmsh_3D_stacked( celltype: str, theta: float, res: float = 0.1, verbose: bool = False ) -> Tuple[_mesh.Mesh, _mesh.MeshTags]: if celltype == "tetrahedron": mesh, ft = generate_tet_boxes( 0, 0, 0, 1, 1, 1, 2, res, facet_markers=[[11, 5, 12, 13, 4, 14], [21, 9, 22, 23, 3, 24]], volume_markers=[1, 2], verbose=verbose, ) else: mesh, ft = generate_hex_boxes( 0, 0, 0, 1, 1, 1, 2, res, facet_markers=[[11, 5, 12, 13, 4, 14], [21, 9, 22, 23, 3, 24]], volume_markers=[1, 2], verbose=verbose, ) r_matrix = _utils.rotation_matrix([1, 1, 0], -theta) mesh.geometry.x[:] = np.dot(r_matrix, mesh.geometry.x.T).T return mesh, ft def tag_cube_model( model: gmsh.model, x0: float, y0: float, z0: float, x1: float, y1: float, z1: float, z2: float, facet_markers: Sequence[Sequence[int]], volume_markers: Sequence[int], ): """ Helper function to tag a cube and its faces """ # Create entity -> marker map (to be used after rotation) volumes = model.getEntities(3) volume_entities: Dict[str, List[int]] = { "Top": [-1, volume_markers[1]], "Bottom": [-1, volume_markers[0]], } for i, volume in enumerate(volumes): com = model.occ.getCenterOfMass(volume[0], np.abs(volume[1])) if np.isclose(com[2], (z1 - z0) / 2): bottom_index = i volume_entities["Bottom"][0] = volume[1] elif np.isclose(com[2], (z2 - z1) / 2 + z1): top_index = i volume_entities["Top"][0] = volume[1] surfaces = ["Top", "Bottom", "Left", "Right", "Front", "Back"] entities: Dict[str, Dict[str, List[List[int]]]] = { "Bottom": {key: [[], []] for key in surfaces}, "Top": {key: [[], []] for key in surfaces}, } # Identitfy entities for each surface of top and bottom cube # Physical markers for bottom cube bottom_surfaces = model.getBoundary([volumes[bottom_index]], oriented=False, recursive=False) for entity in bottom_surfaces: com = model.occ.getCenterOfMass(entity[0], entity[1]) if np.allclose(com, [(x1 - x0) / 2, y1, (z1 - z0) / 2]): entities["Bottom"]["Back"][0].append(entity[1]) entities["Bottom"]["Back"][1] = [facet_markers[0][0]] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z0]): entities["Bottom"]["Bottom"][0].append(entity[1]) entities["Bottom"]["Bottom"][1] = [facet_markers[0][1]] elif np.allclose(com, [x1, (y1 - y0) / 2, (z1 - z0) / 2]): entities["Bottom"]["Right"][0].append(entity[1]) entities["Bottom"]["Right"][1] = [facet_markers[0][2]] elif np.allclose(com, [x0, (y1 - y0) / 2, (z1 - z0) / 2]): entities["Bottom"]["Left"][0].append(entity[1]) entities["Bottom"]["Left"][1] = [facet_markers[0][3]] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z1]): entities["Bottom"]["Top"][0].append(entity[1]) entities["Bottom"]["Top"][1] = [facet_markers[0][4]] elif np.allclose(com, [(x1 - x0) / 2, y0, (z1 - z0) / 2]): entities["Bottom"]["Front"][0].append(entity[1]) entities["Bottom"]["Front"][1] = [facet_markers[0][5]] # Physical markers for top top_surfaces = model.getBoundary([volumes[top_index]], oriented=False, recursive=False) for entity in top_surfaces: com = model.occ.getCenterOfMass(entity[0], entity[1]) if np.allclose(com, [(x1 - x0) / 2, y1, (z2 - z1) / 2 + z1]): entities["Top"]["Back"][0].append(entity[1]) entities["Top"]["Back"][1] = [facet_markers[1][0]] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z1]): entities["Top"]["Bottom"][0].append(entity[1]) entities["Top"]["Bottom"][1] = [facet_markers[1][1]] elif np.allclose(com, [x1, (y1 - y0) / 2, (z2 - z1) / 2 + z1]): entities["Top"]["Right"][0].append(entity[1]) entities["Top"]["Right"][1] = [facet_markers[1][2]] elif np.allclose(com, [x0, (y1 - y0) / 2, (z2 - z1) / 2 + z1]): entities["Top"]["Left"][0].append(entity[1]) entities["Top"]["Left"][1] = [facet_markers[1][3]] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z2]): entities["Top"]["Top"][0].append(entity[1]) entities["Top"]["Top"][1] = [facet_markers[1][4]] elif np.allclose(com, [(x1 - x0) / 2, y0, (z2 - z1) / 2 + z1]): entities["Top"]["Front"][0].append(entity[1]) entities["Top"]["Front"][1] = [facet_markers[1][5]] # Note: Rotation cannot be used on recombined surfaces model.occ.synchronize() for volume in volume_entities.keys(): model.addPhysicalGroup(3, [volume_entities[volume][0]], tag=volume_entities[volume][1]) model.setPhysicalName(3, volume_entities[volume][0], volume) for box in entities.keys(): for surface in entities[box].keys(): model.addPhysicalGroup(2, entities[box][surface][0], tag=entities[box][surface][1][0]) model.setPhysicalName(2, entities[box][surface][1][0], box + ":" + surface) def generate_tet_boxes( x0: float, y0: float, z0: float, x1: float, y1: float, z1: float, z2: float, res: float, facet_markers: Sequence[Sequence[int]], volume_markers: Sequence[int], verbose: bool = False, ) -> Tuple[_mesh.Mesh, _mesh.MeshTags]: """ Generate the stacked boxes [x0,y0,z0]x[y1,y1,z1] and [x0,y0,z1] x [x1,y1,z2] with different resolution in each box. The markers are is a list of arrays containing markers array of markers for [back, bottom, right, left, top, front] per box volume_markers a list of marker per volume """ gmsh.initialize() gmsh.option.setNumber("General.Terminal", int(verbose)) if MPI.COMM_WORLD.rank == 0: gmsh.clear() # NOTE: Have to reset this until: # https://gitlab.onelab.info/gmsh/gmsh/-/issues/1001 # is in master gmsh.option.setNumber("Mesh.RecombineAll", 0) # Added tolerance to ensure that gmsh separates boxes tol = 1e-12 gmsh.model.occ.addBox(x0, y0, z0, x1 - x0, y1 - y0, z1 - z0) gmsh.model.occ.addBox(x0, y0, z1 + tol, x1 - x0, y1 - y0, z2 - z1) # Syncronize to be able to fetch entities gmsh.model.occ.synchronize() tag_cube_model(gmsh.model, x0, y0, z0, x1, y1, z1, z2, facet_markers, volume_markers) gmsh.model.mesh.field.add("Box", 1) gmsh.model.mesh.field.setNumber(1, "VIn", res) gmsh.model.mesh.field.setNumber(1, "VOut", 2 * res) gmsh.model.mesh.field.setNumber(1, "XMin", 0) gmsh.model.mesh.field.setNumber(1, "XMax", 1) gmsh.model.mesh.field.setNumber(1, "YMin", 0) gmsh.model.mesh.field.setNumber(1, "YMax", 1) gmsh.model.mesh.field.setNumber(1, "ZMin", 0) gmsh.model.mesh.field.setNumber(1, "ZMax", 1) gmsh.model.mesh.field.setAsBackgroundMesh(1) # NOTE: Need to synchronize after setting mesh sizes gmsh.model.occ.synchronize() # Generate mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) gmsh.model.mesh.generate(3) gmsh.model.mesh.setOrder(1) mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0) gmsh.finalize() return mesh, ft def generate_hex_boxes( x0: float, y0: float, z0: float, x1: float, y1: float, z1: float, z2: float, res: float, facet_markers: Sequence[Sequence[int]], volume_markers: Sequence[int], verbose: bool = False, ) -> Tuple[_mesh.Mesh, _mesh.MeshTags]: """ Generate the stacked boxes [x0,y0,z0]x[y1,y1,z1] and [x0,y0,z1] x [x1,y1,z2] with different resolution in each box. The markers are is a list of arrays containing markers array of markers for [back, bottom, right, left, top, front] per box volume_markers a list of marker per volume """ gmsh.initialize() gmsh.option.setNumber("General.Terminal", int(verbose)) if MPI.COMM_WORLD.rank == 0: gmsh.clear() gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2) gmsh.option.setNumber("Mesh.RecombineAll", 2) bottom = gmsh.model.occ.addRectangle(x0, y0, z0, x1 - x0, y1 - y0) top = gmsh.model.occ.addRectangle(x0, y0, z2, x1 - x0, y1 - y0) # Set mesh size at point gmsh.model.occ.extrude([(2, bottom)], 0, 0, z1 - z0, numElements=[int(1 / (2 * res))], recombine=True) gmsh.model.occ.extrude([(2, top)], 0, 0, z1 - z2 - 1e-12, numElements=[int(1 / (2 * res))], recombine=True) # Syncronize to be able to fetch entities gmsh.model.occ.synchronize() # Tag faces and volume tag_cube_model(gmsh.model, x0, y0, z0, x1, y1, z1, z2, facet_markers, volume_markers) # Set mesh sizes on the points from the surface we are extruding bottom_nodes = gmsh.model.getBoundary([(2, bottom)], oriented=False, recursive=True) gmsh.model.occ.mesh.setSize(bottom_nodes, res) top_nodes = gmsh.model.getBoundary([(2, top)], oriented=False, recursive=True) gmsh.model.occ.mesh.setSize(top_nodes, 2 * res) # NOTE: Need to synchronize after setting mesh sizes gmsh.model.occ.synchronize() # Generate mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) gmsh.model.mesh.generate(3) gmsh.model.mesh.setOrder(1) mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0) gmsh.clear() gmsh.finalize() MPI.COMM_WORLD.barrier() return mesh, ft def gmsh_2D_stacked(celltype: str, theta: float, verbose: bool = False) -> Tuple[_mesh.Mesh, _mesh.MeshTags]: res = 0.1 x0, y0, z0 = 0, 0, 0 x1, y1 = 1, 1 y2 = 2 # Check if GMSH is initialized gmsh.initialize() gmsh.option.setNumber("General.Terminal", int(verbose)) gmsh.clear() if MPI.COMM_WORLD.rank == 0: if celltype == "quadrilateral": gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2) gmsh.option.setNumber("Mesh.RecombineAll", 2) recombine = True else: recombine = False points = [ gmsh.model.occ.addPoint(x0, y0, z0), gmsh.model.occ.addPoint(x1, y0, z0), gmsh.model.occ.addPoint(x0, y2, z0), gmsh.model.occ.addPoint(x1, y2, z0), ] bottom = gmsh.model.occ.addLine(points[0], points[1]) top = gmsh.model.occ.addLine(points[2], points[3]) gmsh.model.occ.extrude([(1, bottom)], 0, y1 - y0, 0, numElements=[int(1 / (res))], recombine=recombine) gmsh.model.occ.extrude([(1, top)], 0, y1 - y2 - 1e-12, 0, numElements=[int(1 / (2 * res))], recombine=recombine) # Syncronize to be able to fetch entities gmsh.model.occ.synchronize() # Create entity -> marker map (to be used after rotation) volumes = gmsh.model.getEntities(2) volume_entities = {"Top": [-1, 1], "Bottom": [-1, 2]} for i, volume in enumerate(volumes): com = gmsh.model.occ.getCenterOfMass(volume[0], volume[1]) if np.isclose(com[1], (y1 - y0) / 2): bottom_index = i volume_entities["Bottom"][0] = volume[1] elif np.isclose(com[1], (y2 - y1) / 2 + y1): top_index = i volume_entities["Top"][0] = volume[1] surfaces = ["Top", "Bottom", "Left", "Right"] entities: Dict[str, Dict[str, List[List[int]]]] = { "Bottom": {key: [[], []] for key in surfaces}, "Top": {key: [[], []] for key in surfaces}, } # Identitfy entities for each surface of top and bottom cube # Bottom cube: Top, Right, Bottom, Left # Top cube : Top, Right, Bottom, Left facet_markers = [[4, 7, 5, 6], [3, 12, 9, 13]] bottom_surfaces = gmsh.model.getBoundary([volumes[bottom_index]], oriented=False, recursive=False) for entity in bottom_surfaces: com = gmsh.model.occ.getCenterOfMass(entity[0], abs(entity[1])) if np.allclose(com, [(x1 - x0) / 2, y0, z0]): entities["Bottom"]["Bottom"][0].append(entity[1]) entities["Bottom"]["Bottom"][1] = [facet_markers[0][2]] elif np.allclose(com, [x1, (y1 - y0) / 2, z0]): entities["Bottom"]["Right"][0].append(entity[1]) entities["Bottom"]["Right"][1] = [facet_markers[0][1]] elif np.allclose(com, [x0, (y1 - y0) / 2, z0]): entities["Bottom"]["Left"][0].append(entity[1]) entities["Bottom"]["Left"][1] = [facet_markers[0][3]] elif np.allclose(com, [(x1 - x0) / 2, y1, z0]): entities["Bottom"]["Top"][0].append(entity[1]) entities["Bottom"]["Top"][1] = [facet_markers[0][0]] # Physical markers for top top_surfaces = gmsh.model.getBoundary([volumes[top_index]], oriented=False, recursive=False) for entity in top_surfaces: com = gmsh.model.occ.getCenterOfMass(entity[0], abs(entity[1])) if np.allclose(com, [(x1 - x0) / 2, y1, z0]): entities["Top"]["Bottom"][0].append(entity[1]) entities["Top"]["Bottom"][1] = [facet_markers[1][2]] elif np.allclose(com, [x1, y1 + (y2 - y1) / 2, z0]): entities["Top"]["Right"][0].append(entity[1]) entities["Top"]["Right"][1] = [facet_markers[1][1]] elif np.allclose(com, [x0, y1 + (y2 - y1) / 2, z0]): entities["Top"]["Left"][0].append(entity[1]) entities["Top"]["Left"][1] = [facet_markers[1][3]] elif np.allclose(com, [(x1 - x0) / 2, y2, z0]): entities["Top"]["Top"][0].append(entity[1]) entities["Top"]["Top"][1] = [facet_markers[1][0]] # Note: Rotation cannot be used on recombined surfaces gmsh.model.occ.synchronize() for volume in volume_entities.keys(): gmsh.model.addPhysicalGroup(2, [volume_entities[volume][0]], tag=volume_entities[volume][1]) gmsh.model.setPhysicalName(2, volume_entities[volume][1], volume) for box in entities.keys(): for surface in entities[box].keys(): gmsh.model.addPhysicalGroup(1, entities[box][surface][0], tag=entities[box][surface][1][0]) gmsh.model.setPhysicalName(1, entities[box][surface][1][0], box + ":" + surface) # Set mesh sizes on the points from the surface we are extruding bottom_nodes = gmsh.model.getBoundary([volumes[bottom_index]], oriented=False, recursive=True) gmsh.model.occ.mesh.setSize(bottom_nodes, res) top_nodes = gmsh.model.getBoundary([volumes[top_index]], oriented=False, recursive=True) gmsh.model.occ.mesh.setSize(top_nodes, 2 * res) # NOTE: Need to synchronize after setting mesh sizes gmsh.model.occ.synchronize() # Generate mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) gmsh.model.mesh.generate(2) gmsh.model.mesh.setOrder(1) mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim=2) r_matrix = _utils.rotation_matrix([0, 0, 1], theta) # NOTE: Hex mesh must be rotated after generation due to gmsh API mesh.geometry.x[:] = np.dot(r_matrix, mesh.geometry.x.T).T gmsh.clear() gmsh.finalize() MPI.COMM_WORLD.barrier() return mesh, ft def mesh_2D_dolfin(celltype: str, theta: float = 0, outdir: Union[str, Path] = Path("meshes")): """ Create two 2D cubes stacked on top of each other, and the corresponding mesh markers using dolfin built-in meshes """ def find_line_function(p0, p1): """ Find line y=ax+b for each of the lines in the mesh https://mathworld.wolfram.com/Two-PointForm.html """ # Line aligned with y axis if np.isclose(p1[0], p0[0]): return lambda x: np.isclose(x[0], p0[0]) return lambda x: np.isclose(x[1], p0[1] + (p1[1] - p0[1]) / (p1[0] - p0[0]) * (x[0] - p0[0])) def over_line(p0, p1): """ Check if a point is over or under y=ax+b for each of the lines in the mesh https://mathworld.wolfram.com/Two-PointForm.html """ return lambda x: x[1] > p0[1] + (p1[1] - p0[1]) / (p1[0] - p0[0]) * (x[0] - p0[0]) # Using built in meshes, stacking cubes on top of each other N = 15 if celltype == "quadrilateral": ct = _mesh.CellType.quadrilateral elif celltype == "triangle": ct = _mesh.CellType.triangle else: raise ValueError("celltype has to be tri or quad") if MPI.COMM_WORLD.rank == 0: mesh0 = _mesh.create_unit_square(MPI.COMM_SELF, N, N, ct) mesh1 = _mesh.create_unit_square(MPI.COMM_SELF, 2 * N, 2 * N, ct) mesh0.geometry.x[:, 1] += 1 # Stack the two meshes in one mesh r_matrix = _utils.rotation_matrix([0, 0, 1], theta) points = np.vstack([mesh0.geometry.x, mesh1.geometry.x]) points = np.dot(r_matrix, points.T) points = points[:2, :].T # Transform topology info into geometry info tdim0 = mesh0.topology.dim num_cells0 = mesh0.topology.index_map(tdim0).size_local mesh0.topology.create_connectivity(tdim0, tdim0) cells0 = _cpp.mesh.entities_to_geometry(mesh0._cpp_object, tdim0, np.arange(num_cells0, dtype=np.int32), False) tdim1 = mesh1.topology.dim num_cells1 = mesh1.topology.index_map(tdim1).size_local mesh1.topology.create_connectivity(tdim1, tdim1) cells1 = _cpp.mesh.entities_to_geometry(mesh1._cpp_object, tdim1, np.arange(num_cells1, dtype=np.int32), False) cells1 += mesh0.geometry.x.shape[0] cells = np.vstack([cells0, cells1]) domain = ufl.Mesh(element("Lagrange", ct.name, 1, shape=(points.shape[1],))) mesh = _mesh.create_mesh(MPI.COMM_SELF, cells, points, domain) tdim = mesh.topology.dim fdim = tdim - 1 # Find information about facets to be used in meshtags bottom_points = np.dot(r_matrix, np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]).T) bottom = find_line_function(bottom_points[:, 0], bottom_points[:, 1]) bottom_facets = _mesh.locate_entities_boundary(mesh, fdim, bottom) top_points = np.dot(r_matrix, np.array([[0, 1, 0], [1, 1, 0], [1, 2, 0], [0, 2, 0]]).T) top = find_line_function(top_points[:, 2], top_points[:, 3]) top_facets = _mesh.locate_entities_boundary(mesh, fdim, top) left_side = find_line_function(top_points[:, 0], top_points[:, 3]) left_facets = _mesh.locate_entities_boundary(mesh, fdim, left_side) right_side = find_line_function(top_points[:, 1], top_points[:, 2]) right_facets = _mesh.locate_entities_boundary(mesh, fdim, right_side) top_cube = over_line(bottom_points[:, 2], bottom_points[:, 3]) num_cells = mesh.topology.index_map(tdim).size_local mesh.topology.create_connectivity(tdim, tdim) cell_midpoints = _mesh.compute_midpoints(mesh, tdim, np.arange(num_cells, dtype=np.int32)) interface = find_line_function(bottom_points[:, 2], bottom_points[:, 3]) i_facets = _mesh.locate_entities_boundary(mesh, fdim, interface) bottom_interface = [] top_interface = [] mesh.topology.create_connectivity(fdim, tdim) facet_to_cell = mesh.topology.connectivity(fdim, tdim) for facet in i_facets: i_cells = facet_to_cell.links(facet) assert len(i_cells == 1) i_cell = i_cells[0] if top_cube(cell_midpoints[i_cell]): top_interface.append(facet) else: bottom_interface.append(facet) top_cube_marker = 2 cell_indices = [] cell_values = [] for cell_index in range(num_cells): if top_cube(cell_midpoints[cell_index]): cell_indices.append(cell_index) cell_values.append(top_cube_marker) ct = _mesh.meshtags(mesh, tdim, np.array(cell_indices, dtype=np.intc), np.array(cell_values, dtype=np.intc)) # Create meshtags for facet data markers: Dict[int, np.ndarray] = { 3: top_facets, 4: np.hstack(bottom_interface), 9: np.hstack(top_interface), 5: bottom_facets, 6: left_facets, 7: right_facets, } all_indices = [] all_values = [] for key in markers.keys(): all_indices.append(markers[key]) all_values.append(np.full(len(markers[key]), key, dtype=np.intc)) arg_sort = np.argsort(np.hstack(all_indices)) mt = _mesh.meshtags(mesh, fdim, np.hstack(all_indices)[arg_sort], np.hstack(all_values)[arg_sort]) mt.name = "facet_tags" outpath = Path(outdir) outpath.mkdir(exist_ok=True, parents=True) with _io.XDMFFile(MPI.COMM_SELF, outpath / f"mesh_{celltype}_{theta:.2f}.xdmf", "w") as o_f: o_f.write_mesh(mesh) o_f.write_meshtags(ct, x=mesh.geometry) o_f.write_meshtags(mt, x=mesh.geometry) MPI.COMM_WORLD.barrier() def mesh_3D_dolfin( theta: float = 0, ct: _mesh.CellType = _mesh.CellType.tetrahedron, ext: str = "tetrahedron", res: float = 0.1, outdir: Union[str, Path] = Path("meshes"), ): timer = _common.Timer("~~Contact: Create mesh") def find_plane_function(p0, p1, p2): """ Find plane function given three points: http://www.nabla.hr/CG-LinesPlanesIn3DA3.htm """ v1 = np.array(p1) - np.array(p0) v2 = np.array(p2) - np.array(p0) n = np.cross(v1, v2) D = -(n[0] * p0[0] + n[1] * p0[1] + n[2] * p0[2]) return lambda x: np.isclose(0, np.dot(n, x) + D) def over_plane(p0, p1, p2): """ Returns function that checks if a point is over a plane defined by the points p0, p1 and p2. """ v1 = np.array(p1) - np.array(p0) v2 = np.array(p2) - np.array(p0) n = np.cross(v1, v2) D = -(n[0] * p0[0] + n[1] * p0[1] + n[2] * p0[2]) return lambda x: n[0] * x[0] + n[1] * x[1] + D > -n[2] * x[2] N = int(1 / res) if MPI.COMM_WORLD.rank == 0: mesh0 = _mesh.create_unit_cube(MPI.COMM_SELF, N, N, N, ct) mesh1 = _mesh.create_unit_cube(MPI.COMM_SELF, 2 * N, 2 * N, 2 * N, ct) mesh0.geometry.x[:, 2] += 1 # Stack the two meshes in one mesh r_matrix = _utils.rotation_matrix([1 / np.sqrt(2), 1 / np.sqrt(2), 0], -theta) points = np.vstack([mesh0.geometry.x, mesh1.geometry.x]) points = np.dot(r_matrix, points.T).T # Transform topology info into geometry info tdim0 = mesh0.topology.dim num_cells0 = mesh0.topology.index_map(tdim0).size_local mesh0.topology.create_connectivity(tdim0, tdim0) cells0 = _cpp.mesh.entities_to_geometry(mesh0._cpp_object, tdim0, np.arange(num_cells0, dtype=np.int32), False) tdim1 = mesh1.topology.dim num_cells1 = mesh1.topology.index_map(tdim1).size_local mesh1.topology.create_connectivity(tdim1, tdim1) cells1 = _cpp.mesh.entities_to_geometry(mesh1._cpp_object, tdim1, np.arange(num_cells1, dtype=np.int32), False) cells1 += mesh0.geometry.x.shape[0] cells = np.vstack([cells0, cells1]) domain = ufl.Mesh(element("Lagrange", ct.name, 1, shape=(points.shape[1],))) mesh = _mesh.create_mesh(MPI.COMM_SELF, cells, points, domain) tdim = mesh.topology.dim fdim = tdim - 1 # Find information about facets to be used in meshtags bottom_points = np.dot(r_matrix, np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]).T) bottom = find_plane_function(bottom_points[:, 0], bottom_points[:, 1], bottom_points[:, 2]) bottom_facets = _mesh.locate_entities_boundary(mesh, fdim, bottom) top_points = np.dot(r_matrix, np.array([[0, 0, 2], [1, 0, 2], [0, 1, 2], [1, 1, 2]]).T) top = find_plane_function(top_points[:, 0], top_points[:, 1], top_points[:, 2]) top_facets = _mesh.locate_entities_boundary(mesh, fdim, top) # left_side = find_line_function(top_points[:, 0], top_points[:, 3]) # left_facets = _mesh.locate_entities_boundary( # mesh, fdim, left_side) # right_side = find_line_function(top_points[:, 1], top_points[:, 2]) # right_facets = _mesh.locate_entities_boundary( # mesh, fdim, right_side) if_points = np.dot(r_matrix, np.array([[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]).T) interface = find_plane_function(if_points[:, 0], if_points[:, 1], if_points[:, 2]) i_facets = _mesh.locate_entities_boundary(mesh, fdim, interface) mesh.topology.create_connectivity(fdim, tdim) top_interface = [] bottom_interface = [] facet_to_cell = mesh.topology.connectivity(fdim, tdim) num_cells = mesh.topology.index_map(tdim).size_local mesh.topology.create_connectivity(tdim, tdim) cell_midpoints = _mesh.compute_midpoints(mesh, tdim, np.arange(num_cells, dtype=np.int32)) top_cube = over_plane(if_points[:, 0], if_points[:, 1], if_points[:, 2]) for facet in i_facets: i_cells = facet_to_cell.links(facet) assert len(i_cells) == 1 i_cell = i_cells[0] if top_cube(cell_midpoints[i_cell]): top_interface.append(facet) else: bottom_interface.append(facet) num_cells = mesh.topology.index_map(tdim).size_local mesh.topology.create_connectivity(tdim, tdim) cell_midpoints = _mesh.compute_midpoints(mesh, tdim, np.arange(num_cells, dtype=np.int32)) top_cube_marker = 2 indices = [] values = [] for cell_index in range(num_cells): if top_cube(cell_midpoints[cell_index]): indices.append(cell_index) values.append(top_cube_marker) ct = _mesh.meshtags(mesh, tdim, np.array(indices, dtype=np.int32), np.array(values, dtype=np.int32)) # Create meshtags for facet data markers: Dict[int, np.ndarray] = { 3: top_facets, 4: np.hstack(bottom_interface), 9: np.hstack(top_interface), 5: bottom_facets, } # , 6: left_facets, 7: right_facets} all_indices = [] all_values = [] for key in markers.keys(): all_indices.append(np.asarray(markers[key], dtype=np.int32)) all_values.append(np.full(len(markers[key]), key, dtype=np.int32)) arg_sort = np.argsort(np.hstack(all_indices)) sorted_vals = np.asarray(np.hstack(all_values)[arg_sort], dtype=np.int32) sorted_indices = np.asarray(np.hstack(all_indices)[arg_sort], dtype=np.int32) mt = _mesh.meshtags(mesh, fdim, sorted_indices, sorted_vals) mt.name = "facet_tags" outpath = Path(outdir) outpath.mkdir(exist_ok=True, parents=True) fname = outpath / f"mesh_{ext}_{theta:.2f}.xdmf" with _io.XDMFFile(MPI.COMM_SELF, fname, "w") as o_f: o_f.write_mesh(mesh) o_f.write_meshtags(ct, mesh.geometry) o_f.write_meshtags(mt, mesh.geometry) timer.stop() dolfinx_mpc-0.9.1/python/demos/demo_contact_2D.py000066400000000000000000000226671476141270300220330ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # This demo demonstrates how to solve a contact problem between # two stacked cubes. # The bottom cube is fixed at the bottom surface # The top cube has a force applied normal to its to surface. # A slip condition is implemented at the interface of the cube. # Additional constraints to avoid tangential movement is # added to the to left corner of the top cube. from __future__ import annotations import warnings from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import numpy as np import scipy.sparse.linalg from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.fem import Constant, dirichletbc, form, functionspace, locate_dofs_geometrical from dolfinx.fem.petsc import apply_lifting, assemble_matrix, assemble_vector, set_bc from dolfinx.io import XDMFFile from dolfinx.log import LogLevel, set_log_level from dolfinx.mesh import locate_entities_boundary, meshtags from ufl import Identity, Measure, TestFunction, TrialFunction, dx, grad, inner, sym, tr from create_and_export_mesh import gmsh_2D_stacked, mesh_2D_dolfin from dolfinx_mpc import LinearProblem, MultiPointConstraint from dolfinx_mpc.utils import ( compare_mpc_lhs, compare_mpc_rhs, create_normal_approximation, facet_normal_approximation, gather_PETScMatrix, gather_PETScVector, gather_transformation_matrix, log_info, rigid_motions_nullspace, rotation_matrix, ) set_log_level(LogLevel.ERROR) def demo_stacked_cubes( outfile: XDMFFile, theta: float, gmsh: bool = True, quad: bool = False, compare: bool = False, res: float = 0.1, ): log_info(f"Run theta:{theta:.2f}, Quad: {quad}, Gmsh {gmsh}, Res {res:.2e}") celltype = "quadrilateral" if quad else "triangle" meshdir = Path("meshes") meshdir.mkdir(exist_ok=True, parents=True) if gmsh: mesh, mt = gmsh_2D_stacked(celltype, theta) mesh.name = f"mesh_{celltype}_{theta:.2f}_gmsh" else: if default_real_type == np.float32: warnings.warn("Demo does not run for single float precision due to limited xdmf support") exit(0) mesh_name = "mesh" filename = meshdir / f"mesh_{celltype}_{theta:.2f}.xdmf" mesh_2D_dolfin(celltype, theta) with XDMFFile(MPI.COMM_WORLD, filename, "r") as xdmf: mesh = xdmf.read_mesh(name=mesh_name) mesh.name = f"mesh_{celltype}_{theta:.2f}" tdim = mesh.topology.dim fdim = tdim - 1 mesh.topology.create_connectivity(tdim, tdim) mesh.topology.create_connectivity(fdim, tdim) mt = xdmf.read_meshtags(mesh, name="facet_tags") # Helper until meshtags can be read in from xdmf V = functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) r_matrix = rotation_matrix([0, 0, 1], theta) g_vec = np.dot(r_matrix, [0, -1.25e2, 0]) g = Constant(mesh, default_scalar_type(g_vec[:2])) def bottom_corner(x): return np.isclose(x, [[0], [0], [0]], atol=5e2 * np.finfo(default_scalar_type).resolution).all(axis=0) # Fix bottom corner bc_value = np.array((0,) * mesh.geometry.dim, dtype=default_scalar_type) # type: ignore bottom_dofs = locate_dofs_geometrical(V, bottom_corner) bc_bottom = dirichletbc(bc_value, bottom_dofs, V) bcs = [bc_bottom] # Elasticity parameters E = 1.0e3 nu = 0 mu = Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx ds = Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=3) rhs = inner(Constant(mesh, default_scalar_type((0, 0))), v) * dx + inner(g, v) * ds # type: ignore tol = float(5e2 * np.finfo(default_scalar_type).resolution) def left_corner(x): return np.isclose(x.T, np.dot(r_matrix, [0, 2, 0]), atol=tol).all(axis=1) # Create multi point constraint mpc = MultiPointConstraint(V) with Timer("~Contact: Create contact constraint"): nh = create_normal_approximation(V, mt, 4) mpc.create_contact_slip_condition(mt, 4, 9, nh, eps2=tol) with Timer("~Contact: Add non-slip condition at bottom interface"): bottom_normal = facet_normal_approximation(V, mt, 5) mpc.create_slip_constraint(V, (mt, 5), bottom_normal, bcs=bcs) with Timer("~Contact: Add tangential constraint at one point"): vertex = locate_entities_boundary(mesh, 0, left_corner) tangent = facet_normal_approximation(V, mt, 3, tangent=True) mtv = meshtags(mesh, 0, vertex, np.full(len(vertex), 6, dtype=np.int32)) mpc.create_slip_constraint(V, (mtv, 6), tangent, bcs=bcs) mpc.finalize() tol = float(5e2 * np.finfo(default_scalar_type).resolution) petsc_options = { "ksp_rtol": tol, "ksp_atol": tol, "pc_type": "gamg", "pc_gamg_type": "agg", "pc_gamg_square_graph": 2, "pc_gamg_threshold": 0.02, "pc_gamg_coarse_eq_limit": 1000, "pc_gamg_sym_graph": True, "mg_levels_ksp_type": "chebyshev", "mg_levels_pc_type": "jacobi", "mg_levels_esteig_ksp_type": "cg", # , "help": None, "ksp_view": None } # Solve Linear problem problem = LinearProblem(a, rhs, mpc, bcs=bcs, petsc_options=petsc_options) # Build near nullspace null_space = rigid_motions_nullspace(mpc.function_space) problem.A.setNearNullSpace(null_space) u_h = problem.solve() it = problem.solver.getIterationNumber() if MPI.COMM_WORLD.rank == 0: print("Number of iterations: {0:d}".format(it)) unorm = u_h.x.petsc_vec.norm() if MPI.COMM_WORLD.rank == 0: print(f"Norm of u: {unorm}") # Write solution to file ext = "_gmsh" if gmsh else "" u_h.name = "u_mpc_{0:s}_{1:.2f}{2:s}".format(celltype, theta, ext) outfile.write_mesh(mesh) outfile.write_function(u_h, 0.0, f"Xdmf/Domain/Grid[@Name='{mesh.name}'][1]") # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values if not compare: return log_info("Solving reference problem with global matrix (using numpy)") with Timer("~MPC: Reference problem"): # Generate reference matrices and unconstrained solution A_org = assemble_matrix(form(a), bcs) A_org.assemble() L_org = assemble_vector(form(rhs)) apply_lifting(L_org, [form(a)], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore set_bc(L_org, bcs) root = 0 with Timer("~MPC: Verification"): compare_mpc_lhs(A_org, problem.A, mpc, root=root) compare_mpc_rhs(L_org, problem.b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = gather_PETScMatrix(A_org, root=root) K = gather_transformation_matrix(mpc, root=root) L_np = gather_PETScVector(L_org, root=root) u_mpc = gather_PETScVector(u_h.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ L_np # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ d assert np.allclose(uh_numpy, u_mpc, rtol=tol, atol=tol) L_org.destroy() A_org.destroy() if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--res", default=0.1, type=np.float64, dest="res", help="Resolution of Mesh") parser.add_argument( "--theta", default=np.pi / 3, type=np.float64, dest="theta", help="Rotation angle around axis [1, 1, 0]", ) quad = parser.add_mutually_exclusive_group(required=False) quad.add_argument("--quad", dest="quad", action="store_true", help="Use quadrilateral mesh", default=False) gmsh = parser.add_mutually_exclusive_group(required=False) gmsh.add_argument( "--gmsh", dest="gmsh", action="store_true", help="Gmsh mesh instead of built-in grid", default=False, ) comp = parser.add_mutually_exclusive_group(required=False) comp.add_argument( "--compare", dest="compare", action="store_true", help="Compare with global solution", default=False, ) time = parser.add_mutually_exclusive_group(required=False) time.add_argument("--timing", dest="timing", action="store_true", help="List timings", default=False) args = parser.parse_args() # Create results file outdir = Path("results") outdir.mkdir(parents=True, exist_ok=True) outfile = XDMFFile(MPI.COMM_WORLD, outdir / "demo_contact_2D.xdmf", "w") # Run demo for input parameters demo_stacked_cubes( outfile, theta=args.theta, gmsh=args.gmsh, quad=args.quad, compare=args.compare, res=args.res, ) outfile.close() if args.timing: list_timings(MPI.COMM_WORLD, [TimingType.wall]) dolfinx_mpc-0.9.1/python/demos/demo_contact_3D.py000066400000000000000000000250441476141270300220240ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # Multi point constraint problem for linear elasticity with slip conditions # between two cubes. from __future__ import annotations import warnings from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import scipy.sparse.linalg from dolfinx import default_real_type, default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.io import XDMFFile from dolfinx.mesh import CellType from ufl import Identity, TestFunction, TrialFunction, dx, grad, inner, sym, tr from create_and_export_mesh import gmsh_3D_stacked, mesh_3D_dolfin from dolfinx_mpc import MultiPointConstraint, apply_lifting, assemble_matrix, assemble_vector from dolfinx_mpc.utils import ( compare_mpc_lhs, compare_mpc_rhs, create_normal_approximation, gather_PETScMatrix, gather_PETScVector, gather_transformation_matrix, log_info, rigid_motions_nullspace, rotation_matrix, ) def demo_stacked_cubes( outfile: XDMFFile, theta: float, gmsh: bool = False, ct: CellType = CellType.tetrahedron, compare: bool = True, res: float = 0.1, noslip: bool = False, ): celltype = "hexahedron" if ct == CellType.hexahedron else "tetrahedron" type_ext = "no_slip" if noslip else "slip" mesh_ext = "_gmsh_" if gmsh else "_" log_info(f"Run theta:{theta:.2f}, Cell: {celltype}, GMSH {gmsh}, Noslip: {noslip}") # Read in mesh if gmsh: mesh, mt = gmsh_3D_stacked(celltype, theta, res) tdim = mesh.topology.dim fdim = tdim - 1 mesh.topology.create_connectivity(tdim, tdim) mesh.topology.create_connectivity(fdim, tdim) else: if default_real_type == np.float32: warnings.warn("Demo does not run for single float precision due to limited xdmf support") exit(0) mesh_3D_dolfin(theta, ct, celltype, res) MPI.COMM_WORLD.barrier() with XDMFFile(MPI.COMM_WORLD, f"meshes/mesh_{celltype}_{theta:.2f}.xdmf", "r") as xdmf: mesh = xdmf.read_mesh(name="mesh") tdim = mesh.topology.dim fdim = tdim - 1 mesh.topology.create_connectivity(tdim, tdim) mesh.topology.create_connectivity(fdim, tdim) mt = xdmf.read_meshtags(mesh, "facet_tags") mesh.name = f"mesh_{celltype}_{theta:.2f}{type_ext}{mesh_ext}" # Create functionspaces V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) # Define boundary conditions # Bottom boundary is fixed in all directions bottom_dofs = fem.locate_dofs_topological(V, fdim, mt.find(5)) u_bc = np.array((0,) * mesh.geometry.dim, dtype=default_scalar_type) bc_bottom = fem.dirichletbc(u_bc, bottom_dofs, V) g_vec = np.array([0, 0, -4.25e-1], dtype=default_scalar_type) if not noslip: # Helper for orienting traction r_matrix = rotation_matrix([1 / np.sqrt(2), 1 / np.sqrt(2), 0], -theta) # Top boundary has a given deformation normal to the interface g_vec = np.dot(r_matrix, g_vec).astype(default_scalar_type) top_dofs = fem.locate_dofs_topological(V, fdim, mt.find(3)) bc_top = fem.dirichletbc(g_vec, top_dofs, V) bcs = [bc_bottom, bc_top] # Elasticity parameters E = 1.0e3 nu = 0 mu = fem.Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = fem.Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx # NOTE: Traction deactivated until we have a way of fixing nullspace # g = fem.Constant(mesh, default_scalar_type(g_vec)) # ds = Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=3) rhs = inner(fem.Constant(mesh, default_scalar_type((0, 0, 0))), v) * dx # + inner(g, v) * ds bilinear_form = fem.form(a) linear_form = fem.form(rhs) mpc = MultiPointConstraint(V) tol = float(5e2 * np.finfo(default_scalar_type).resolution) if noslip: with Timer("~~Contact: Create non-elastic constraint"): mpc.create_contact_inelastic_condition(mt, 4, 9, eps2=tol) else: with Timer("~Contact: Create contact constraint"): nh = create_normal_approximation(V, mt, 4) mpc.create_contact_slip_condition(mt, 4, 9, nh, eps2=tol) with Timer("~~Contact: Add data and finialize MPC"): mpc.finalize() # Create null-space null_space = rigid_motions_nullspace(mpc.function_space) num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs with Timer(f"~~Contact: Assemble matrix ({num_dofs})"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) with Timer(f"~~Contact: Assemble vector ({num_dofs})"): b = assemble_vector(linear_form, mpc) apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore fem.petsc.set_bc(b, bcs) # Solve Linear problem opts = PETSc.Options() # type: ignore opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = "scalable" # opts["pc_gamg_square_graph"] = 2 # opts["pc_gamg_threshold"] = 1e-2 # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Create functionspace and build near nullspace A.setNearNullSpace(null_space) solver = PETSc.KSP().create(mesh.comm) # type: ignore solver.setOperators(A) solver.setFromOptions() u_h = fem.Function(mpc.function_space) with Timer("~~Contact: Solve"): # Temporary fix while: # https://gitlab.com/petsc/petsc/-/issues/1339 # gets sorted A.convert("baij", A) A.convert("aij", A) solver.solve(b, u_h.x.petsc_vec) u_h.x.scatter_forward() with Timer("~~Contact: Backsubstitution"): mpc.backsubstitution(u_h) it = solver.getIterationNumber() unorm = u_h.x.petsc_vec.norm() num_slaves = MPI.COMM_WORLD.allreduce(mpc.num_local_slaves, op=MPI.SUM) if mesh.comm.rank == 0: num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs print(f"Number of dofs: {num_dofs}") print(f"Number of slaves: {num_slaves}") print(f"Number of iterations: {it}") print(f"Norm of u {unorm:.5e}") # Write solution to file u_h.name = f"u_{celltype}_{theta:.2f}{mesh_ext}{type_ext}" outfile.write_mesh(mesh) outfile.write_function(u_h, 0.0, f"Xdmf/Domain/Grid[@Name='{mesh.name}'][1]") outfile.close() # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values if not compare: b.destroy() solver.destroy() return log_info("Solving reference problem with global matrix (using scipy)") with Timer("~~Contact: Reference problem"): A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore fem.petsc.set_bc(L_org, bcs) root = 0 with Timer("~~Contact: Compare LHS, RHS and solution"): compare_mpc_lhs(A_org, A, mpc, root=root) compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = gather_PETScMatrix(A_org, root=root) K = gather_transformation_matrix(mpc, root=root) L_np = gather_PETScVector(L_org, root=root) u_mpc = gather_PETScVector(u_h.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ L_np # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ d assert np.allclose(uh_numpy, u_mpc) list_timings(mesh.comm, [TimingType.wall]) b.destroy() L_org.destroy() solver.destroy() if __name__ == "__main__": parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--res", default=0.1, type=np.float64, dest="res", help="Resolution of Mesh") parser.add_argument( "--theta", default=np.pi / 3, type=np.float64, dest="theta", help="Rotation angle around axis [1, 1, 0]", ) hex = parser.add_mutually_exclusive_group(required=False) hex.add_argument("--hex", dest="hex", action="store_true", help="Use hexahedron mesh", default=False) slip = parser.add_mutually_exclusive_group(required=False) slip.add_argument( "--no-slip", dest="noslip", action="store_true", help="Use no-slip constraint", default=False, ) gmsh = parser.add_mutually_exclusive_group(required=False) gmsh.add_argument( "--gmsh", dest="gmsh", action="store_true", help="Gmsh mesh instead of built-in grid", default=False, ) comp = parser.add_mutually_exclusive_group(required=False) comp.add_argument( "--compare", dest="compare", action="store_true", help="Compare with global solution", default=False, ) time = parser.add_mutually_exclusive_group(required=False) time.add_argument("--timing", dest="timing", action="store_true", help="List timings", default=False) args = parser.parse_args() outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) outfile = XDMFFile(MPI.COMM_WORLD, outdir / "demo_contact_3D.xdmf", "w") ct = CellType.hexahedron if args.hex else CellType.tetrahedron demo_stacked_cubes( outfile, theta=args.theta, gmsh=args.gmsh, ct=ct, compare=args.compare, res=args.res, noslip=args.noslip, ) outfile.close() log_info("Simulation finished") if args.timing: list_timings(MPI.COMM_WORLD, [TimingType.wall]) dolfinx_mpc-0.9.1/python/demos/demo_elasticity.py000066400000000000000000000147031476141270300222150ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import scipy.sparse.linalg from dolfinx import default_scalar_type from dolfinx.common import Timer from dolfinx.io import XDMFFile from dolfinx.mesh import create_unit_square, locate_entities_boundary from ufl import ( Identity, SpatialCoordinate, TestFunction, TrialFunction, as_vector, dx, grad, inner, sym, tr, ) import dolfinx_mpc.utils from dolfinx_mpc import LinearProblem, MultiPointConstraint def demo_elasticity(): mesh = create_unit_square(MPI.COMM_WORLD, 10, 10) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) # Generate Dirichlet BC on lower boundary (Fixed) def boundaries(x): return np.isclose(x[0], np.finfo(float).eps) facets = locate_entities_boundary(mesh, 1, boundaries) topological_dofs = fem.locate_dofs_topological(V, 1, facets) bc = fem.dirichletbc(np.array([0, 0], dtype=default_scalar_type), topological_dofs, V) bcs = [bc] # Define variational problem u = TrialFunction(V) v = TestFunction(V) # Elasticity parameters E = 1.0e4 nu = 0.0 mu = fem.Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = fem.Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) x = SpatialCoordinate(mesh) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(as_vector((0, (x[0] - 0.5) * 10**4 * x[1])), v) * dx # Create MPC def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {l2b([1, 0]): {l2b([1, 1]): 0.9}} mpc = MultiPointConstraint(V) mpc.create_general_constraint(s_m_c, 1, 1) mpc.finalize() # Solve Linear problem petsc_options = {"ksp_type": "preonly", "pc_type": "lu"} problem = LinearProblem(a, rhs, mpc, bcs=bcs, petsc_options=petsc_options) u_h = problem.solve() u_h.name = "u_mpc" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) with XDMFFile(mesh.comm, outdir / "demo_elasticity.xdmf", "w") as outfile: outfile.write_mesh(mesh) outfile.write_function(u_h) # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values bilinear_form = fem.form(a) A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() linear_form = fem.form(rhs) L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(L_org, bcs) solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A_org) u_ = fem.Function(V) solver.solve(L_org, u_.x.petsc_vec) u_.x.scatter_forward() u_.name = "u_unconstrained" with XDMFFile(mesh.comm, outdir / "demo_elasticity.xdmf", "a") as outfile: outfile.write_function(u_) outfile.close() root = 0 with Timer("~Demo: Verification"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, problem.A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, problem.b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(u_h.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ L_np # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ d assert np.allclose(uh_numpy, u_mpc, atol=500 * np.finfo(u_mpc.dtype).resolution) # Print out master-slave connectivity for the first slave master_owner = None master_data = None slave_owner = None if mpc.num_local_slaves > 0: slave_owner = MPI.COMM_WORLD.rank bs = mpc.function_space.dofmap.index_map_bs slave = mpc.slaves[0] print("Constrained: {0:.5e}\n Unconstrained: {1:.5e}".format(u_h.x.array[slave], u_.x.petsc_vec.array[slave])) master_owner = mpc._cpp_object.owners.links(slave)[0] _masters = mpc.masters master = _masters.links(slave)[0] glob_master = mpc.function_space.dofmap.index_map.local_to_global(np.array([master // bs], dtype=np.int32))[0] coeffs, offs = mpc.coefficients() master_data = [glob_master * bs + master % bs, coeffs[offs[slave] : offs[slave + 1]][0]] # If master not on proc send info to this processor if MPI.COMM_WORLD.rank != master_owner: MPI.COMM_WORLD.send(master_data, dest=master_owner, tag=1) else: print( "Master*Coeff: {0:.5e}".format( coeffs[offs[slave] : offs[slave + 1]][0] * u_h.x.array[_masters.links(slave)[0]] ) ) # As a processor with a master is not aware that it has a master, # Determine this so that it can receive the global dof and coefficient master_recv = MPI.COMM_WORLD.allgather(master_owner) for master in master_recv: if master is not None: master_owner = master break if slave_owner != master_owner and MPI.COMM_WORLD.rank == master_owner: dofmap = mpc.function_space.dofmap bs = dofmap.index_map_bs in_data = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=1) num_local = dofmap.index_map.size_local + dofmap.index_map.num_ghosts l2g = dofmap.index_map.local_to_global(np.arange(num_local, dtype=np.int32)) l_index = np.flatnonzero(l2g == in_data[0] // bs)[0] print("Master*Coeff (on other proc): {0:.5e}".format(u_h.x.array[l_index * bs + in_data[0] % bs] * in_data[1])) L_org.destroy() solver.destroy() if __name__ == "__main__": demo_elasticity() dolfinx_mpc-0.9.1/python/demos/demo_elasticity_disconnect.py000066400000000000000000000170141476141270300244240ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # Create constraint between two bodies that are not in contact from __future__ import annotations from pathlib import Path from mpi4py import MPI import basix.ufl import gmsh import numpy as np from dolfinx import default_real_type, default_scalar_type from dolfinx.fem import Constant, Function, dirichletbc, functionspace, locate_dofs_topological from dolfinx.io import XDMFFile, gmshio from ufl import ( Identity, Measure, SpatialCoordinate, TestFunction, TrialFunction, as_vector, grad, inner, sym, tr, ) from dolfinx_mpc import LinearProblem, MultiPointConstraint from dolfinx_mpc.utils import ( create_point_to_point_constraint, determine_closest_block, rigid_motions_nullspace, ) # Mesh parameters for creating a mesh consisting of two spheres, # Sphere(r2)\Sphere(r1) and Sphere(r_0) r0, r0_tag = 0.4, 1 r1, r1_tag = 0.5, 2 r2, r2_tag = 0.8, 3 outer_tag = 1 inner_tag = 2 assert r0 < r1 and r1 < r2 gmsh.initialize() if MPI.COMM_WORLD.rank == 0: gmsh.clear() # Create Sphere(r2)\Sphere(r1) p0 = gmsh.model.occ.addPoint(0, 0, 0) outer_sphere = gmsh.model.occ.addSphere(0, 0, 0, r2) mid_sphere = gmsh.model.occ.addSphere(0, 0, 0, r1) hollow_sphere = gmsh.model.occ.cut([(3, outer_sphere)], [(3, mid_sphere)]) # Create Sphere(r0) inner_sphere = gmsh.model.occ.addSphere(0, 0, 0, r0) gmsh.model.occ.synchronize() # Add physical tags for volumes gmsh.model.addPhysicalGroup(hollow_sphere[0][0][0], [hollow_sphere[0][0][1]], tag=outer_tag) gmsh.model.setPhysicalName(hollow_sphere[0][0][0], 1, "Hollow sphere") gmsh.model.addPhysicalGroup(3, [inner_sphere], tag=inner_tag) gmsh.model.setPhysicalName(3, 2, "Inner sphere") # Add physical tags for surfaces r1_surface, r2_surface = [], [] hollow_boundary = gmsh.model.getBoundary(hollow_sphere[0], oriented=False) inner_boundary = gmsh.model.getBoundary([(3, inner_sphere)], oriented=False) for boundary in hollow_boundary: bbox = gmsh.model.getBoundingBox(boundary[0], boundary[1]) if np.isclose(max(bbox), r1): r1_surface.append(boundary[1]) elif np.isclose(max(bbox), r2): r2_surface.append(boundary[1]) gmsh.model.addPhysicalGroup(inner_boundary[0][0], [inner_boundary[0][1]], r0_tag) gmsh.model.setPhysicalName(inner_boundary[0][0], r0_tag, "Inner boundary") gmsh.model.addPhysicalGroup(2, r1_surface, r1_tag) gmsh.model.setPhysicalName(2, r1_tag, "Mid boundary") gmsh.model.addPhysicalGroup(2, r2_surface, r2_tag) gmsh.model.setPhysicalName(2, r2_tag, "Outer boundary") # Set mesh resolution res_inner = r0 / 5 res_outer = (r1 + r2) / 5 gmsh.model.occ.synchronize() gmsh.model.mesh.field.add("Distance", 1) gmsh.model.mesh.field.setNumbers(1, "NodesList", [p0]) gmsh.model.mesh.field.add("Threshold", 2) gmsh.model.mesh.field.setNumber(2, "IField", 1) gmsh.model.mesh.field.setNumber(2, "LcMin", res_inner) gmsh.model.mesh.field.setNumber(2, "LcMax", res_outer) gmsh.model.mesh.field.setNumber(2, "DistMin", r0) gmsh.model.mesh.field.setNumber(2, "DistMax", r1) gmsh.model.mesh.field.add("Threshold", 3) gmsh.model.mesh.field.setNumber(3, "IField", 1) gmsh.model.mesh.field.setNumber(3, "LcMin", res_outer) gmsh.model.mesh.field.setNumber(3, "LcMax", res_outer) gmsh.model.mesh.field.setNumber(3, "DistMin", r1) gmsh.model.mesh.field.setNumber(3, "DistMax", r2) gmsh.model.mesh.field.add("Min", 4) gmsh.model.mesh.field.setNumbers(4, "FieldsList", [2, 3]) gmsh.model.mesh.field.setAsBackgroundMesh(4) # Generate mesh gmsh.model.mesh.generate(3) gmsh.option.setNumber("General.Terminal", 1) gmsh.model.mesh.optimize("Netgen") gmsh.model.mesh.setOrder(2) mesh, ct, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim=3) gmsh.clear() gmsh.finalize() MPI.COMM_WORLD.barrier() V = functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) tdim = mesh.topology.dim fdim = tdim - 1 DG0 = functionspace(mesh, ("DG", 0)) mesh.topology.create_connectivity(tdim, tdim) outer_dofs = locate_dofs_topological(DG0, tdim, ct.find(outer_tag)) inner_dofs = locate_dofs_topological(DG0, tdim, ct.find(inner_tag)) # Elasticity parameters E_outer = 1e3 E_inner = 1e5 nu_outer = 0.3 nu_inner = 0.1 mu = Function(DG0) lmbda = Function(DG0) with mu.x.petsc_vec.localForm() as local: local.array[inner_dofs] = E_inner / (2 * (1 + nu_inner)) local.array[outer_dofs] = E_outer / (2 * (1 + nu_outer)) with lmbda.x.petsc_vec.localForm() as local: local.array[inner_dofs] = E_inner * nu_inner / ((1 + nu_inner) * (1 - 2 * nu_inner)) local.array[outer_dofs] = E_outer * nu_outer / ((1 + nu_outer) * (1 - 2 * nu_outer)) mu.x.petsc_vec.destroy() lmbda.x.petsc_vec.destroy() # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) dx = Measure("dx", domain=mesh, subdomain_data=ct) a = inner(sigma(u), grad(v)) * dx x = SpatialCoordinate(mesh) rhs = inner(Constant(mesh, default_scalar_type((0, 0, 0))), v) * dx rhs += inner(Constant(mesh, default_scalar_type((0.01, 0.02, 0))), v) * dx(outer_tag) rhs += inner(as_vector((0, 0, -9.81e-2)), v) * dx(inner_tag) # Create dirichletbc owning_processor, bc_dofs = determine_closest_block(V, -np.array([-r2, 0, 0])) bc_dofs = [] if bc_dofs is None else bc_dofs u_fixed = np.array([0, 0, 0], dtype=default_scalar_type) bc_fixed = dirichletbc(u_fixed, np.asarray(bc_dofs, dtype=np.int32), V) bcs = [bc_fixed] # Create point to point constraints mpc = MultiPointConstraint(V) signs = [-1, 1] axis = [0, 1] for i in axis: for s in signs: r0_point = np.zeros(3) r1_point = np.zeros(3) r0_point[i] = s * r0 r1_point[i] = s * r1 sl, ms, co, ow, off = create_point_to_point_constraint(V, r1_point, r0_point) mpc.add_constraint(V, sl, ms, co, ow, off) mpc.finalize() # Create nullspace null_space = rigid_motions_nullspace(mpc.function_space) ksp_rtol = 5e2 * np.finfo(default_scalar_type).resolution petsc_options = { "ksp_rtol": ksp_rtol, "pc_type": "gamg", "pc_gamg_type": "agg", "pc_gamg_coarse_eq_limit": 1000, "pc_gamg_sym_graph": True, "mg_levels_ksp_type": "chebyshev", "mg_levels_pc_type": "jacobi", "mg_levels_esteig_ksp_type": "cg", "matptap_via": "scalable", "pc_gamg_square_graph": 2, "pc_gamg_threshold": 0.02, # ,"help": None, "ksp_view": None } problem = LinearProblem(a, rhs, mpc, bcs=bcs, petsc_options=petsc_options) # Build near nullspace null_space = rigid_motions_nullspace(mpc.function_space) problem.A.setNearNullSpace(null_space) u_h = problem.solve() it = problem.solver.getIterationNumber() unorm = u_h.x.petsc_vec.norm() if MPI.COMM_WORLD.rank == 0: print("Number of iterations: {0:d}".format(it)) # Write solution to file V_out = functionspace( mesh, basix.ufl.element( "Lagrange", mesh.topology.cell_name(), mesh.geometry.cmap.degree, lagrange_variant=basix.LagrangeVariant(mesh.geometry.cmap.variant), shape=(V.dofmap.bs,), dtype=default_real_type, ), ) u_out = Function(V_out) u_out.interpolate(u_h) u_out.name = "uh" out_path = Path("results") out_path.mkdir(exist_ok=True, parents=True) with XDMFFile(mesh.comm, out_path / "demo_elasticity_disconnect.xdmf", "w") as xdmf: xdmf.write_mesh(mesh) xdmf.write_function(u_out) dolfinx_mpc-0.9.1/python/demos/demo_elasticity_disconnect_2D.py000066400000000000000000000152251476141270300247530ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # Create constraint between two bodies that are not in contact from __future__ import annotations from pathlib import Path from mpi4py import MPI import gmsh import numpy as np from dolfinx import default_scalar_type from dolfinx.fem import ( Constant, Function, FunctionSpace, dirichletbc, functionspace, locate_dofs_geometrical, locate_dofs_topological, ) from dolfinx.io import XDMFFile, gmshio from dolfinx.mesh import locate_entities_boundary from ufl import ( Identity, Measure, SpatialCoordinate, TestFunction, TrialFunction, grad, inner, sym, tr, ) from dolfinx_mpc import LinearProblem, MultiPointConstraint from dolfinx_mpc.utils import create_point_to_point_constraint, rigid_motions_nullspace # Mesh parameters for creating a mesh consisting of two disjoint rectangles right_tag = 1 left_tag = 2 gmsh.initialize() if MPI.COMM_WORLD.rank == 0: gmsh.clear() left_rectangle = gmsh.model.occ.addRectangle(0, 0, 0, 1, 1) right_rectangle = gmsh.model.occ.addRectangle(1.1, 0, 0, 1, 1) gmsh.model.occ.synchronize() # Add physical tags for volumes gmsh.model.addPhysicalGroup(2, [right_rectangle], tag=right_tag) gmsh.model.setPhysicalName(2, right_tag, "Right square") gmsh.model.addPhysicalGroup(2, [left_rectangle], tag=left_tag) gmsh.model.setPhysicalName(2, left_tag, "Left square") gmsh.option.setNumber("Mesh.CharacteristicLengthMin", 1) gmsh.option.setNumber("Mesh.CharacteristicLengthMax", 1) # Generate mesh gmsh.model.mesh.generate(2) # gmsh.option.setNumber("General.Terminal", 1) gmsh.model.mesh.optimize("Netgen") mesh, ct, _ = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim=2) gmsh.clear() gmsh.finalize() MPI.COMM_WORLD.barrier() with XDMFFile(mesh.comm, "test.xdmf", "w") as xdmf: xdmf.write_mesh(mesh) V = functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) tdim = mesh.topology.dim fdim = tdim - 1 # Locate cells with different elasticity parameters DG0 = functionspace(mesh, ("DG", 0)) mesh.topology.create_connectivity(tdim, tdim) left_dofs = locate_dofs_topological(DG0, tdim, ct.find(left_tag)) right_dofs = locate_dofs_topological(DG0, tdim, ct.find(right_tag)) # Elasticity parameters E_right = 1e2 E_left = 1e2 nu_right = 0.3 nu_left = 0.1 mu = Function(DG0) lmbda = Function(DG0) with mu.x.petsc_vec.localForm() as local: local.array[left_dofs] = E_left / (2 * (1 + nu_left)) local.array[right_dofs] = E_right / (2 * (1 + nu_right)) with lmbda.x.petsc_vec.localForm() as local: local.array[left_dofs] = E_left * nu_left / ((1 + nu_left) * (1 - 2 * nu_left)) local.array[right_dofs] = E_right * nu_right / ((1 + nu_right) * (1 - 2 * nu_right)) mu.x.petsc_vec.destroy() lmbda.x.petsc_vec.destroy() # Stress computation def sigma(v): return 2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v)) # Define variational problem u = TrialFunction(V) v = TestFunction(V) dx = Measure("dx", domain=mesh, subdomain_data=ct) a = inner(sigma(u), grad(v)) * dx x = SpatialCoordinate(mesh) rhs = inner(Constant(mesh, default_scalar_type((0, 0))), v) * dx # Set boundary conditions u_push = np.array([0.1, 0], dtype=default_scalar_type) dofs = locate_dofs_geometrical(V, lambda x: np.isclose(x[0], 0)) bc_push = dirichletbc(u_push, dofs, V) u_fix = np.array([0, 0], dtype=default_scalar_type) bc_fix = dirichletbc(u_fix, locate_dofs_geometrical(V, lambda x: np.isclose(x[0], 2.1)), V) bcs = [bc_push, bc_fix] def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray): """ Distributes the dof coordinates of this subset of dofs to all processors """ x = V.tabulate_dof_coordinates() local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs] coords = x[local_dofs] num_nodes = len(coords) glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM) recvbuf = np.empty(0, dtype=V.mesh.geometry.x.dtype) if MPI.COMM_WORLD.rank == 0: recvbuf = np.zeros(3 * glob_num_nodes, dtype=V.mesh.geometry.x.dtype) sendbuf = coords.reshape(-1) sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0)) MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0) # type: ignore glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3)) return glob_coords # Create pairs of dofs at each boundary V0, _ = V.sub(0).collapse() facets_r = locate_entities_boundary(mesh, fdim, lambda x: np.isclose(x[0], 1)) dofs_r = locate_dofs_topological(V0, fdim, facets_r) facets_l = locate_entities_boundary(mesh, fdim, lambda x: np.isclose(x[0], 1.1)) dofs_l = locate_dofs_topological(V0, fdim, facets_l) # Given the local coordinates of the dofs, distribute them on all processors nodes = [gather_dof_coordinates(V0, dofs_r), gather_dof_coordinates(V0, dofs_l)] pairs = [] for x in nodes[0]: for y in nodes[1]: if np.isclose(x[1], y[1]): pairs.append([x, y]) break mpc = MultiPointConstraint(V) for i, pair in enumerate(pairs): sl, ms, co, ow, off = create_point_to_point_constraint(V, pair[0], pair[1], vector=[1, 0]) mpc.add_constraint(V, sl, ms, co, ow, off) mpc.finalize() # Add back once PETSc release has added fix for # https://gitlab.com/petsc/petsc/-/issues/1149 # petsc_options = {"ksp_rtol": 1.0e-8, # "ksp_type": "cg", # "pc_type": "gamg", # "pc_gamg_type": "agg", # "pc_gamg_coarse_eq_limit": 1000, # "pc_gamg_sym_graph": True, # "pc_gamg_square_graph": 2, # "pc_gamg_threshold": 0.02, # "mg_levels_ksp_type": "chebyshev", # "mg_levels_pc_type": "jacobi", # "mg_levels_esteig_ksp_type": "cg", # # "matptap_via": "scalable", # "ksp_view": None, # "help": None, # "ksp_monitor": None # } petsc_options = {"ksp_type": "preonly", "pc_type": "lu"} problem = LinearProblem(a, rhs, mpc, bcs=bcs, petsc_options=petsc_options) # Build near nullspace null_space = rigid_motions_nullspace(mpc.function_space) problem.A.setNearNullSpace(null_space) u_h = problem.solve() it = problem.solver.getIterationNumber() unorm = u_h.x.petsc_vec.norm() if MPI.COMM_WORLD.rank == 0: print("Number of iterations: {0:d}".format(it)) # Write solution to file u_h.name = "u" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) with XDMFFile(mesh.comm, outdir / "demo_elasticity_disconnect_2D.xdmf", "w") as xdmf: xdmf.write_mesh(mesh) xdmf.write_function(u_h) dolfinx_mpc-0.9.1/python/demos/demo_periodic3d_topological.py000066400000000000000000000154371476141270300244710ustar00rootroot00000000000000# This demo program solves Poisson's equation # # - div grad u(x, y) = f(x, y) # # on the unit square with homogeneous Dirichlet boundary conditions # at y = 0, 1 and periodic boundary conditions at x = 0, 1. # # Original implementation in DOLFIN by Kristian B. Oelgaard and Anders Logg # This implementation can be found at: # https://bitbucket.org/fenics-project/dolfin/src/master/python/demo/documented/periodic/demo_periodic.py # # Copyright (C) Jørgen S. Dokken 2020-2022. # # This file is part of DOLFINX_MPCX. # # SPDX-License-Identifier: MIT from __future__ import annotations from pathlib import Path from typing import Dict, Union from mpi4py import MPI import dolfinx.fem as fem import numpy as np import scipy.sparse.linalg from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.io import VTXWriter from dolfinx.mesh import CellType, create_unit_cube, locate_entities_boundary, meshtags from numpy.typing import NDArray from ufl import ( SpatialCoordinate, TestFunction, TrialFunction, as_vector, dx, exp, grad, inner, pi, sin, ) import dolfinx_mpc.utils from dolfinx_mpc import LinearProblem # Get PETSc int and scalar types complex_mode = True if np.dtype(default_scalar_type).kind == "c" else False def demo_periodic3D(celltype: CellType): # Create mesh and finite element if celltype == CellType.tetrahedron: # Tet setup N = 10 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) else: # Hex setup N = 10 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) V = fem.functionspace(mesh, ("Lagrange", 2, (mesh.geometry.dim,))) tol = float(5e2 * np.finfo(default_scalar_type).resolution) def dirichletboundary(x: NDArray[Union[np.float32, np.float64]]) -> NDArray[np.bool_]: return np.logical_or( np.logical_or(np.isclose(x[1], 0, atol=tol), np.isclose(x[1], 1, atol=tol)), np.logical_or(np.isclose(x[2], 0, atol=tol), np.isclose(x[2], 1, atol=tol)), ) # Create Dirichlet boundary condition zero = default_scalar_type([0, 0, 0]) geometrical_dofs = fem.locate_dofs_geometrical(V, dirichletboundary) bc = fem.dirichletbc(zero, geometrical_dofs, V) bcs = [bc] def PeriodicBoundary(x): """ Full surface minus dofs constrained by BCs """ return np.isclose(x[0], 1, atol=tol) facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, PeriodicBoundary) arg_sort = np.argsort(facets) mt = meshtags(mesh, mesh.topology.dim - 1, facets[arg_sort], np.full(len(facets), 2, dtype=np.int32)) def periodic_relation(x): out_x = np.zeros(x.shape) out_x[0] = 1 - x[0] out_x[1] = x[1] out_x[2] = x[2] return out_x with Timer("~~Periodic: Compute mpc condition"): mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_periodic_constraint_topological(V.sub(0), mt, 2, periodic_relation, bcs, default_scalar_type(1)) mpc.finalize() # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx x = SpatialCoordinate(mesh) dx_ = x[0] - 0.9 dy_ = x[1] - 0.5 dz_ = x[2] - 0.1 f = as_vector( ( x[0] * sin(5.0 * pi * x[1]) + 1.0 * exp(-(dx_ * dx_ + dy_ * dy_ + dz_ * dz_) / 0.02), 0.1 * dx_ * dz_, 0.1 * dx_ * dy_, ) ) rhs = inner(f, v) * dx petsc_options: Dict[str, Union[str, float, int]] if complex_mode or default_scalar_type == np.float32: petsc_options = {"ksp_type": "preonly", "pc_type": "lu"} else: petsc_options = { "ksp_type": "cg", "ksp_rtol": str(tol), "pc_type": "hypre", "pc_hypre_type": "boomeramg", "pc_hypre_boomeramg_max_iter": 1, "pc_hypre_boomeramg_cycle_type": "v", "pc_hypre_boomeramg_print_statistics": 1, } problem = LinearProblem(a, rhs, mpc, bcs, petsc_options=petsc_options) u_h = problem.solve() # --------------------VERIFICATION------------------------- print("----Verification----") u_ = fem.Function(V) u_.x.array[:] = 0 org_problem = fem.petsc.LinearProblem(a, rhs, u=u_, bcs=bcs, petsc_options=petsc_options) with Timer("~Periodic: Unconstrained solve"): org_problem.solve() it = org_problem.solver.getIterationNumber() print(f"Unconstrained solver iterations: {it}") # Write solutions to file ext = "tet" if celltype == CellType.tetrahedron else "hex" u_.name = "u_" + ext + "_unconstrained" # NOTE: Workaround as tabulate dof coordinates does not like extra ghosts u_out = fem.Function(V) old_local = u_out.x.index_map.size_local * u_out.x.block_size old_ghosts = u_out.x.index_map.num_ghosts * u_out.x.block_size mpc_local = u_h.x.index_map.size_local * u_h.x.block_size assert old_local == mpc_local u_out.x.array[: old_local + old_ghosts] = u_h.x.array[: mpc_local + old_ghosts] u_out.name = "u_" + ext outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) fname = outdir / f"demo_periodic3d_{ext}.bp" out_periodic = VTXWriter(mesh.comm, fname, u_out, engine="BP4") out_periodic.write(0) out_periodic.close() root = 0 with Timer("~Demo: Verification"): dolfinx_mpc.utils.compare_mpc_lhs(org_problem.A, problem.A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(org_problem.b, problem.b, mpc, root=root) is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(org_problem.A, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(org_problem.b, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(u_h.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d.astype(scipy_dtype) assert np.allclose(uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=tol, atol=tol) if __name__ == "__main__": for celltype in [CellType.hexahedron, CellType.tetrahedron]: demo_periodic3D(celltype) list_timings(MPI.COMM_WORLD, [TimingType.wall]) dolfinx_mpc-0.9.1/python/demos/demo_periodic_geometrical.py000066400000000000000000000131651476141270300242150ustar00rootroot00000000000000# This demo program solves Poisson's equation # # - div grad u(x, y) = f(x, y) # # on the unit square with homogeneous Dirichlet boundary conditions # at y = 0, 1 and periodic boundary conditions at x = 0, 1. # # Copyright (C) Jørgen S. Dokken 2020-2022. # # This file is part of DOLFINX_MPCX. # # SPDX-License-Identifier: MIT from __future__ import annotations from pathlib import Path from typing import Union from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import scipy.sparse.linalg from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.io import XDMFFile from dolfinx.mesh import create_unit_square, locate_entities_boundary from ufl import ( SpatialCoordinate, TestFunction, TrialFunction, as_vector, dx, exp, grad, inner, pi, sin, ) import dolfinx_mpc.utils from dolfinx_mpc import LinearProblem, MultiPointConstraint # Get PETSc int and scalar types complex_mode = True if np.dtype(default_scalar_type).kind == "c" else False # Create mesh and finite element NX = 50 NY = 100 mesh = create_unit_square(MPI.COMM_WORLD, NX, NY) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) tol = 250 * np.finfo(default_scalar_type).resolution def dirichletboundary(x): return np.logical_or(np.isclose(x[1], 0, atol=tol), np.isclose(x[1], 1, atol=tol)) # Create Dirichlet boundary condition facets = locate_entities_boundary(mesh, 1, dirichletboundary) topological_dofs = fem.locate_dofs_topological(V, 1, facets) zero = np.array([0, 0], dtype=default_scalar_type) bc = fem.dirichletbc(zero, topological_dofs, V) bcs = [bc] def periodic_boundary(x): return np.isclose(x[0], 1, atol=tol) def periodic_relation(x): out_x = np.zeros_like(x) out_x[0] = 1 - x[0] out_x[1] = x[1] out_x[2] = x[2] return out_x with Timer("~PERIODIC: Initialize MPC"): mpc = MultiPointConstraint(V) mpc.create_periodic_constraint_geometrical(V, periodic_boundary, periodic_relation, bcs) mpc.finalize() # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx x = SpatialCoordinate(mesh) dx_ = x[0] - 0.9 dy_ = x[1] - 0.5 f = as_vector((x[0] * sin(5.0 * pi * x[1]) + 1.0 * exp(-(dx_ * dx_ + dy_ * dy_) / 0.02), 0.3 * x[1])) rhs = inner(f, v) * dx # Setup MPC system with Timer("~PERIODIC: Initialize varitional problem"): problem = LinearProblem(a, rhs, mpc, bcs=bcs) solver = problem.solver # Give PETSc solver options a unique prefix solver_prefix = "dolfinx_mpc_solve_{}".format(id(solver)) solver.setOptionsPrefix(solver_prefix) petsc_options: dict[str, Union[str, int, float]] if complex_mode or default_scalar_type == np.float32: petsc_options = {"ksp_type": "preonly", "pc_type": "lu"} else: petsc_options = { "ksp_type": "cg", "ksp_rtol": 1e-6, "pc_type": "hypre", "pc_hypre_type": "boomeramg", "pc_hypre_boomeramg_max_iter": 1, "pc_hypre_boomeramg_cycle_type": "v", # , # "pc_hypre_boomeramg_print_statistics": 1 } # Set PETSc options opts = PETSc.Options() # type: ignore opts.prefixPush(solver_prefix) if petsc_options is not None: for k, v in petsc_options.items(): opts[k] = v opts.prefixPop() solver.setFromOptions() with Timer("~PERIODIC: Assemble and solve MPC problem"): uh = problem.solve() # solver.view() it = solver.getIterationNumber() print("Constrained solver iterations {0:d}".format(it)) # Write solution to file outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) uh.name = "u_mpc" outfile = XDMFFile(mesh.comm, outdir / "demo_periodic_geometrical.xdmf", "w") outfile.write_mesh(mesh) outfile.write_function(uh) print("----Verification----") # --------------------VERIFICATION------------------------- bilinear_form = fem.form(a) A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() linear_form = fem.form(rhs) L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore fem.petsc.set_bc(L_org, bcs) solver.setOperators(A_org) u_ = fem.Function(V) solver.solve(L_org, u_.x.petsc_vec) it = solver.getIterationNumber() print("Unconstrained solver iterations {0:d}".format(it)) u_.x.scatter_forward() u_.name = "u_unconstrained" outfile.write_function(u_) root = 0 comm = mesh.comm with Timer("~Demo: Verification"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, problem._A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, problem._b, mpc, root=root) is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d.astype(scipy_dtype) assert np.allclose(uh_numpy.astype(u_mpc.dtype), u_mpc, atol=float(tol)) list_timings(MPI.COMM_WORLD, [TimingType.wall]) L_org.destroy() dolfinx_mpc-0.9.1/python/demos/demo_periodic_gep.py000077500000000000000000000315061476141270300224770ustar00rootroot00000000000000# Copyright (C) 2021-2022 fmonteghetti, Connor D. Pierce, and Jorgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # This demo solves the Laplace eigenvalue problem # # - div grad u(x, y) = lambda*u(x, y) # # on the unit square with two sets of boundary conditions: # # u(x=0) = u(x=1) and u(y=0) = u(y=1) = 0, # # or # # u(x=0) = u(x=1) and u(y=0) = u(y=1). # # The weak form reads # # (grad(u),grad(v)) = lambda * (u,v), # # which leads to the generalized eigenvalue problem # # A * U = lambda * B * U, # # where A and B are real symmetric positive definite matrices. The generalized # eigenvalue problem is solved using SLEPc and the computed eigenvalues are # compared to the exact ones. from __future__ import annotations from pathlib import Path from typing import List, Tuple from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np from dolfinx import default_scalar_type from dolfinx.io import XDMFFile from dolfinx.mesh import create_unit_square, locate_entities_boundary, meshtags from slepc4py import SLEPc from ufl import TestFunction, TrialFunction, dx, grad, inner from dolfinx_mpc import MultiPointConstraint, assemble_matrix def print0(string: str): """Print on rank 0 only""" if MPI.COMM_WORLD.rank == 0: print(string) def monitor_EPS_short(EPS: SLEPc.EPS, it: int, nconv: int, eig: list, err: list, it_skip: int): """ Concise monitor for EPS.solve(). Parameters ---------- eps Eigenvalue Problem Solver class. it Current iteration number. nconv Number of converged eigenvalue. eig Eigenvalues err Computed errors. it_skip Iteration skip. """ if it == 1: print0("******************************") print0("*** SLEPc Iterations... ***") print0("******************************") print0("Iter. | Conv. | Max. error") print0(f"{it:5d} | {nconv:5d} | {max(err):1.1e}") elif not it % it_skip: print0(f"{it:5d} | {nconv:5d} | {max(err):1.1e}") def EPS_print_results(EPS: SLEPc.EPS): """Print summary of solution results.""" print0("\n******************************") print0("*** SLEPc Solution Results ***") print0("******************************") its = EPS.getIterationNumber() print0(f"Iteration number: {its}") nconv = EPS.getConverged() print0(f"Converged eigenpairs: {nconv}") if nconv > 0: # Create the results vectors vr, vi = EPS.getOperators()[0].createVecs() print0("\nConverged eigval. Error ") print0("----------------- -------") for i in range(nconv): k = EPS.getEigenpair(i, vr, vi) error = EPS.computeError(i) if not np.isclose(k.imag, 0.0): print0(f" {k.real:2.2e} + {k.imag:2.2e}j {error:1.1e}") else: pad = " " * 11 print0(f" {k.real:2.2e} {pad} {error:1.1e}") def EPS_get_spectrum( EPS: SLEPc.EPS, mpc: MultiPointConstraint ) -> Tuple[List[complex], List[PETSc.Vec], List[PETSc.Vec]]: # type: ignore """Retrieve eigenvalues and eigenfunctions from SLEPc EPS object. Parameters ---------- EPS The SLEPc solver mpc The multipoint constraint Returns ------- Tuple consisting of: List of complex converted eigenvalues, lists of converted eigenvectors (real part) and (imaginary part) """ # Get results in lists eigval = [EPS.getEigenvalue(i) for i in range(EPS.getConverged())] eigvec_r = list() eigvec_i = list() V = mpc.function_space for i in range(EPS.getConverged()): vr = fem.Function(V) vi = fem.Function(V) EPS.getEigenvector(i, vr.x.petsc_vec, vi.x.petsc_vec) eigvec_r.append(vr) eigvec_i.append(vi) # Sort by increasing real parts idx = np.argsort(np.real(np.array(eigval)), axis=0) eigval = [eigval[i] for i in idx] eigvec_r = [eigvec_r[i] for i in idx] eigvec_i = [eigvec_i[i] for i in idx] return (eigval, eigvec_r, eigvec_i) def solve_GEP_shiftinvert( A: PETSc.Mat, # type: ignore B: PETSc.Mat, # type: ignore problem_type: SLEPc.EPS.ProblemType = SLEPc.EPS.ProblemType.GNHEP, solver: SLEPc.EPS.Type = SLEPc.EPS.Type.KRYLOVSCHUR, nev: int = 10, tol: float = 1e-7, max_it: int = 10, target: float = 0.0, shift: float = 0.0, comm: MPI.Intracomm = MPI.COMM_WORLD, ) -> SLEPc.EPS: """ Solve generalized eigenvalue problem A*x=lambda*B*x using shift-and-invert as spectral transform method. Parameters ---------- A The matrix A B The matrix B problem_type The problem type, for options see: https://bit.ly/3gM5pth solver: Solver type, for options see: https://bit.ly/35LDcMG nev Number of requested eigenvalues. tol Tolerance for slepc solver max_it Maximum number of iterations. target Target eigenvalue. Also used for sorting. shift Shift 'sigma' used in shift-and-invert. Returns ------- EPS The SLEPc solver """ # Build an Eigenvalue Problem Solver object EPS = SLEPc.EPS() EPS.create(comm=comm) EPS.setOperators(A, B) EPS.setProblemType(problem_type) # set the number of eigenvalues requested EPS.setDimensions(nev=nev) # Set solver EPS.setType(solver) # set eigenvalues of interest EPS.setWhichEigenpairs(SLEPc.EPS.Which.TARGET_MAGNITUDE) EPS.setTarget(target) # sorting # set tolerance and max iterations EPS.setTolerances(tol=tol, max_it=max_it) # Set up shift-and-invert # Only work if 'whichEigenpairs' is 'TARGET_XX' ST = EPS.getST() ST.setType(SLEPc.ST.Type.SINVERT) ST.setShift(shift) EPS.setST(ST) # set monitor it_skip = 1 EPS.setMonitor(lambda eps, it, nconv, eig, err: monitor_EPS_short(eps, it, nconv, eig, err, it_skip)) # parse command line options EPS.setFromOptions() # Display all options (including those of ST object) # EPS.view() EPS.solve() EPS_print_results(EPS) return EPS def assemble_and_solve(boundary_condition: List[str] = ["dirichlet", "periodic"], Nev: int = 10): """ Assemble and solve the Laplace eigenvalue problem on the unit square with the prescribed boundary conditions. Parameters ---------- boundary_condition First item describes b.c. on {x=0} and {x=1} Second item describes b.c. on {y=0} and {y=1} Nev Number of requested eigenvalues. The default is 10. """ comm = MPI.COMM_WORLD # Create mesh and finite element N = 50 mesh = create_unit_square(comm, N, N) V = fem.functionspace(mesh, ("Lagrange", 1)) fdim = mesh.topology.dim - 1 bcs = [] pbc_directions = [] pbc_slave_tags = [] pbc_is_slave = [] pbc_is_master = [] pbc_meshtags = [] pbc_slave_to_master_maps = [] def generate_pbc_slave_to_master_map(i): def pbc_slave_to_master_map(x): out_x = x.copy() out_x[i] = x[i] - 1 return out_x return pbc_slave_to_master_map def generate_pbc_is_slave(i): return lambda x: np.isclose(x[i], 1) def generate_pbc_is_master(i): return lambda x: np.isclose(x[i], 0) # Parse boundary conditions for i, bc_type in enumerate(boundary_condition): if bc_type == "dirichlet": u_bc = fem.Function(V) u_bc.x.array[:] = 0 def dirichletboundary(x): return np.logical_or(np.isclose(x[i], 0), np.isclose(x[i], 1)) facets = locate_entities_boundary(mesh, fdim, dirichletboundary) topological_dofs = fem.locate_dofs_topological(V, fdim, facets) bcs.append(fem.dirichletbc(u_bc, topological_dofs)) elif bc_type == "periodic": pbc_directions.append(i) pbc_slave_tags.append(i + 2) pbc_is_slave.append(generate_pbc_is_slave(i)) pbc_is_master.append(generate_pbc_is_master(i)) pbc_slave_to_master_maps.append(generate_pbc_slave_to_master_map(i)) facets = locate_entities_boundary(mesh, fdim, pbc_is_slave[-1]) arg_sort = np.argsort(facets) pbc_meshtags.append( meshtags( mesh, fdim, facets[arg_sort], np.full(len(facets), pbc_slave_tags[-1], dtype=np.int32), ) ) # Create MultiPointConstraint object mpc = MultiPointConstraint(V) N_pbc = len(pbc_directions) for i in range(N_pbc): if N_pbc > 1: def pbc_slave_to_master_map(x): out_x = pbc_slave_to_master_maps[i](x) idx = pbc_is_slave[(i + 1) % N_pbc](x) out_x[pbc_directions[i]][idx] = np.nan return out_x else: pbc_slave_to_master_map = pbc_slave_to_master_maps[i] mpc.create_periodic_constraint_topological(V, pbc_meshtags[i], pbc_slave_tags[i], pbc_slave_to_master_map, bcs) if len(pbc_directions) > 1: # Map intersection(slaves_x, slaves_y) to intersection(masters_x, masters_y), # i.e. map the slave dof at (1, 1) to the master dof at (0, 0) def pbc_slave_to_master_map(x): out_x = x.copy() out_x[0] = x[0] - 1 out_x[1] = x[1] - 1 idx = np.logical_and(pbc_is_slave[0](x), pbc_is_slave[1](x)) out_x[0][~idx] = np.nan out_x[1][~idx] = np.nan return out_x mpc.create_periodic_constraint_topological(V, pbc_meshtags[1], pbc_slave_tags[1], pbc_slave_to_master_map, bcs) mpc.finalize() # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx b = inner(u, v) * dx mass_form = fem.form(a) stiffness_form = fem.form(b) # Diagonal values for slave and Dirichlet DoF # The generalized eigenvalue problem will have spurious eigenvalues at # lambda_spurious = diagval_A/diagval_B. Here we choose lambda_spurious=1e4, # which is far from the region of interest. diagval_A = 1e2 diagval_B = 1e-2 tol = float(5e2 * np.finfo(default_scalar_type).resolution) A = assemble_matrix(mass_form, mpc, bcs=bcs, diagval=diagval_A) B = assemble_matrix(stiffness_form, mpc, bcs=bcs, diagval=diagval_B) EPS = solve_GEP_shiftinvert( A, B, problem_type=SLEPc.EPS.ProblemType.GHEP, solver=SLEPc.EPS.Type.KRYLOVSCHUR, nev=Nev, tol=tol, max_it=10, target=1.5, shift=1.5, comm=comm, ) (eigval, eigvec_r, eigvec_i) = EPS_get_spectrum(EPS, mpc) # update slave DoF for i in range(len(eigval)): eigvec_r[i].x.scatter_forward() mpc.backsubstitution(eigvec_r[i]) eigvec_i[i].x.scatter_forward() mpc.backsubstitution(eigvec_i[i]) print0(f"Computed eigenvalues:\n {np.around(eigval,decimals=2)}") # Save all eigenvectors suffix = "".join([bc_type[0] for bc_type in boundary_condition]) outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) with XDMFFile(mesh.comm, outdir / f"eigenvector_{suffix}.xdmf", "w") as xdmf: xdmf.write_mesh(mesh) for i, e_vec in enumerate(eigvec_r): xdmf.write_function(e_vec, i) def print_exact_eigenvalues(boundary_condition: List[str], N: int): L = [1, 1] if boundary_condition[0] == "dirichlet": ev_x = [(n * np.pi / L[0]) ** 2 for n in range(1, N + 1)] elif boundary_condition[0] == "periodic": ev_x = [(n * 2 * np.pi / L[0]) ** 2 for n in range(-N, N)] if boundary_condition[1] == "dirichlet": ev_y = [(n * np.pi / L[1]) ** 2 for n in range(1, N + 1)] elif boundary_condition[1] == "periodic": ev_y = [(n * 2 * np.pi / L[1]) ** 2 for n in range(-N, N)] ev_ex = np.sort([r + q for r in ev_x for q in ev_y]) ev_ex = ev_ex[0:N] print0(f"Exact eigenvalues (repeated with multiplicity):\n {np.around(ev_ex,decimals=2)}") # Dirichlet boundary condition on {x=0} and {x=1} # Dirichlet boundary condition on {y=0} and {y=1} assemble_and_solve(["dirichlet", "dirichlet"], 10) print_exact_eigenvalues(["dirichlet", "dirichlet"], 10) # Dirichlet boundary condition on {x=0} and {x=1} # Periodic boundary condition: {y=0} -> {y=1} assemble_and_solve(["dirichlet", "periodic"], 10) print_exact_eigenvalues(["dirichlet", "periodic"], 10) # Periodic boundary condition: {x=0} -> {x=1} # Dirichlet boundary condition on {y=0} and {y=1} assemble_and_solve(["periodic", "dirichlet"], 10) print_exact_eigenvalues(["periodic", "dirichlet"], 10) # Periodic boundary condition: {x=0} -> {x=1} # Periodic boundary condition: {y=0} -> {y=1} assemble_and_solve(["periodic", "periodic"], 10) print_exact_eigenvalues(["periodic", "periodic"], 10) dolfinx_mpc-0.9.1/python/demos/demo_stokes.py000066400000000000000000000263601476141270300213550ustar00rootroot00000000000000# # Stokes flow with slip conditions # **Author** Jørgen S. Dokken # # **License** MIT # # This demo illustrates how to apply a slip condition on an # interface not aligned with the coordiante axis. # We start by the various modules required for this demo # + from __future__ import annotations from pathlib import Path from typing import Union from mpi4py import MPI from petsc4py import PETSc import basix.ufl import gmsh import numpy as np import scipy.sparse.linalg from dolfinx import common, default_real_type, default_scalar_type, fem, io from dolfinx.io import gmshio from numpy.typing import NDArray from ufl import ( FacetNormal, Identity, Measure, TestFunctions, TrialFunctions, div, dot, dx, grad, inner, outer, sym, ) from ufl.core.expr import Expr import dolfinx_mpc.utils from dolfinx_mpc import LinearProblem, MultiPointConstraint # - # ## Mesh generation # # Next we create the computational domain, a channel titled with respect to the coordinate axis. # We use GMSH and the DOLFINx GMSH-IO to convert the GMSH model into a DOLFINx mesh # + def create_mesh_gmsh( L: int = 2, H: int = 1, res: float = 0.1, theta: float = np.pi / 5, wall_marker: int = 1, outlet_marker: int = 2, inlet_marker: int = 3, ): """ Create a channel of length L, height H, rotated theta degrees around origin, with facet markers for inlet, outlet and walls. Parameters ---------- L The length of the channel H Width of the channel res Mesh resolution (uniform) theta Rotation angle wall_marker Integer used to mark the walls of the channel outlet_marker Integer used to mark the outlet of the channel inlet_marker Integer used to mark the inlet of the channel """ gmsh.initialize() if MPI.COMM_WORLD.rank == 0: gmsh.model.add("Square duct") # Create rectangular channel channel = gmsh.model.occ.addRectangle(0, 0, 0, L, H) gmsh.model.occ.synchronize() # Find entity markers before rotation surfaces = gmsh.model.occ.getEntities(dim=1) walls = [] inlets = [] outlets = [] for surface in surfaces: com = gmsh.model.occ.getCenterOfMass(surface[0], surface[1]) if np.allclose(com, [0, H / 2, 0]): inlets.append(surface[1]) elif np.allclose(com, [L, H / 2, 0]): outlets.append(surface[1]) elif np.isclose(com[1], 0) or np.isclose(com[1], H): walls.append(surface[1]) # Rotate channel theta degrees in the xy-plane gmsh.model.occ.rotate([(2, channel)], 0, 0, 0, 0, 0, 1, theta) gmsh.model.occ.synchronize() # Add physical markers gmsh.model.addPhysicalGroup(2, [channel], 1) gmsh.model.setPhysicalName(2, 1, "Fluid volume") gmsh.model.addPhysicalGroup(1, walls, wall_marker) gmsh.model.setPhysicalName(1, wall_marker, "Walls") gmsh.model.addPhysicalGroup(1, inlets, inlet_marker) gmsh.model.setPhysicalName(1, inlet_marker, "Fluid inlet") gmsh.model.addPhysicalGroup(1, outlets, outlet_marker) gmsh.model.setPhysicalName(1, outlet_marker, "Fluid outlet") # Set number of threads used for mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) # Set uniform mesh size gmsh.option.setNumber("Mesh.CharacteristicLengthMin", res) gmsh.option.setNumber("Mesh.CharacteristicLengthMax", res) # Generate mesh gmsh.model.mesh.generate(2) # Convert gmsh model to DOLFINx Mesh and meshtags mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim=2) gmsh.finalize() return mesh, ft mesh, mt = create_mesh_gmsh(res=0.1) fdim = mesh.topology.dim - 1 # The next step is the create the function spaces for the fluid velocit and pressure. # We will use a mixed-formulation, and we use `basix.ufl` to create the Taylor-Hood finite element pair P2 = basix.ufl.element("Lagrange", mesh.topology.cell_name(), 2, shape=(mesh.geometry.dim,), dtype=default_real_type) P1 = basix.ufl.element("Lagrange", mesh.topology.cell_name(), 1, dtype=default_real_type) TH = basix.ufl.mixed_element([P2, P1]) W = fem.functionspace(mesh, TH) # - # ## Boundary conditions # Next we want to prescribe an inflow condition on a set of facets, those marked by 3 in GMSH # To do so, we start by creating a function in the **collapsed** subspace of V, and create the # expression of choice for the boundary condition. # We use a Python-function for this, that takes in an array of shape `(num_points, 3)` and # returns the function values as an `(num_points, 2)` array. # Note that we therefore can use vectorized Numpy operations to compute the inlet values for a sequence of points # with a single function call V, _ = W.sub(0).collapse() Q, _ = W.sub(1).collapse() inlet_velocity = fem.Function(V) def inlet_velocity_expression( x: NDArray[Union[np.float32, np.float64]], ) -> NDArray[Union[np.float32, np.float64, np.complex64, np.complex128]]: return np.stack( ( np.sin(np.pi * np.sqrt(x[0] ** 2 + x[1] ** 2)), 5 * x[1] * np.sin(np.pi * np.sqrt(x[0] ** 2 + x[1] ** 2)), ) ).astype(default_scalar_type) inlet_velocity.interpolate(inlet_velocity_expression) inlet_velocity.x.scatter_forward() # Next, we have to create the Dirichlet boundary condition. As the function is in the collapsed space, # we send in the un-collapsed space and the collapsed space to locate dofs topological to find the # appropriate dofs and the mapping from `V` to `W0`. # + W0 = W.sub(0) dofs = fem.locate_dofs_topological((W0, V), 1, mt.find(3)) bc1 = fem.dirichletbc(inlet_velocity, dofs, W0) bcs = [bc1] # - # Next, we want to create the slip conditions at the side walls of the channel. # To do so, we compute an approximation of the normal at all the degrees of freedom that # are associated with the facets marked with `1`, either by being on one of the vertices of a # facet marked with `1`, or on the facet itself (for instance at a midpoint). # If a dof is associated with a vertex that is connected to mutliple facets marked with `1`, # we define the normal as the average between the normals at the vertex viewed from each facet. # + n = dolfinx_mpc.utils.create_normal_approximation(V, mt, 1) # - # Next, we can create the multipoint-constraint, enforcing that $\mathbf{u}\cdot\mathbf{n}=0$, # except where we have already enforced a Dirichlet boundary condition # + with common.Timer("~Stokes: Create slip constraint"): mpc = MultiPointConstraint(W) mpc.create_slip_constraint(W.sub(0), (mt, 1), n, bcs=bcs) mpc.finalize() # - # ## Variational formulation # We start by creating some convenience functions, including the tangential projection of a vector, # the symmetric gradient and traction. # + def tangential_proj(u: Expr, n: Expr): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (Identity(u.ufl_shape[0]) - outer(n, n)) * u def sym_grad(u: Expr): return sym(grad(u)) def T(u: Expr, p: Expr, mu: Expr): return 2 * mu * sym_grad(u) - p * Identity(u.ufl_shape[0]) # - # Next, we define the classical terms of the bilinear form `a` and linear form `L`. # We note that we use the symmetric formulation after integration by parts. # + mu = fem.Constant(mesh, default_scalar_type(1.0)) f = fem.Constant(mesh, default_scalar_type((0, 0))) (u, p) = TrialFunctions(W) (v, q) = TestFunctions(W) a = (2 * mu * inner(sym_grad(u), sym_grad(v)) - inner(p, div(v)) - inner(div(u), q)) * dx L = inner(f, v) * dx # - # We could prescibe some shear stress at the slip boundaries. However, in this demo, we set it to `0`, # but include it in the variational formulation. We add the appropriate terms due to the slip condition, # as explained in https://arxiv.org/pdf/2001.10639.pdf # + n = FacetNormal(mesh) g_tau = tangential_proj(fem.Constant(mesh, default_scalar_type(((0, 0), (0, 0)))) * n, n) ds = Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=1) a -= inner(outer(n, n) * dot(T(u, p, mu), n), v) * ds L += inner(g_tau, v) * ds # - # ## Solve the variational problem # We use the MUMPS solver provided through the DOLFINX_MPC PETSc interface to solve the variational problem # + petsc_options = {"ksp_type": "preonly", "pc_type": "lu", "pc_factor_mat_solver_type": "mumps"} problem = LinearProblem(a, L, mpc, bcs=bcs, petsc_options=petsc_options) U = problem.solve() # - # ## Visualization # We store the solution to the `VTX` file format, which can be opend with the # `ADIOS2VTXReader` in Paraview # + u = U.sub(0).collapse() p = U.sub(1).collapse() u.name = "u" p.name = "p" outdir = Path("results").absolute() outdir.mkdir(exist_ok=True, parents=True) with io.VTXWriter(mesh.comm, outdir / "demo_stokes_u.bp", u, engine="BP4") as vtx: vtx.write(0.0) with io.VTXWriter(mesh.comm, outdir / "demo_stokes_p.bp", p, engine="BP4") as vtx: vtx.write(0.0) # - # ## Verification # We verify that the MPC implementation is correct by creating the global reduction matrix # and performing global matrix-matrix-matrix multiplication using numpy # + with common.Timer("~Stokes: Verification of problem by global matrix reduction"): # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values # Generate reference matrices and unconstrained solution bilinear_form = fem.form(a) A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() linear_form = fem.form(L) L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore fem.petsc.set_bc(L_org, bcs) root = 0 dolfinx_mpc.utils.compare_mpc_lhs(A_org, problem.A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, problem.b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(U.x.petsc_vec, root=root) is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK.astype(scipy_dtype), reduced_L.astype(scipy_dtype)) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d.astype(scipy_dtype) assert np.allclose(uh_numpy.astype(u_mpc.dtype), u_mpc, atol=float(5e2 * np.finfo(u_mpc.dtype).resolution)) # - # ## Timings # Finally, we list the execution time of various operations used in the demo common.list_timings(MPI.COMM_WORLD, [common.TimingType.wall]) dolfinx_mpc-0.9.1/python/demos/demo_stokes_nest.py000066400000000000000000000305601476141270300224030ustar00rootroot00000000000000# Copyright (C) 2022 Nathan Sime # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # This demo illustrates how to apply a slip condition on an # interface not aligned with the coordiante axis. # The demos solves the Stokes problem using the nest functionality to # avoid using mixed function spaces. The demo also illustrates how to use # block preconditioners with PETSc from __future__ import annotations from pathlib import Path from mpi4py import MPI from petsc4py import PETSc import basix import dolfinx.io import gmsh import numpy as np import scipy.sparse.linalg import ufl from dolfinx import default_real_type, default_scalar_type from dolfinx.io import gmshio from ufl.core.expr import Expr import dolfinx_mpc import dolfinx_mpc.utils def create_mesh_gmsh( L: int = 2, H: int = 1, res: float = 0.1, theta: float = np.pi / 5, wall_marker: int = 1, outlet_marker: int = 2, inlet_marker: int = 3, ): """ Create a channel of length L, height H, rotated theta degrees around origin, with facet markers for inlet, outlet and walls. Parameters ---------- L The length of the channel H Width of the channel res Mesh resolution (uniform) theta Rotation angle wall_marker Integer used to mark the walls of the channel outlet_marker Integer used to mark the outlet of the channel inlet_marker Integer used to mark the inlet of the channel """ gmsh.initialize() if MPI.COMM_WORLD.rank == 0: gmsh.model.add("Square duct") # Create rectangular channel channel = gmsh.model.occ.addRectangle(0, 0, 0, L, H) gmsh.model.occ.synchronize() # Find entity markers before rotation surfaces = gmsh.model.occ.getEntities(dim=1) walls = [] inlets = [] outlets = [] for surface in surfaces: com = gmsh.model.occ.getCenterOfMass(surface[0], surface[1]) if np.allclose(com, [0, H / 2, 0]): inlets.append(surface[1]) elif np.allclose(com, [L, H / 2, 0]): outlets.append(surface[1]) elif np.isclose(com[1], 0) or np.isclose(com[1], H): walls.append(surface[1]) # Rotate channel theta degrees in the xy-plane gmsh.model.occ.rotate([(2, channel)], 0, 0, 0, 0, 0, 1, theta) gmsh.model.occ.synchronize() # Add physical markers gmsh.model.addPhysicalGroup(2, [channel], 1) gmsh.model.setPhysicalName(2, 1, "Fluid volume") gmsh.model.addPhysicalGroup(1, walls, wall_marker) gmsh.model.setPhysicalName(1, wall_marker, "Walls") gmsh.model.addPhysicalGroup(1, inlets, inlet_marker) gmsh.model.setPhysicalName(1, inlet_marker, "Fluid inlet") gmsh.model.addPhysicalGroup(1, outlets, outlet_marker) gmsh.model.setPhysicalName(1, outlet_marker, "Fluid outlet") # Set number of threads used for mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) # Set uniform mesh size gmsh.option.setNumber("Mesh.CharacteristicLengthMin", res) gmsh.option.setNumber("Mesh.CharacteristicLengthMax", res) # Generate mesh gmsh.model.mesh.generate(2) # Convert gmsh model to DOLFINx Mesh and meshtags mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0, gdim=2) gmsh.finalize() return mesh, ft # ------------------- Mesh and function space creation ------------------------ mesh, mt = create_mesh_gmsh(res=0.1) fdim = mesh.topology.dim - 1 # Create the function space cellname = mesh.ufl_cell().cellname() Ve = basix.ufl.element(basix.ElementFamily.P, cellname, 2, shape=(mesh.geometry.dim,), dtype=default_real_type) Qe = basix.ufl.element(basix.ElementFamily.P, cellname, 1, dtype=default_real_type) V = dolfinx.fem.functionspace(mesh, Ve) Q = dolfinx.fem.functionspace(mesh, Qe) def inlet_velocity_expression(x): return np.stack( ( np.sin(np.pi * np.sqrt(x[0] ** 2 + x[1] ** 2)), 5 * x[1] * np.sin(np.pi * np.sqrt(x[0] ** 2 + x[1] ** 2)), ) ) # ----------------------Defining boundary conditions---------------------- # Inlet velocity Dirichlet BC inlet_velocity = dolfinx.fem.Function(V) inlet_velocity.interpolate(inlet_velocity_expression) inlet_velocity.x.scatter_forward() dofs = dolfinx.fem.locate_dofs_topological(V, 1, mt.find(3)) bc1 = dolfinx.fem.dirichletbc(inlet_velocity, dofs) # Collect Dirichlet boundary conditions bcs = [bc1] # Slip conditions for walls n = dolfinx_mpc.utils.create_normal_approximation(V, mt, 1) with dolfinx.common.Timer("~Stokes: Create slip constraint"): mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_slip_constraint(V, (mt, 1), n, bcs=bcs) mpc.finalize() mpc_q = dolfinx_mpc.MultiPointConstraint(Q) mpc_q.finalize() def tangential_proj(u: Expr, n: Expr): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u def sym_grad(u: Expr): return ufl.sym(ufl.grad(u)) def T(u: Expr, p: Expr, mu: Expr): return 2 * mu * sym_grad(u) - p * ufl.Identity(u.ufl_shape[0]) # --------------------------Variational problem--------------------------- # Traditional terms mu = 1 f = dolfinx.fem.Constant(mesh, default_scalar_type((0, 0))) (u, p) = ufl.TrialFunction(V), ufl.TrialFunction(Q) (v, q) = ufl.TestFunction(V), ufl.TestFunction(Q) a00 = 2 * mu * ufl.inner(sym_grad(u), sym_grad(v)) * ufl.dx a01 = -ufl.inner(p, ufl.div(v)) * ufl.dx a10 = -ufl.inner(ufl.div(u), q) * ufl.dx a11 = None L0 = ufl.inner(f, v) * ufl.dx L1 = ufl.inner(dolfinx.fem.Constant(mesh, default_scalar_type(0.0)), q) * ufl.dx # No prescribed shear stress n = ufl.FacetNormal(mesh) g_tau = tangential_proj(dolfinx.fem.Constant(mesh, default_scalar_type(((0, 0), (0, 0)))) * n, n) ds = ufl.Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=1) # Terms due to slip condition # Explained in for instance: https://arxiv.org/pdf/2001.10639.pdf a00 -= ufl.inner(ufl.outer(n, n) * ufl.dot(2 * mu * sym_grad(u), n), v) * ds a01 -= ufl.inner(ufl.outer(n, n) * ufl.dot(-p * ufl.Identity(u.ufl_shape[0]), n), v) * ds L0 += ufl.inner(g_tau, v) * ds a = [[dolfinx.fem.form(a00), dolfinx.fem.form(a01)], [dolfinx.fem.form(a10), dolfinx.fem.form(a11)]] L = [dolfinx.fem.form(L0), dolfinx.fem.form(L1)] # Assemble LHS matrix and RHS vector with dolfinx.common.Timer("~Stokes: Assemble LHS and RHS"): A = dolfinx_mpc.create_matrix_nest(a, [mpc, mpc_q]) dolfinx_mpc.assemble_matrix_nest(A, a, [mpc, mpc_q], bcs) A.assemble() b = dolfinx_mpc.create_vector_nest(L, [mpc, mpc_q]) dolfinx_mpc.assemble_vector_nest(b, L, [mpc, mpc_q]) # Set Dirichlet boundary condition values in the RHS dolfinx.fem.petsc.apply_lifting_nest(b, a, bcs) for b_sub in b.getNestSubVecs(): b_sub.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) # type: ignore # bcs0 = dolfinx.cpp.fem.bcs_rows( # dolfinx.fem.assemble._create_cpp_form(L), bcs) bcs0 = dolfinx.fem.bcs_by_block(dolfinx.fem.extract_function_spaces(L), bcs) dolfinx.fem.petsc.set_bc_nest(b, bcs0) # Preconditioner P11 = dolfinx.fem.petsc.assemble_matrix(dolfinx.fem.form(p * q * ufl.dx)) P = PETSc.Mat().createNest([[A.getNestSubMatrix(0, 0), None], [None, P11]]) # type: ignore P.assemble() # ---------------------- Solve variational problem ----------------------- ksp = PETSc.KSP().create(mesh.comm) # type: ignore ksp.setOperators(A, P) ksp.setMonitor( lambda ctx, it, r: PETSc.Sys.Print( # type: ignore f"Iteration: {it:>4d}, |r| = {r:.3e}" ) ) ksp.setType("minres") ksp.setTolerances(rtol=1e-8) ksp.getPC().setType("fieldsplit") ksp.getPC().setFieldSplitType(PETSc.PC.CompositeType.ADDITIVE) # type: ignore nested_IS = P.getNestISs() ksp.getPC().setFieldSplitIS(("u", nested_IS[0][0]), ("p", nested_IS[0][1])) ksp_u, ksp_p = ksp.getPC().getFieldSplitSubKSP() ksp_u.setType("preonly") ksp_u.getPC().setType("gamg") ksp_p.setType("preonly") ksp_p.getPC().setType("jacobi") ksp.setFromOptions() Uh = b.copy() ksp.solve(b, Uh) for Uh_sub in Uh.getNestSubVecs(): Uh_sub.ghostUpdate( addv=PETSc.InsertMode.INSERT, # type: ignore mode=PETSc.ScatterMode.FORWARD, # type: ignore ) # type: ignore # ----------------------------- Put NestVec into DOLFINx Function - --------- uh = dolfinx.fem.Function(mpc.function_space) uh.x.petsc_vec.setArray(Uh.getNestSubVecs()[0].array) ph = dolfinx.fem.Function(mpc_q.function_space) ph.x.petsc_vec.setArray(Uh.getNestSubVecs()[1].array) uh.x.scatter_forward() ph.x.scatter_forward() # Backsubstitute to update slave dofs in solution vector mpc.backsubstitution(uh) mpc_q.backsubstitution(ph) # ------------------------------ Output ---------------------------------- uh.name = "u" ph.name = "p" outdir = Path("results") outdir.mkdir(exist_ok=True, parents=True) with dolfinx.io.XDMFFile(mesh.comm, outdir / "demo_stokes_nest.xdmf", "w") as outfile: outfile.write_mesh(mesh) outfile.write_meshtags(mt, mesh.geometry) outfile.write_function(uh) outfile.write_function(ph) with dolfinx.io.VTXWriter(mesh.comm, outdir / "stokes_nest_uh.bp", uh, engine="BP4") as vtx: vtx.write(0.0) # -------------------- Verification -------------------------------- # Transfer data from the MPC problem to numpy arrays for comparison with dolfinx.common.Timer("~Stokes: Verification of problem by global matrix reduction"): W = dolfinx.fem.functionspace(mesh, basix.ufl.mixed_element([Ve, Qe])) V, V_to_W = W.sub(0).collapse() _, Q_to_W = W.sub(1).collapse() # Inlet velocity Dirichlet BC inlet_velocity = dolfinx.fem.Function(V) inlet_velocity.interpolate(inlet_velocity_expression) inlet_velocity.x.scatter_forward() W0 = W.sub(0) dofs = dolfinx.fem.locate_dofs_topological((W0, V), 1, mt.find(3)) bc1 = dolfinx.fem.dirichletbc(inlet_velocity, dofs, W0) # Collect Dirichlet boundary conditions bcs = [bc1] # Slip conditions for walls n = dolfinx_mpc.utils.create_normal_approximation(V, mt, 1) with dolfinx.common.Timer("~Stokes: Create slip constraint"): mpc = dolfinx_mpc.MultiPointConstraint(W) mpc.create_slip_constraint(W.sub(0), (mt, 1), n, bcs=bcs) mpc.finalize() (u, p) = ufl.TrialFunctions(W) (v, q) = ufl.TestFunctions(W) a = (2 * mu * ufl.inner(sym_grad(u), sym_grad(v)) - ufl.inner(p, ufl.div(v)) - ufl.inner(ufl.div(u), q)) * ufl.dx L = ufl.inner(f, v) * ufl.dx # Terms due to slip condition # Explained in for instance: https://arxiv.org/pdf/2001.10639.pdf a -= ufl.inner(ufl.outer(n, n) * ufl.dot(T(u, p, mu), n), v) * ds L += ufl.inner(g_tau, v) * ds af = dolfinx.fem.form(a) Lf = dolfinx.fem.form(L) # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values # Generate reference matrices and unconstrained solution A_org = dolfinx.fem.petsc.assemble_matrix(af, bcs) A_org.assemble() L_org = dolfinx.fem.petsc.assemble_vector(Lf) dolfinx.fem.petsc.apply_lifting(L_org, [af], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore dolfinx.fem.petsc.set_bc(L_org, bcs) root = 0 # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) p_mpc = dolfinx_mpc.utils.gather_PETScVector(ph.x.petsc_vec, root=root) up_mpc = np.hstack([u_mpc, p_mpc]) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ L_np # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ d assert np.allclose(np.linalg.norm(uh_numpy, 2), np.linalg.norm(up_mpc, 2)) A.destroy() b.destroy() for Uh_sub in Uh.getNestSubVecs(): Uh_sub.destroy() Uh.destroy() ksp.destroy() # -------------------- List timings -------------------------- dolfinx.common.list_timings(MPI.COMM_WORLD, [dolfinx.common.TimingType.wall]) dolfinx_mpc-0.9.1/python/dolfinx_mpc/000077500000000000000000000000001476141270300176535ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/__init__.py000066400000000000000000000015431476141270300217670ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen Schartum Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT """Main module for DOLFINX_MPC""" # flake8: noqa from __future__ import annotations import dolfinx_mpc.cpp # New local assemblies from .assemble_matrix import ( assemble_matrix, assemble_matrix_nest, create_matrix_nest, create_sparsity_pattern, ) from .assemble_vector import ( apply_lifting, assemble_vector, assemble_vector_nest, create_vector_nest, ) from .multipointconstraint import MultiPointConstraint from .problem import LinearProblem __all__ = [ "assemble_matrix", "create_matrix_nest", "assemble_matrix_nest", "assemble_vector", "apply_lifting", "assemble_vector_nest", "create_vector_nest", "MultiPointConstraint", "LinearProblem", "create_sparsity_pattern", ] dolfinx_mpc-0.9.1/python/dolfinx_mpc/assemble_matrix.py000066400000000000000000000115301476141270300234040ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from collections.abc import Sequence from typing import Optional, Union from petsc4py import PETSc as _PETSc import dolfinx.cpp as _cpp import dolfinx.fem as _fem from dolfinx_mpc import cpp from .multipointconstraint import MultiPointConstraint def assemble_matrix( form: _fem.Form, constraint: Union[MultiPointConstraint, Sequence[MultiPointConstraint]], bcs: Optional[Sequence[_fem.DirichletBC]] = None, diagval: _PETSc.ScalarType = 1, # type: ignore A: Optional[_PETSc.Mat] = None, # type: ignore ) -> _PETSc.Mat: # type: ignore """ Assemble a compiled DOLFINx bilinear form into a PETSc matrix with corresponding multi point constraints and Dirichlet boundary conditions. Args: form: The compiled bilinear variational form constraint: The multi point constraint bcs: Sequence of Dirichlet boundary conditions diagval: Value to set on the diagonal of the matrix A: PETSc matrix to assemble into Returns: _PETSc.Mat: The matrix with the assembled bi-linear form #type: ignore """ bcs = [] if bcs is None else [bc._cpp_object for bc in bcs] if not isinstance(constraint, Sequence): assert form.function_spaces[0] == form.function_spaces[1] constraint = (constraint, constraint) # Generate matrix with MPC sparsity pattern if A is None: A = cpp.mpc.create_matrix(form._cpp_object, constraint[0]._cpp_object, constraint[1]._cpp_object) A.zeroEntries() # Assemble matrix in C++ cpp.mpc.assemble_matrix(A, form._cpp_object, constraint[0]._cpp_object, constraint[1]._cpp_object, bcs, diagval) # Add one on diagonal for Dirichlet boundary conditions if form.function_spaces[0] is form.function_spaces[1]: A.assemblyBegin(_PETSc.Mat.AssemblyType.FLUSH) # type: ignore A.assemblyEnd(_PETSc.Mat.AssemblyType.FLUSH) # type: ignore _cpp.fem.petsc.insert_diagonal(A, form.function_spaces[0], bcs, diagval) A.assemble() return A def create_sparsity_pattern(form: _fem.Form, mpc: Union[MultiPointConstraint, Sequence[MultiPointConstraint]]): """ Create sparsity-pattern for MPC given a compiled DOLFINx form Args: form: The form mpc: For square forms, the MPC. For rectangular forms a list of 2 MPCs on axis 0 & 1, respectively """ if isinstance(mpc, Sequence): assert len(mpc) == 2 for mpc_ in mpc: mpc_._not_finalized() # type: ignore return cpp.mpc.create_sparsity_pattern(form._cpp_object, mpc[0]._cpp_object, mpc[1]._cpp_object) else: mpc._not_finalized() # type: ignore return cpp.mpc.create_sparsity_pattern( form._cpp_object, mpc._cpp_object, # type: ignore mpc._cpp_object, # type: ignore ) # type: ignore def create_matrix_nest(a: Sequence[Sequence[_fem.Form]], constraints: Sequence[MultiPointConstraint]): """ Create a PETSc matrix of type "nest" with appropriate sparsity pattern given the provided multi points constraints Args: a: The compiled bilinear variational form provided in a rank 2 list constraints: An ordered list of multi point constraints """ assert len(constraints) == len(a) A_ = [[None for _ in range(len(a[0]))] for _ in range(len(a))] for i, a_row in enumerate(a): for j, a_block in enumerate(a_row): if a[i][j] is None: continue A_[i][j] = cpp.mpc.create_matrix( a[i][j]._cpp_object, constraints[i]._cpp_object, constraints[j]._cpp_object ) A = _PETSc.Mat().createNest( # type: ignore A_, comm=constraints[0].function_space.mesh.comm ) return A def assemble_matrix_nest( A: _PETSc.Mat, # type: ignore a: Sequence[Sequence[_fem.Form]], constraints: Sequence[MultiPointConstraint], bcs: Sequence[_fem.DirichletBC] = [], diagval: _PETSc.ScalarType = 1, # type: ignore ): """ Assemble a compiled DOLFINx bilinear form into a PETSc matrix of type "nest" with corresponding multi point constraints and Dirichlet boundary conditions. Args: a: The compiled bilinear variational form provided in a rank 2 list constraints: An ordered list of multi point constraints bcs: Sequence of Dirichlet boundary conditions diagval: Value to set on the diagonal of the matrix (Default 1) A: PETSc matrix to assemble into """ for i, a_row in enumerate(a): for j, a_block in enumerate(a_row): if a_block is not None: Asub = A.getNestSubMatrix(i, j) assemble_matrix(a_block, (constraints[i], constraints[j]), bcs=bcs, diagval=diagval, A=Asub) dolfinx_mpc-0.9.1/python/dolfinx_mpc/assemble_vector.py000066400000000000000000000075261476141270300234140ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import contextlib from typing import List, Optional, Sequence from petsc4py import PETSc as _PETSc import dolfinx.cpp as _cpp import dolfinx.fem as _fem import dolfinx.la as _la import numpy import ufl from dolfinx import default_scalar_type from dolfinx.common import Timer import dolfinx_mpc.cpp from .multipointconstraint import MultiPointConstraint, _float_classes def apply_lifting( b: _PETSc.Vec, # type: ignore form: List[_fem.Form], bcs: List[List[_fem.DirichletBC]], constraint: MultiPointConstraint, x0: List[_PETSc.Vec] = [], # type: ignore scale: _float_classes = default_scalar_type(1.0), ): # type: ignore """ Apply lifting to vector b, i.e. :math:`b = b - scale \\cdot K^T (A_j (g_j - x0_j))` Args: b: PETSc vector to assemble into form: The linear form bcs: List of Dirichlet boundary conditions constraint: The multi point constraint x0: List of vectors scale: Scaling for lifting """ if isinstance(scale, numpy.generic): # nanobind conversion of numpy dtypes to general Python types scale = scale.item() # type: ignore t = Timer("~MPC: Apply lifting (C++)") with contextlib.ExitStack() as stack: x0 = [stack.enter_context(x.localForm()) for x in x0] x0_r = [x.array_r for x in x0] b_local = stack.enter_context(b.localForm()) _forms = [f._cpp_object for f in form] _bcs = [[bc._cpp_object for bc in bcs0] for bcs0 in bcs] dolfinx_mpc.cpp.mpc.apply_lifting(b_local.array_w, _forms, _bcs, x0_r, scale, constraint._cpp_object) t.stop() def assemble_vector( form: ufl.form.Form, constraint: MultiPointConstraint, b: Optional[_PETSc.Vec] = None, # type: ignore ) -> _PETSc.Vec: # type: ignore """ Assemble a linear form into vector `b` with corresponding multi point constraint Args: form: The linear form constraint: The multi point constraint b: PETSc vector to assemble Returns: The vector with the assembled linear form (`b` if supplied) """ if b is None: b = _la.create_petsc_vector( constraint.function_space.dofmap.index_map, constraint.function_space.dofmap.index_map_bs, ) t = Timer("~MPC: Assemble vector (C++)") with b.localForm() as b_local: b_local.set(0.0) dolfinx_mpc.cpp.mpc.assemble_vector(b_local.array_w, form._cpp_object, constraint._cpp_object) t.stop() return b def create_vector_nest(L: Sequence[_fem.Form], constraints: Sequence[MultiPointConstraint]) -> _PETSc.Vec: # type: ignore """ Create a PETSc vector of type "nest" appropriate for the provided multi point constraints Args: L: A sequence of linear forms constraints: An ordered list of multi point constraints Returns: PETSc.Vec: A PETSc vector of type "nest" #type: ignore """ assert len(constraints) == len(L) maps = [ (constraint.function_space.dofmap.index_map, constraint.function_space.dofmap.index_map_bs) for constraint in constraints ] return _cpp.fem.petsc.create_vector_nest(maps) def assemble_vector_nest( b: _PETSc.Vec, # type: ignore L: Sequence[_fem.Form], constraints: Sequence[MultiPointConstraint], ): """ Assemble a linear form into a PETSc vector of type "nest" Args: b: A PETSc vector of type "nest" L: A sequence of linear forms constraints: An ordered list of multi point constraints """ assert len(constraints) == len(L) assert b.getType() == "nest" b_sub_vecs = b.getNestSubVecs() for i, L_row in enumerate(L): assemble_vector(L_row, constraints[i], b=b_sub_vecs[i]) dolfinx_mpc-0.9.1/python/dolfinx_mpc/dictcondition.py000066400000000000000000000256411476141270300230670ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import typing import dolfinx import dolfinx.fem as fem import numpy as np from dolfinx import default_scalar_type def close_to( point: np.typing.NDArray[typing.Union[np.float64, np.float32]], atol=1000 * np.finfo(dolfinx.default_real_type).resolution, ): """ Convenience function for locating a point [x,y,z] within an array x [[x0,...,xN],[y0,...,yN], [z0,...,zN]]. Args: point: The point should be padded to 3D """ return lambda x: np.isclose(x, point, atol=atol).all(axis=0) @typing.no_type_check def create_dictionary_constraint( V: fem.functionspace, slave_master_dict: typing.Dict[bytes, typing.Dict[bytes, float]], subspace_slave: typing.Optional[int] = None, subspace_master: typing.Optional[int] = None, ): """ Returns a multi point constraint for a given function space and dictionary constraint. Args: V: The function space slave_master_dict: The dictionary subspace_slave: If using mixed or vector space, and only want to use dofs from a sub space as slave add index here. subspace_master: Subspace index for mixed or vector spaces Examples: If the dof `D` located at `[d0,d1]` should be constrained to the dofs `E` and F at `[e0,e1]` and `[f0,f1]` as :math:`D = \\alpha E + \\beta F` the dictionary should be: .. highlight:: python .. code-block:: python {np.array([d0, d1], dtype=mesh.geometry.x.dtype).tobytes(): {numpy.array([e0, e1], dtype=mesh.geometry.x.dtype).tobytes(): alpha, numpy.array([f0, f1], dtype=mesh.geometry.x.dtype).tobytes(): beta}} """ comm = V.mesh.comm bs = V.dofmap.index_map_bs local_size = V.dofmap.index_map.size_local * bs index_map = V.dofmap.index_map owned_entities = {} ghosted_entities = {} non_local_entities = {} slaves_local = {} slaves_ghost = {} dfloat = V.mesh.geometry.x.dtype slave_point_nd = np.zeros((3, 1), dtype=dfloat) for i, slave_point in enumerate(slave_master_dict.keys()): num_masters = len(list(slave_master_dict[slave_point].keys())) # Status for current slave, -1 if not on proc, 0 if ghost, 1 if owned slave_status = -1 # Wrap slave point as numpy array sp = np.frombuffer(slave_point, dtype=dfloat) for j, coord in enumerate(sp): slave_point_nd[j] = coord slave_point_nd[len(sp) :] = 0 if subspace_slave is None: slave_dofs = fem.locate_dofs_geometrical(V, close_to(slave_point_nd)) else: Vsub = V.sub(subspace_slave).collapse()[0] slave_dofs = fem.locate_dofs_geometrical((V.sub(subspace_slave), Vsub), close_to(slave_point_nd))[0] if len(slave_dofs) == 1: # Decide if slave is ghost or not if slave_dofs[0] < local_size: slaves_local[i] = slave_dofs[0] owned_entities[i] = { "masters": np.full(num_masters, -1, dtype=np.int64), "coeffs": np.full(num_masters, -1, dtype=dolfinx.default_scalar_type), "owners": np.full(num_masters, -1, dtype=np.int32), "master_count": 0, "local_index": [], } slave_status = 1 else: slaves_ghost[i] = slave_dofs[0] ghosted_entities[i] = { "masters": np.full(num_masters, -1, dtype=np.int64), "coeffs": np.full(num_masters, -1, dtype=dolfinx.default_scalar_type), "owners": np.full(num_masters, -1, dtype=np.int32), "master_count": 0, "local_index": [], } slave_status = 0 elif len(slave_dofs) > 1: raise RuntimeError("Multiple slaves found at same point. " + "You should use sub-space locators.") # Wrap as list to ensure order later master_points = list(slave_master_dict[slave_point].keys()) master_points_nd = np.zeros((3, len(master_points)), dtype=dfloat) for j, master_point in enumerate(master_points): # Wrap bytes as numpy array for k, coord in enumerate(np.frombuffer(master_point, dtype=dfloat)): master_points_nd[k, j] = coord if subspace_master is None: master_dofs = fem.locate_dofs_geometrical(V, close_to(master_points_nd[:, j : j + 1])) else: Vsub = V.sub(subspace_master).collapse()[0] master_dofs = fem.locate_dofs_geometrical( (V.sub(subspace_master), Vsub), close_to(master_points_nd[:, j : j + 1]) )[0] # Only add masters owned by this processor master_dofs = master_dofs[master_dofs < local_size] if len(master_dofs) == 1: master_block = master_dofs[0] // bs master_rem = master_dofs[0] % bs glob_master = index_map.local_to_global(np.asarray([master_block], dtype=np.int32))[0] if slave_status == -1: if i in non_local_entities.keys(): non_local_entities[i]["masters"].append(glob_master * bs + master_rem) non_local_entities[i]["coeffs"].append(slave_master_dict[slave_point][master_point]) (non_local_entities[i]["owners"].append(comm.rank),) non_local_entities[i]["local_index"].append(j) else: non_local_entities[i] = { "masters": [glob_master * bs + master_rem], "coeffs": [slave_master_dict[slave_point][master_point]], "owners": [comm.rank], "local_index": [j], } elif slave_status == 0: ghosted_entities[i]["masters"][j] = glob_master * bs + master_rem ghosted_entities[i]["owners"][j] = comm.rank ghosted_entities[i]["coeffs"][j] = slave_master_dict[slave_point][master_point] ghosted_entities[i]["local_index"].append(j) elif slave_status == 1: owned_entities[i]["masters"][j] = glob_master * bs + master_rem owned_entities[i]["owners"][j] = comm.rank owned_entities[i]["coeffs"][j] = slave_master_dict[slave_point][master_point] owned_entities[i]["local_index"].append(j) else: raise RuntimeError("Invalid slave status: {0:d} (-1,0,1 are valid options)".format(slave_status)) elif len(master_dofs) > 1: raise RuntimeError("Multiple masters found at same point. You should use sub-space locators.") # Send the ghost and owned entities to processor 0 to gather them data_to_send = [owned_entities, ghosted_entities, non_local_entities] if comm.rank != 0: comm.send(data_to_send, dest=0, tag=1) del owned_entities, ghosted_entities, non_local_entities # Gather all info on proc 0 and sort data owned_slaves, ghosted_slaves = None, None if comm.rank == 0: recv = {0: data_to_send} for proc in range(1, comm.size): recv[proc] = comm.recv(source=proc, tag=1) for proc in range(comm.size): # Loop through all masters other_procs = np.arange(comm.size) other_procs = other_procs[other_procs != proc] # Loop through all owned slaves and ghosts, and update # the master entries for pair in [[0, 1], [1, 0]]: i, j = pair for slave in recv[proc][i].keys(): for o_proc in other_procs: # If slave is ghost on other proc add local masters if slave in recv[o_proc][j].keys(): # Update master with possible entries from ghost o_masters = recv[o_proc][j][slave]["local_index"] for o_master in o_masters: recv[proc][i][slave]["masters"][o_master] = recv[o_proc][j][slave]["masters"][o_master] recv[proc][i][slave]["coeffs"][o_master] = recv[o_proc][j][slave]["coeffs"][o_master] recv[proc][i][slave]["owners"][o_master] = recv[o_proc][j][slave]["owners"][o_master] # If proc only has master, but not the slave if slave in recv[o_proc][2].keys(): o_masters = recv[o_proc][2][slave]["local_index"] # As non owned indices only store non-zero entries for k, o_master in enumerate(o_masters): recv[proc][i][slave]["masters"][o_master] = recv[o_proc][2][slave]["masters"][k] recv[proc][i][slave]["coeffs"][o_master] = recv[o_proc][2][slave]["coeffs"][k] recv[proc][i][slave]["owners"][o_master] = recv[o_proc][2][slave]["owners"][k] if proc == comm.rank: owned_slaves = recv[proc][0] ghosted_slaves = recv[proc][1] else: # If no owned masters, do not send masters if len(recv[proc][0].keys()) > 0: comm.send(recv[proc][0], dest=proc, tag=55) if len(recv[proc][1].keys()) > 0: comm.send(recv[proc][1], dest=proc, tag=66) else: if len(slaves_local.keys()) > 0: owned_slaves = comm.recv(source=0, tag=55) if len(slaves_ghost.keys()) > 0: ghosted_slaves = comm.recv(source=0, tag=66) # Flatten slaves (local) slaves, masters, coeffs, owners, offsets = [], [], [], [], [0] for slave_index in slaves_local.keys(): slaves.append(slaves_local[slave_index]) masters.extend(owned_slaves[slave_index]["masters"]) # type: ignore owners.extend(owned_slaves[slave_index]["owners"]) # type: ignore coeffs.extend(owned_slaves[slave_index]["coeffs"]) # type: ignore offsets.append(len(masters)) for slave_index in slaves_ghost.keys(): slaves.append(slaves_ghost[slave_index]) masters.extend(ghosted_slaves[slave_index]["masters"]) # type: ignore owners.extend(ghosted_slaves[slave_index]["owners"]) # type: ignore coeffs.extend(ghosted_slaves[slave_index]["coeffs"]) # type: ignore offsets.append(len(masters)) return ( np.asarray(slaves, dtype=np.int32), np.asarray(masters, dtype=np.int64), np.asarray(coeffs, dtype=default_scalar_type), np.asarray(owners, dtype=np.int32), np.asarray(offsets, dtype=np.int32), ) dolfinx_mpc-0.9.1/python/dolfinx_mpc/dolfinx_mpc.cpp000066400000000000000000000011631476141270300226620ustar00rootroot00000000000000// Copyright (C) 2020 Jørgen S. Dokken // // This file is part of DOLFINX_MPC // // SPDX-License-Identifier: MIT #include namespace nb = nanobind; namespace dolfinx_mpc_wrappers { void mpc(nb::module_& m); } // namespace dolfinx_mpc_wrappers NB_MODULE(cpp, m) { // Create module for C++ wrappers m.doc() = "DOLFINX MultiPointConstraint Python interface"; m.attr("__version__") = DOLFINX_MPC_VERSION; #ifdef NDEBUG nanobind::set_leak_warnings(false); #endif // Create mpc submodule [mpc] nb::module_ mpc = m.def_submodule("mpc", "General module"); dolfinx_mpc_wrappers::mpc(mpc); } dolfinx_mpc-0.9.1/python/dolfinx_mpc/mpc.cpp000066400000000000000000000354211476141270300211430ustar00rootroot00000000000000// Copyright (C) 2020 Jørgen S. Dokken // // This file is part of DOLFINX-MPC // // SPDX-License-Identifier: MIT #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace nb = nanobind; using namespace nb::literals; namespace { // Templating over mesh resolution template void declare_mpc(nb::module_& m, std::string type) { std::string nbclass_name = "MultiPointConstraint_" + type; // dolfinx_mpc::MultiPointConstraint nb::class_>( m, nbclass_name.c_str(), "Object for representing contact (non-penetrating) conditions") .def("__init__", [](dolfinx_mpc::MultiPointConstraint* mpc, std::shared_ptr> V, nb::ndarray>& slaves, nb::ndarray>& masters, nb::ndarray>& coeffs, nb::ndarray>& owners, nb::ndarray>& offsets) { new (mpc) dolfinx_mpc::MultiPointConstraint( V, std::span(slaves.data(), slaves.size()), std::span(masters.data(), masters.size()), std::span(coeffs.data(), coeffs.size()), std::span(owners.data(), owners.size()), std::span(offsets.data(), offsets.size())); }) .def_prop_ro("masters", &dolfinx_mpc::MultiPointConstraint::masters) .def("coefficients", [](dolfinx_mpc::MultiPointConstraint& self) { std::shared_ptr> adj = self.coefficients(); const std::vector& offsets = adj->offsets(); const std::vector& data = adj->array(); return std::make_pair( nb::ndarray>( data.data(), {data.size()}, nb::handle()), nb::ndarray>( offsets.data(), {offsets.size()}, nb::handle())); }) .def_prop_ro("constants", [](dolfinx_mpc::MultiPointConstraint& self) { const std::vector& consts = self.constant_values(); return nb::ndarray>( consts.data(), {consts.size()}, nb::handle()); }) .def_prop_ro("owners", &dolfinx_mpc::MultiPointConstraint::owners) .def_prop_ro( "slaves", [](dolfinx_mpc::MultiPointConstraint& self) { const std::vector& slaves = self.slaves(); return nb::ndarray>( slaves.data(), {slaves.size()}, nb::handle()); }) .def_prop_ro( "is_slave", [](dolfinx_mpc::MultiPointConstraint& self) { std::span slaves = self.is_slave(); return nb::ndarray>( slaves.data(), {slaves.size()}, nb::handle()); }) .def_prop_ro("cell_to_slaves", &dolfinx_mpc::MultiPointConstraint::cell_to_slaves) .def_prop_ro("num_local_slaves", &dolfinx_mpc::MultiPointConstraint::num_local_slaves) .def_prop_ro("function_space", &dolfinx_mpc::MultiPointConstraint::function_space) .def_prop_ro("owners", &dolfinx_mpc::MultiPointConstraint::owners) .def( "backsubstitution", [](dolfinx_mpc::MultiPointConstraint& self, nb::ndarray, nb::c_contig> u) { self.backsubstitution(std::span(u.data(), u.size())); }, "u"_a, "Backsubstitute slave values into vector") .def( "homogenize", [](dolfinx_mpc::MultiPointConstraint& self, nb::ndarray, nb::c_contig> u) { self.homogenize(std::span(u.data(), u.size())); }, "u"_a, "Homogenize (set to zero) values at slave DoF indices"); // .def("ghost_masters", &dolfinx_mpc::mpc_data::ghost_masters); } template void declare_functions(nb::module_& m) { m.def("compute_shared_indices", &dolfinx_mpc::compute_shared_indices); m.def("create_sparsity_pattern", &dolfinx_mpc::create_sparsity_pattern); m.def("create_contact_slip_condition", &dolfinx_mpc::create_contact_slip_condition); m.def("create_slip_condition", &dolfinx_mpc::create_slip_condition); m.def("create_contact_inelastic_condition", &dolfinx_mpc::create_contact_inelastic_condition); m.def( "create_periodic_constraint_geometrical", [](std::shared_ptr> V, const std::function, nb::c_contig>( nb::ndarray, nb::numpy>&)>& indicator, const std::function, nb::numpy>( nb::ndarray, nb::numpy>&)>& relation, const std::vector>>& bcs, T scale, bool collapse) { auto _indicator = [&indicator](MDSPAN_IMPL_STANDARD_NAMESPACE::mdspan< const U, MDSPAN_IMPL_STANDARD_NAMESPACE::extents< std::size_t, 3, MDSPAN_IMPL_STANDARD_NAMESPACE::dynamic_extent>> x) -> std::vector { assert(x.size() % 3 == 0); nb::ndarray, nb::numpy> x_view( x.data_handle(), {3, x.size() / 3}, nb::handle()); auto m = indicator(x_view); std::vector s(m.data(), m.data() + m.size()); return s; }; auto _relation = [&relation](std::span x) -> std::vector { assert(x.size() % 3 == 0); nb::ndarray, nb::numpy> x_view( x.data(), {3, x.size() / 3}, nb::handle()); auto v = relation(x_view); std::vector output(v.data(), v.data() + v.size()); return output; }; return dolfinx_mpc::create_periodic_condition_geometrical( V, _indicator, _relation, bcs, scale, collapse); }, "V"_a, "indicator"_a, "relation"_a, "bcs"_a, nb::arg("scale").noconvert(), nb::arg("collapse").noconvert()); m.def( "create_periodic_constraint_topological", [](std::shared_ptr>& V, std::shared_ptr>& meshtags, const int dim, const std::function, nb::numpy>( nb::ndarray, nb::numpy>&)>& relation, const std::vector>>& bcs, T scale, bool collapse) { auto _relation = [&relation](std::span x) -> std::vector { nb::ndarray, nb::numpy> x_view( x.data(), {3, x.size() / 3}, nb::handle()); auto v = relation(x_view); std::vector output(v.data(), v.data() + v.size()); return output; }; return dolfinx_mpc::create_periodic_condition_topological( V, meshtags, dim, _relation, bcs, scale, collapse); }, "V"_a, "meshtags"_a, "dim"_a, "relation"_a, "bcs"_a, nb::arg("scale").noconvert(), nb::arg("collapse").noconvert()); } template void declare_mpc_data(nb::module_& m, std::string type) { std::string nbclass_name = "mpc_data_" + type; nb::class_>(m, nbclass_name.c_str(), "Object with data arrays for mpc") .def_prop_ro( "slaves", [](dolfinx_mpc::mpc_data& self) { const std::vector& slaves = self.slaves; return nb::ndarray>( slaves.data(), {slaves.size()}, nb::handle()); }) .def_prop_ro( "masters", [](dolfinx_mpc::mpc_data& self) { const std::vector& masters = self.masters; return nb::ndarray>( masters.data(), {masters.size()}, nb::handle()); }) .def_prop_ro("coeffs", [](dolfinx_mpc::mpc_data& self) { const std::vector& coeffs = self.coeffs; return nb::ndarray>( coeffs.data(), {coeffs.size()}, nb::handle()); }) .def_prop_ro( "owners", [](dolfinx_mpc::mpc_data& self) { const std::vector& owners = self.owners; return nb::ndarray>( owners.data(), {owners.size()}, nb::handle()); }) .def_prop_ro( "offsets", [](dolfinx_mpc::mpc_data& self) { const std::vector& offsets = self.offsets; return nb::ndarray>( offsets.data(), {offsets.size()}, nb::handle()); }); } template void declare_petsc_functions(nb::module_& m) { import_petsc4py(); m.def("create_normal_approximation", [](std::shared_ptr> V, std::int32_t dim, const nb::ndarray, nb::c_contig>& entities) { return dolfinx_mpc::create_normal_approximation( V, dim, std::span(entities.data(), entities.size())); }); m.def( "assemble_matrix", [](Mat A, const dolfinx::fem::Form& a, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1, const std::vector>>& bcs, const T diagval) { dolfinx_mpc::assemble_matrix( dolfinx::la::petsc::Matrix::set_block_fn(A, ADD_VALUES), dolfinx::la::petsc::Matrix::set_fn(A, ADD_VALUES), a, mpc0, mpc1, bcs, diagval); }); m.def( "assemble_vector", [](nb::ndarray, nb::c_contig> b, const dolfinx::fem::Form& L, const std::shared_ptr>& mpc) { dolfinx_mpc::assemble_vector(std::span(b.data(), b.size()), L, mpc); }, "b"_a, "L"_a, "mpc"_a, "Assemble linear form into an existing vector"); m.def( "apply_lifting", [](nb::ndarray, nb::c_contig> b, std::vector>>& a, const std::vector>>>& bcs1, const std::vector, nb::c_contig>>& x0, T scale, std::shared_ptr>& mpc) { std::vector> _x0; for (const auto& x : x0) _x0.emplace_back(x.data(), x.size()); dolfinx_mpc::apply_lifting(std::span(b.data(), b.size()), a, bcs1, _x0, scale, mpc); }, nb::arg("b"), nb::arg("a"), nb::arg("bcs"), nb::arg("x0"), nb::arg("scale"), nb::arg("mpc"), "Assemble apply lifting from form a on vector b"); m.def( "create_matrix", [](const dolfinx::fem::Form& a, const std::shared_ptr>& mpc) { auto A = dolfinx_mpc::create_matrix(a, mpc); Mat _A = A.mat(); PetscObjectReference((PetscObject)_A); return _A; }, nb::rv_policy::take_ownership, "Create a PETSc Mat for bilinear form."); m.def( "create_matrix", [](const dolfinx::fem::Form& a, const std::shared_ptr>& mpc0, const std::shared_ptr>& mpc1) { auto A = dolfinx_mpc::create_matrix(a, mpc0, mpc1); Mat _A = A.mat(); PetscObjectReference((PetscObject)_A); return _A; }, nb::rv_policy::take_ownership, "Create a PETSc Mat for bilinear form."); } } // namespace namespace dolfinx_mpc_wrappers { void mpc(nb::module_& m) { declare_mpc(m, "float"); declare_mpc, float>(m, "complex_float"); declare_mpc(m, "double"); declare_mpc, double>(m, "complex_double"); declare_functions(m); declare_functions, float>(m); declare_functions(m); declare_functions, double>(m); declare_mpc_data(m, "float"); declare_mpc_data, float>(m, "complex_float"); declare_mpc_data(m, "double"); declare_mpc_data, double>(m, "complex_double"); declare_petsc_functions(m); } } // namespace dolfinx_mpc_wrappers dolfinx_mpc-0.9.1/python/dolfinx_mpc/multipointconstraint.py000066400000000000000000000560011476141270300245400ustar00rootroot00000000000000# Copyright (C) 2020-2023 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from typing import Callable, Dict, List, Optional, Tuple, Union from petsc4py import PETSc as _PETSc import dolfinx.cpp as _cpp import dolfinx.fem as _fem import dolfinx.mesh as _mesh import numpy import numpy.typing as npt from dolfinx import default_scalar_type import dolfinx_mpc.cpp from .dictcondition import create_dictionary_constraint _mpc_classes = Union[ dolfinx_mpc.cpp.mpc.MultiPointConstraint_double, dolfinx_mpc.cpp.mpc.MultiPointConstraint_float, dolfinx_mpc.cpp.mpc.MultiPointConstraint_complex_double, dolfinx_mpc.cpp.mpc.MultiPointConstraint_complex_float, ] _float_classes = Union[numpy.float32, numpy.float64, numpy.complex128, numpy.complex64] _float_array_types = Union[ npt.NDArray[numpy.float32], npt.NDArray[numpy.float64], npt.NDArray[numpy.complex64], npt.NDArray[numpy.complex128], ] _mpc_data_classes = Union[ dolfinx_mpc.cpp.mpc.mpc_data_double, dolfinx_mpc.cpp.mpc.mpc_data_float, dolfinx_mpc.cpp.mpc.mpc_data_complex_double, dolfinx_mpc.cpp.mpc.mpc_data_complex_float, ] class MPCData: _cpp_object: _mpc_data_classes def __init__( self, slaves: npt.NDArray[numpy.int32], masters: npt.NDArray[numpy.int64], coeffs: _float_array_types, owners: npt.NDArray[numpy.int32], offsets: npt.NDArray[numpy.int32], ): if coeffs.dtype.type == numpy.float32: self._cpp_object = dolfinx_mpc.cpp.mpc.mpc_data_float(slaves, masters, coeffs, owners, offsets) elif coeffs.dtype.type == numpy.float64: self._cpp_object = dolfinx_mpc.cpp.mpc.mpc_data_double(slaves, masters, coeffs, owners, offsets) elif coeffs.dtype.type == numpy.complex64: self._cpp_object = dolfinx_mpc.cpp.mpc.mpc_data_complex_float(slaves, masters, coeffs, owners, offsets) elif coeffs.dtype.type == numpy.complex128: self._cpp_object = dolfinx_mpc.cpp.mpc.mpc_data_complex_double(slaves, masters, coeffs, owners, offsets) else: raise ValueError("Unsupported dtype {coeffs.dtype.type} for coefficients") @property def slaves(self): return self._cpp_object.slaves @property def masters(self): return self._cpp_object.masters @property def coeffs(self): return self._cpp_object.coeffs @property def owners(self): return self._cpp_object.owners @property def offsets(self): return self._cpp_object.offsets class MultiPointConstraint: """ Hold data for multi point constraint relation ships, including new index maps for local assembly of matrices and vectors. Args: V: The function space dtype: The dtype of the underlying functions """ _slaves: npt.NDArray[numpy.int32] _masters: npt.NDArray[numpy.int64] _coeffs: _float_array_types _owners: npt.NDArray[numpy.int32] _offsets: npt.NDArray[numpy.int32] V: _fem.FunctionSpace finalized: bool _cpp_object: _mpc_classes _dtype: numpy.floating __slots__ = tuple(__annotations__) def __init__(self, V: _fem.FunctionSpace, dtype: numpy.floating = default_scalar_type): self._slaves = numpy.array([], dtype=numpy.int32) self._masters = numpy.array([], dtype=numpy.int64) self._coeffs = numpy.array([], dtype=dtype) # type: ignore self._owners = numpy.array([], dtype=numpy.int32) self._offsets = numpy.array([0], dtype=numpy.int32) self.V = V self.finalized = False self._dtype = dtype def add_constraint( self, V: _fem.FunctionSpace, slaves: npt.NDArray[numpy.int32], masters: npt.NDArray[numpy.int64], coeffs: _float_array_types, owners: npt.NDArray[numpy.int32], offsets: npt.NDArray[numpy.int32], ): """ Add new constraint given by numpy arrays. Args: V: The function space for the constraint slaves: List of all slave dofs (using local dof numbering) on this process masters: List of all master dofs (using global dof numbering) on this process coeffs: The coefficients corresponding to each master. owners: The process each master is owned by. offsets: Array indicating the location in the masters array for the i-th slave in the slaves arrays, i.e. .. highlight:: python .. code-block:: python masters_of_owned_slave[i] = masters[offsets[i]:offsets[i+1]] """ assert V == self.V self._already_finalized() if len(slaves) > 0: self._offsets = numpy.append(self._offsets, offsets[1:] + len(self._masters)) self._slaves = numpy.append(self._slaves, slaves) self._masters = numpy.append(self._masters, masters) self._coeffs = numpy.append(self._coeffs, coeffs) self._owners = numpy.append(self._owners, owners) def add_constraint_from_mpc_data(self, V: _fem.FunctionSpace, mpc_data: Union[_mpc_data_classes, MPCData]): """ Add new constraint given by an `dolfinc_mpc.cpp.mpc.mpc_data`-object """ self._already_finalized() self.add_constraint( V, mpc_data.slaves, mpc_data.masters, mpc_data.coeffs, mpc_data.owners, mpc_data.offsets, ) def finalize(self) -> None: """ Finializes the multi point constraint. After this function is called, no new constraints can be added to the constraint. This function creates a map from the cells (local to index) to the slave degrees of freedom and builds a new index map and function space where unghosted master dofs are added as ghosts. """ self._already_finalized() self._coeffs.astype(numpy.dtype(self._dtype)) # Initialize C++ object and create slave->cell maps if self._dtype == numpy.float32: self._cpp_object = dolfinx_mpc.cpp.mpc.MultiPointConstraint_float( self.V._cpp_object, self._slaves, self._masters, self._coeffs.astype(self._dtype), self._owners, self._offsets, ) elif self._dtype == numpy.float64: self._cpp_object = dolfinx_mpc.cpp.mpc.MultiPointConstraint_double( self.V._cpp_object, self._slaves, self._masters, self._coeffs.astype(self._dtype), self._owners, self._offsets, ) elif self._dtype == numpy.complex64: self._cpp_object = dolfinx_mpc.cpp.mpc.MultiPointConstraint_complex_float( self.V._cpp_object, self._slaves, self._masters, self._coeffs.astype(self._dtype), self._owners, self._offsets, ) elif self._dtype == numpy.complex128: self._cpp_object = dolfinx_mpc.cpp.mpc.MultiPointConstraint_complex_double( self.V._cpp_object, self._slaves, self._masters, self._coeffs.astype(self._dtype), self._owners, self._offsets, ) else: raise ValueError("Unsupported dtype {coeffs.dtype.type} for coefficients") # Replace function space self.V = _fem.FunctionSpace(self.V.mesh, self.V.ufl_element(), self._cpp_object.function_space) self.finalized = True # Delete variables that are no longer required del (self._slaves, self._masters, self._coeffs, self._owners, self._offsets) def create_periodic_constraint_topological( self, V: _fem.FunctionSpace, meshtag: _mesh.MeshTags, tag: int, relation: Callable[[numpy.ndarray], numpy.ndarray], bcs: List[_fem.DirichletBC], scale: _float_classes = default_scalar_type(1.0), ): """ Create periodic condition for all closure dofs of on all entities in `meshtag` with value `tag`. :math:`u(x_i) = scale * u(relation(x_i))` for all of :math:`x_i` on marked entities. Args: V: The function space to assign the condition to. Should either be the space of the MPC or a sub space. meshtag: MeshTag for entity to apply the periodic condition on tag: Tag indicating which entities should be slaves relation: Lambda-function describing the geometrical relation bcs: Dirichlet boundary conditions for the problem (Periodic constraints will be ignored for these dofs) scale: Float for scaling bc """ bcs_ = [bc._cpp_object for bc in bcs] if isinstance(scale, numpy.generic): # nanobind conversion of numpy dtypes to general Python types scale = scale.item() # type: ignore if V is self.V: mpc_data = dolfinx_mpc.cpp.mpc.create_periodic_constraint_topological( self.V._cpp_object, meshtag._cpp_object, tag, relation, bcs_, scale, False ) elif self.V.contains(V): mpc_data = dolfinx_mpc.cpp.mpc.create_periodic_constraint_topological( V._cpp_object, meshtag._cpp_object, tag, relation, bcs_, scale, True ) else: raise RuntimeError("The input space has to be a sub space (or the full space) of the MPC") self.add_constraint_from_mpc_data(self.V, mpc_data=mpc_data) def create_periodic_constraint_geometrical( self, V: _fem.FunctionSpace, indicator: Callable[[numpy.ndarray], numpy.ndarray], relation: Callable[[numpy.ndarray], numpy.ndarray], bcs: List[_fem.DirichletBC], scale: _float_classes = default_scalar_type(1.0), ): """ Create a periodic condition for all degrees of freedom whose physical location satisfies :math:`indicator(x_i)==True`, i.e. :math:`u(x_i) = scale * u(relation(x_i))` for all :math:`x_i` Args: V: The function space to assign the condition to. Should either be the space of the MPC or a sub space. indicator: Lambda-function to locate degrees of freedom that should be slaves relation: Lambda-function describing the geometrical relation to master dofs bcs: Dirichlet boundary conditions for the problem (Periodic constraints will be ignored for these dofs) scale: Float for scaling bc """ if isinstance(scale, numpy.generic): # nanobind conversion of numpy dtypes to general Python types scale = scale.item() # type: ignore bcs = [] if bcs is None else [bc._cpp_object for bc in bcs] if V is self.V: mpc_data = dolfinx_mpc.cpp.mpc.create_periodic_constraint_geometrical( self.V._cpp_object, indicator, relation, bcs, scale, False ) elif self.V.contains(V): mpc_data = dolfinx_mpc.cpp.mpc.create_periodic_constraint_geometrical( V._cpp_object, indicator, relation, bcs, scale, True ) else: raise RuntimeError("The input space has to be a sub space (or the full space) of the MPC") self.add_constraint_from_mpc_data(self.V, mpc_data=mpc_data) def create_slip_constraint( self, space: _fem.FunctionSpace, facet_marker: Tuple[_mesh.MeshTags, int], v: _fem.Function, bcs: List[_fem.DirichletBC] = [], ): """ Create a slip constraint :math:`u \\cdot v=0` over the entities defined in `facet_marker` with the given index. Args: space: Function space (possible sub space) for the current constraint facet_marker: Tuple containomg the mesh tag and marker used to locate degrees of freedom v: Function containing the directional vector to dot your slip condition (most commonly a normal vector) bcs: List of Dirichlet BCs (slip conditions will be ignored on these dofs) Examples: Create constaint :math:`u\\cdot n=0` of all indices in `mt` marked with `i` .. highlight:: python .. code-block:: python V = dolfinx.fem.functionspace(mesh, ("CG", 1)) mpc = MultiPointConstraint(V) n = dolfinx.fem.Function(V) mpc.create_slip_constaint(V, (mt, i), n) Create slip constaint for a mixed function space: .. highlight:: python .. code-block:: python cellname = mesh.ufl_cell().cellname() Ve = basix.ufl.element(basix.ElementFamily.P, cellname , 2, shape=(mesh.geometry.dim,)) Qe = basix.ufl.element(basix.ElementFamily.P, cellname , 1) me = basix.ufl.mixed_element([Ve, Qe]) W = dolfinx.fem.functionspace(mesh, me) mpc = MultiPointConstraint(W) n_space, _ = W.sub(0).collapse() normal = dolfinx.fem.Function(n_space) mpc.create_slip_constraint(W.sub(0), (mt, i), normal, bcs=[]) A slip condition cannot be applied on the same degrees of freedom as a Dirichlet BC, and therefore any Dirichlet bc for the space of the multi point constraint should be supplied. .. highlight:: python .. code-block:: python cellname = mesh.ufl_cell().cellname() Ve = basix.ufl.element(basix.ElementFamily.P, cellname , 2, shape=(mesh.geometry.dim,)) Qe = basix.ufl.element(basix.ElementFamily.P, cellname , 1) me = basix.ufl.mixed_element([Ve, Qe]) W = dolfinx.fem.functionspace(mesh, me) mpc = MultiPointConstraint(W) n_space, _ = W.sub(0).collapse() normal = Function(n_space) bc = dolfinx.fem.dirichletbc(inlet_velocity, dofs, W.sub(0)) mpc.create_slip_constraint(W.sub(0), (mt, i), normal, bcs=[bc]) """ bcs = [] if bcs is None else [bc._cpp_object for bc in bcs] if space is self.V: sub_space = False elif self.V.contains(space): sub_space = True else: raise ValueError("Input space has to be a sub space of the MPC space") mpc_data = dolfinx_mpc.cpp.mpc.create_slip_condition( space._cpp_object, facet_marker[0]._cpp_object, facet_marker[1], v._cpp_object, bcs, sub_space, ) self.add_constraint_from_mpc_data(self.V, mpc_data=mpc_data) def create_general_constraint( self, slave_master_dict: Dict[bytes, Dict[bytes, float]], subspace_slave: Optional[int] = None, subspace_master: Optional[int] = None, ): """ Args: V: The function space slave_master_dict: Nested dictionary, where the first key is the bit representing the slave dof's coordinate in the mesh. The item of this key is a dictionary, where each key of this dictionary is the bit representation of the master dof's coordinate, and the item the coefficient for the MPC equation. subspace_slave: If using mixed or vector space, and only want to use dofs from a sub space as slave add index here subspace_master: Subspace index for mixed or vector spaces Example: If the dof `D` located at `[d0, d1]` should be constrained to the dofs `E` and `F` at `[e0, e1]` and `[f0, f1]` as :math:`D = \\alpha E + \\beta F` the dictionary should be: .. highlight:: python .. code-block:: python {numpy.array([d0, d1], dtype=mesh.geometry.x.dtype).tobytes(): {numpy.array([e0, e1], dtype=mesh.geometry.x.dtype).tobytes(): alpha, numpy.array([f0, f1], dtype=mesh.geometry.x.dtype).tobytes(): beta}} """ slaves, masters, coeffs, owners, offsets = create_dictionary_constraint( self.V, slave_master_dict, subspace_slave, subspace_master ) self.add_constraint(self.V, slaves, masters, coeffs, owners, offsets) def create_contact_slip_condition( self, meshtags: _mesh.MeshTags, slave_marker: int, master_marker: int, normal: _fem.Function, eps2: float = 1e-20, ): """ Create a slip condition between two sets of facets marker with individual markers. The interfaces should be within machine precision of eachother, but the vertices does not need to align. The condition created is :math:`u_s \\cdot normal_s = u_m \\cdot normal_m` where `s` is the restriction to the slave facets, `m` to the master facets. Args: meshtags: The meshtags of the set of facets to tie together slave_marker: The marker of the slave facets master_marker: The marker of the master facets normal: The function used in the dot-product of the constraint eps2: The tolerance for the squared distance between cells to be considered as a collision """ if isinstance(eps2, numpy.generic): # nanobind conversion of numpy dtypes to general Python types eps2 = eps2.item() # type: ignore mpc_data = dolfinx_mpc.cpp.mpc.create_contact_slip_condition( self.V._cpp_object, meshtags._cpp_object, slave_marker, master_marker, normal._cpp_object, eps2, ) self.add_constraint_from_mpc_data(self.V, mpc_data) def create_contact_inelastic_condition( self, meshtags: _cpp.mesh.MeshTags_int32, slave_marker: int, master_marker: int, eps2: float = 1e-20, ): """ Create a contact inelastic condition between two sets of facets marker with individual markers. The interfaces should be within machine precision of eachother, but the vertices does not need to align. The condition created is :math:`u_s = u_m` where `s` is the restriction to the slave facets, `m` to the master facets. Args: meshtags: The meshtags of the set of facets to tie together slave_marker: The marker of the slave facets master_marker: The marker of the master facets eps2: The tolerance for the squared distance between cells to be considered as a collision """ if isinstance(eps2, numpy.generic): # nanobind conversion of numpy dtypes to general Python types eps2 = eps2.item() # type: ignore mpc_data = dolfinx_mpc.cpp.mpc.create_contact_inelastic_condition( self.V._cpp_object, meshtags._cpp_object, slave_marker, master_marker, eps2 ) self.add_constraint_from_mpc_data(self.V, mpc_data) @property def is_slave(self) -> numpy.ndarray: """ Returns a vector of integers where the ith entry indicates if a degree of freedom (local to process) is a slave. """ self._not_finalized() return self._cpp_object.is_slave @property def slaves(self): """ Returns the degrees of freedom for all slaves local to process """ self._not_finalized() return self._cpp_object.slaves @property def masters(self) -> _cpp.graph.AdjacencyList_int32: """ Returns an adjacency-list whose ith node corresponds to a degree of freedom (local to process), and links the corresponding master dofs (local to process). Examples: .. highlight:: python .. code-block:: python masters = mpc.masters masters_of_dof_i = masters.links(i) """ self._not_finalized() return self._cpp_object.masters def coefficients(self) -> _float_array_types: """ Returns a vector containing the coefficients for the constraint, and the corresponding offsets for the ith degree of freedom. Examples: .. highlight:: python .. code-block:: python coeffs, offsets = mpc.coefficients() coeffs_of_slave_i = coeffs[offsets[i]:offsets[i+1]] """ self._not_finalized() return self._cpp_object.coefficients() @property def num_local_slaves(self): """ Return the number of slaves owned by the current process. """ self._not_finalized() return self._cpp_object.num_local_slaves @property def cell_to_slaves(self): """ Returns an `dolfinx.cpp.graph.AdjacencyList_int32` whose ith node corresponds to the ith cell (local to process), and links the corresponding slave degrees of freedom in the cell (local to process). Examples: .. highlight:: python .. code-block:: python cell_to_slaves = mpc.cell_to_slaves() slaves_in_cell_i = cell_to_slaves.links(i) """ self._not_finalized() return self._cpp_object.cell_to_slaves @property def function_space(self): """ Return the function space for the multi-point constraint with the updated index map """ self._not_finalized() return self.V def backsubstitution(self, u: Union[_fem.Function, _PETSc.Vec]) -> None: # type: ignore """ For a Function, impose the multi-point constraint by backsubstiution. This function is used after solving the reduced problem to obtain the values at the slave degrees of freedom .. note:: It is the users responsibility to destroy the PETSc vector Args: u: The input function """ try: self._cpp_object.backsubstitution(u.x.array) u.x.scatter_forward() except AttributeError: with u.localForm() as vector_local: self._cpp_object.backsubstitution(vector_local.array_w) u.ghostUpdate(addv=_PETSc.InsertMode.INSERT, mode=_PETSc.ScatterMode.FORWARD) # type: ignore def homogenize(self, u: _fem.Function) -> None: """ For a vector, homogenize (set to zero) the vector components at the multi-point constraint slave DoF indices. This is particularly useful for nonlinear problems. Args: u: The input vector """ self._cpp_object.homogenize(u.x.array) u.x.scatter_forward() def _already_finalized(self): """ Check if we have already finalized the multi point constraint """ if self.finalized: raise RuntimeError("MultiPointConstraint has already been finalized") def _not_finalized(self): """ Check if we have finalized the multi point constraint """ if not self.finalized: raise RuntimeError("MultiPointConstraint has not been finalized") dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/000077500000000000000000000000001476141270300207555ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/__init__.py000066400000000000000000000007311476141270300230670ustar00rootroot00000000000000# Copyright (C) 2021 Jørgen Schartum Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT """Numba extension for dolfinx_mpc""" # flake8: noqa from __future__ import annotations try: import numba except ModuleNotFoundError: raise ModuleNotFoundError("Numba is required to use numba assembler") from .assemble_matrix import assemble_matrix from .assemble_vector import assemble_vector __all__ = ["assemble_matrix", "assemble_vector"] dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/assemble_matrix.py000066400000000000000000000517401476141270300245150ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from typing import List, Optional, Tuple, Union from petsc4py import PETSc as _PETSc import cffi import dolfinx import dolfinx.cpp as _cpp import dolfinx.fem as _fem import numpy import numpy.typing as npt from dolfinx.common import Timer import numba from dolfinx_mpc.assemble_matrix import create_sparsity_pattern from dolfinx_mpc.multipointconstraint import MultiPointConstraint from .helpers import _bcs, _forms, extract_slave_cells, pack_slave_facet_info from .numba_setup import initialize_petsc, sink mode = _PETSc.InsertMode.ADD_VALUES # type: ignore insert = _PETSc.InsertMode.INSERT_VALUES # type: ignore ffi, set_values_local = initialize_petsc() def assemble_matrix( form: _forms, constraint: MultiPointConstraint, bcs: Optional[List[_bcs]] = None, diagval: _PETSc.ScalarType = 1.0, # type: ignore A: Optional[_PETSc.Mat] = None, # type: ignore ): """ Assembles a compiled DOLFINx form with given a multi point constraint and possible Dirichlet boundary conditions. NOTE: Strong Dirichlet conditions cannot be on master dofs. Args: form: The compiled bilinear form constraint: The multi point constraint bcs: List of Dirichlet boundary conditions diagval: Value to set on the diagonal of the matrix A: PETSc matrix to assemble into (optional) """ timer_matrix = Timer("~MPC: Assemble matrix (numba)") V = constraint.function_space dofmap = V.dofmap dofs = dofmap.list # Pack MPC data for numba kernels coefficients = constraint.coefficients()[0] masters_adj = constraint.masters c_to_s_adj = constraint.cell_to_slaves cell_to_slave = c_to_s_adj.array c_to_s_off = c_to_s_adj.offsets is_slave = constraint.is_slave mpc_data = ( masters_adj.array, coefficients, masters_adj.offsets, cell_to_slave, c_to_s_off, is_slave, ) slave_cells = extract_slave_cells(c_to_s_off) # Create 1D bc indicator for matrix assembly num_dofs_local = (dofmap.index_map.size_local + dofmap.index_map.num_ghosts) * dofmap.index_map_bs is_bc = numpy.zeros(num_dofs_local, dtype=bool) bcs = [] if bcs is None else [bc._cpp_object for bc in bcs] if len(bcs) > 0: for bc in bcs: is_bc[bc.dof_indices()[0]] = True # Get data from mesh x_dofs = V.mesh.geometry.dofmap x = V.mesh.geometry.x # Pack constants and coefficients form_coeffs = _cpp.fem.pack_coefficients(form._cpp_object) form_consts = _cpp.fem.pack_constants(form._cpp_object) # Create sparsity pattern and matrix if not supplied if A is None: pattern = create_sparsity_pattern(form, constraint) pattern.finalize() A = _cpp.la.petsc.create_matrix(V.mesh.comm, pattern) A.zeroEntries() # Assemble the matrix with all entries _cpp.fem.petsc.assemble_matrix(A, form._cpp_object, form_consts, form_coeffs, bcs, False) # General assembly data block_size = dofmap.dof_layout.block_size num_dofs_per_element = dofmap.dof_layout.num_dofs tdim = V.mesh.topology.dim # Assemble over cells subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.cell) num_cell_integrals = len(subdomain_ids) e0 = form.function_spaces[0].element e1 = form.function_spaces[1].element # Get dof transformations needs_transformation_data = ( e0.needs_dof_transformations or e1.needs_dof_transformations or form._cpp_object.needs_facet_permutations ) cell_perms = numpy.array([], dtype=numpy.uint32) if needs_transformation_data: V.mesh.topology.create_entity_permutations() cell_perms = V.mesh.topology.get_cell_permutation_info() # NOTE: Here we need to add the apply_dof_transformation and apply_dof_transformation transpose functions # to support more exotic elements if e0.needs_dof_transformations or e1.needs_dof_transformations: raise NotImplementedError("Dof transformations not implemented") if _PETSc.ScalarType == numpy.float32: # type: ignore nptype = "float32" elif _PETSc.ScalarType == numpy.float64: # type: ignore nptype = "float64" elif _PETSc.ScalarType == numpy.complex64: # type: ignore nptype = "complex64" elif _PETSc.ScalarType == numpy.complex128: # type: ignore nptype = "complex128" else: raise RuntimeError(f"Unsupported scalar type {_PETSc.ScalarType}.") # type: ignore ufcx_form = form.ufcx_form if num_cell_integrals > 0: # NOTE: This depends on enum ordering in ufcx.h cell_form_pos = ufcx_form.form_integral_offsets[0] V.mesh.topology.create_entity_permutations() for i, id in enumerate(subdomain_ids): coeffs_i = form_coeffs[(_fem.IntegralType.cell, id)] cell_kernel = getattr(ufcx_form.form_integrals[cell_form_pos + i], f"tabulate_tensor_{nptype}") active_cells = form._cpp_object.domains(_fem.IntegralType.cell, id) assemble_slave_cells( A.handle, cell_kernel, active_cells[numpy.isin(active_cells, slave_cells)], (x_dofs, x), coeffs_i, form_consts, cell_perms, dofs, block_size, num_dofs_per_element, mpc_data, is_bc, ) # Assemble over exterior facets subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.exterior_facet) num_exterior_integrals = len(subdomain_ids) if num_exterior_integrals > 0: V.mesh.topology.create_entities(tdim - 1) V.mesh.topology.create_connectivity(tdim - 1, tdim) # Get facet permutations if required facet_perms = numpy.array([], dtype=numpy.uint8) if form._cpp_object.needs_facet_permutations: facet_perms = V.mesh.topology.get_facet_permutations() perm = (cell_perms, form._cpp_object.needs_facet_permutations, facet_perms) # NOTE: This depends on enum ordering in ufcx.h ext_facet_pos = ufcx_form.form_integral_offsets[1] for i, id in enumerate(subdomain_ids): facet_kernel = getattr(ufcx_form.form_integrals[ext_facet_pos + i], f"tabulate_tensor_{nptype}") facets = form._cpp_object.domains(_fem.IntegralType.exterior_facet, id) coeffs_i = form_coeffs[(_fem.IntegralType.exterior_facet, id)] facet_info = pack_slave_facet_info(facets, slave_cells) num_facets_per_cell = len(V.mesh.topology.connectivity(tdim, tdim - 1).links(0)) assemble_exterior_slave_facets( A.handle, facet_kernel, (x_dofs, x), coeffs_i, form_consts, perm, dofs, block_size, num_dofs_per_element, facet_info, mpc_data, is_bc, num_facets_per_cell, ) # Add mpc entries on diagonal slaves = constraint.slaves num_local_slaves = constraint.num_local_slaves add_diagonal(A.handle, slaves[:num_local_slaves], diagval) # Add one on diagonal for diriclet bc and slave dofs # NOTE: In the future one could use a constant in the dirichletbc if form.function_spaces[0] is form.function_spaces[1]: A.assemblyBegin(_PETSc.Mat.AssemblyType.FLUSH) # type: ignore A.assemblyEnd(_PETSc.Mat.AssemblyType.FLUSH) # type: ignore _cpp.fem.petsc.insert_diagonal(A, form.function_spaces[0], bcs, diagval) A.assemble() timer_matrix.stop() return A @numba.njit def add_diagonal(A: int, dofs: numba.int32[:], diagval: _PETSc.ScalarType = 1): # type: ignore """ Insert value on diagonal of matrix for given dofs. """ ffi_fb = ffi.from_buffer dof_list = numpy.zeros(1, dtype=numpy.int32) dof_value = numpy.full(1, diagval, dtype=_PETSc.ScalarType) # type: ignore for dof in dofs: dof_list[0] = dof ierr_loc = set_values_local(A, 1, ffi_fb(dof_list), 1, ffi_fb(dof_list), ffi_fb(dof_value), mode) # type: ignore assert ierr_loc == 0 sink(dof_list, dof_value) @numba.njit def assemble_slave_cells( A: int, kernel: cffi.FFI.CData, active_cells: numba.int32[:], mesh: Tuple[numba.int32[:, :], Union[numba.float64, numba.float32]], coeffs: Union[numba.float32[:, :], numba.float64[:, :], numba.complex64[:, :], numba.complex128[:, :]], constants: Union[numba.float32[:], numba.float64[:], numba.complex64[:], numba.complex128[:]], permutation_info: numba.uint32[:], dofmap: numba.int32[:, :], block_size: int, num_dofs_per_element: int, mpc: Tuple[ # type: ignore numba.int32[:], Union[numba.float32[:], numba.float64[:], numba.complex64[:], numba.complex128[:]], numba.int32[:], numba.int32[:], numba.int32[:], numba.int32[:], ], is_bc: numba.bool_[:], ): """ Assemble MPC contributions for cell integrals """ ffi_fb = ffi.from_buffer # Get mesh and geometry data x_dofmap, x = mesh # Empty arrays mimicking Nullpointers facet_index = numpy.zeros(0, dtype=numpy.intc) facet_perm = numpy.zeros(0, dtype=numpy.uint8) # NOTE: All cells are assumed to be of the same type num_xdofs_per_cell = x_dofmap.shape[1] geometry = numpy.zeros((num_xdofs_per_cell, 3), dtype=dolfinx.default_real_type) A_local = numpy.zeros( (block_size * num_dofs_per_element, block_size * num_dofs_per_element), dtype=_PETSc.ScalarType, # type: ignore ) masters, coefficients, offsets, c_to_s, c_to_s_off, is_slave = mpc # Loop over all cells local_dofs = numpy.zeros(block_size * num_dofs_per_element, dtype=numpy.int32) for cell in active_cells: # Compute vertices of cell from mesh data geometry[:, :] = x[x_dofmap[cell]] # Assemble local contributions A_local.fill(0.0) kernel( ffi_fb(A_local), # type: ignore ffi_fb(coeffs[cell, :]), ffi_fb(constants), ffi_fb(geometry), # type: ignore ffi_fb(facet_index), # type: ignore ffi_fb(facet_perm), # type: ignore ) # NOTE: Here we need to apply dof transformations local_blocks = dofmap[cell] # Remove all contributions for dofs that are in the Dirichlet bcs for j in range(num_dofs_per_element): for k in range(block_size): if is_bc[local_blocks[j] * block_size + k]: A_local[j * block_size + k, :] = 0 A_local[:, j * block_size + k] = 0 A_local_copy: numpy.typing.NDArray[_PETSc.ScalarType] = A_local.copy() # type: ignore # Find local position of slaves slaves = c_to_s[c_to_s_off[cell] : c_to_s_off[cell + 1]] mpc_cell = (slaves, masters, coefficients, offsets, is_slave) modify_mpc_cell(A, num_dofs_per_element, block_size, A_local, local_blocks, mpc_cell) # Remove already assembled contribution to matrix A_contribution = A_local - A_local_copy # Expand local blocks to dofs for i in range(num_dofs_per_element): for j in range(block_size): local_dofs[i * block_size + j] = local_blocks[i] * block_size + j # Insert local contribution ierr_loc = set_values_local( A, block_size * num_dofs_per_element, ffi_fb(local_dofs), # type: ignore block_size * num_dofs_per_element, ffi_fb(local_dofs), # type: ignore ffi_fb(A_contribution), # type: ignore mode, ) assert ierr_loc == 0 sink(A_contribution, local_dofs) @numba.njit def modify_mpc_cell( A: int, num_dofs: int, block_size: int, Ae: Union[numba.float32[:, :], numba.float64[:, :], numba.complex128[:, :], numba.complex64[:, :]], local_blocks: numba.int32[:], mpc_cell: Tuple[ # type: ignore numba.int32[:], numba.int32[:], Union[numba.float32[:], numba.float64[:], numba.complex64[:], numba.complex128[:]], numba.int32[:], numba.int8[:], ], ): """ Given an element matrix Ae, modify the contributions to respect the MPCs, and add contributions to appropriate places in the global matrix A. """ slaves, masters, coefficients, offsets, is_slave = mpc_cell # Locate which local dofs are slave dofs and compute the local index of the slave # Additionally count the number of masters we will needed in the flattened structures local_index0 = numpy.empty(len(slaves), dtype=numpy.int32) num_flattened_masters = 0 for i in range(num_dofs): for j in range(block_size): slave = local_blocks[i] * block_size + j if is_slave[slave]: location = numpy.flatnonzero(slaves == slave)[0] local_index0[location] = i * block_size + j num_flattened_masters += offsets[slave + 1] - offsets[slave] # Strip a copy of Ae of all columns and rows belonging to a slave Ae_original = numpy.copy(Ae) Ae_stripped = numpy.zeros((block_size * num_dofs, block_size * num_dofs), dtype=_PETSc.ScalarType) # type: ignore for i in range(num_dofs): for b in range(block_size): is_slave0 = is_slave[local_blocks[i] * block_size + b] for j in range(num_dofs): for c in range(block_size): is_slave1 = is_slave[local_blocks[j] * block_size + c] Ae_stripped[i * block_size + b, j * block_size + c] = (not (is_slave0 and is_slave1)) * Ae_original[ i * block_size + b, j * block_size + c ] flattened_masters = numpy.zeros(num_flattened_masters, dtype=numpy.int32) flattened_slaves = numpy.zeros(num_flattened_masters, dtype=numpy.int32) flattened_coeffs = numpy.zeros(num_flattened_masters, dtype=_PETSc.ScalarType) # type: ignore c = 0 for i, slave in enumerate(slaves): local_masters = masters[offsets[slave] : offsets[slave + 1]] local_coeffs = coefficients[offsets[slave] : offsets[slave + 1]] num_masters = len(local_masters) for j in range(num_masters): flattened_slaves[c + j] = local_index0[i] flattened_masters[c + j] = local_masters[j] flattened_coeffs[c + j] = local_coeffs[j] c += num_masters m0 = numpy.zeros(1, dtype=numpy.int32) m1 = numpy.zeros(1, dtype=numpy.int32) Am0m1 = numpy.zeros((1, 1), dtype=_PETSc.ScalarType) # type: ignore Arow = numpy.zeros(block_size * num_dofs, dtype=_PETSc.ScalarType) # type: ignore Acol = numpy.zeros(block_size * num_dofs, dtype=_PETSc.ScalarType) # type: ignore mpc_dofs = numpy.zeros(block_size * num_dofs, dtype=numpy.int32) ffi_fb = ffi.from_buffer for i in range(num_flattened_masters): local_index = flattened_slaves[i] master = flattened_masters[i] coeff = flattened_coeffs[i] Ae[:, local_index] = 0 Ae[local_index, :] = 0 m0[0] = master Arow[:] = coeff * Ae_stripped[:, local_index] Acol[:] = coeff * Ae_stripped[local_index, :] Am0m1[0, 0] = coeff**2 * Ae_original[local_index, local_index] for j in range(num_dofs): for k in range(block_size): mpc_dofs[j * block_size + k] = local_blocks[j] * block_size + k mpc_dofs[local_index] = master ierr_row = set_values_local( A, block_size * num_dofs, ffi_fb(mpc_dofs), # type: ignore 1, ffi_fb(m0), # type: ignore ffi_fb(Arow), # type: ignore mode, ) assert ierr_row == 0 # Add slave row to master row ierr_col = set_values_local( A, 1, ffi_fb(m0), # type: ignore block_size * num_dofs, ffi_fb(mpc_dofs), # type: ignore ffi_fb(Acol), # type: ignore mode, ) assert ierr_col == 0 ierr_master = set_values_local(A, 1, ffi_fb(m0), 1, ffi_fb(m0), ffi_fb(Am0m1), mode) # type: ignore assert ierr_master == 0 # Add contributions for other masters relating to slaves on the given cell for j in range(num_flattened_masters): if i == j: continue other_local_index = flattened_slaves[j] other_master = flattened_masters[j] other_coeff = flattened_coeffs[j] m1[0] = other_master Am0m1[0, 0] = coeff * other_coeff * Ae_original[local_index, other_local_index] ierr_other_masters = set_values_local(A, 1, ffi_fb(m0), 1, ffi_fb(m1), ffi_fb(Am0m1), mode) # type: ignore assert ierr_other_masters == 0 sink(Arow, Acol, Am0m1, m0, m1, mpc_dofs) @numba.njit def assemble_exterior_slave_facets( A: int, kernel: cffi.FFI.CData, mesh: Tuple[numba.int32[:, :], Union[numba.float64, numba.float32]], coeffs: Union[numba.float32[:, :], numba.float64[:, :], numba.complex64[:, :], numba.complex128[:, :]], consts: Union[numba.float32[:], numba.float64[:], numba.complex64[:], numba.complex128[:]], perm: Tuple[numba.uint32[:], bool, numba.uint8[:]], dofmap: numba.int32[:, :], block_size: int, num_dofs_per_element: int, facet_info: numba.int32[:, 2], mpc: Tuple[ numba.int32[:], Union[numba.float32[:], numba.float64[:], numba.complex64[:], numba.complex128[:]], numba.int32[:], numba.int32[:], numba.int32[:], numba.int32[:], ], is_bc: npt.NDArray[numpy.bool_], num_facets_per_cell: int, ): """Assemble MPC contributions over exterior facet integrals""" # Unpack mpc data masters, coefficients, offsets, c_to_s, c_to_s_off, is_slave = mpc # Mesh data x_dofmap, x = mesh # Empty arrays for facet information facet_index = numpy.zeros(1, dtype=numpy.int32) facet_perm = numpy.zeros(1, dtype=numpy.uint8) # NOTE: All cells are assumed to be of the same type geometry = numpy.zeros((x_dofmap.shape[1], 3), dtype=x.dtype) # Numpy data used in facet loop A_local = numpy.zeros( (num_dofs_per_element * block_size, num_dofs_per_element * block_size), dtype=_PETSc.ScalarType, # type: ignore ) local_dofs = numpy.zeros(block_size * num_dofs_per_element, dtype=numpy.int32) # Permutation info cell_perms, needs_facet_perm, facet_perms = perm # Loop over all external facets that are active for i in range(facet_info.shape[0]): # Get cell index (local to process) and facet index (local to cell) cell_index, local_facet = facet_info[i] # Get mesh geometry facet_index[0] = local_facet geometry[:, :] = x[x_dofmap[cell_index]] # Assemble local matrix A_local.fill(0.0) if needs_facet_perm: facet_perm[0] = facet_perms[cell_index * num_facets_per_cell + local_facet] kernel( ffi.from_buffer(A_local), # type: ignore ffi.from_buffer(coeffs[cell_index, :]), # type: ignore ffi.from_buffer(consts), # type: ignore ffi.from_buffer(geometry), # type: ignore ffi.from_buffer(facet_index), # type: ignore ffi.from_buffer(facet_perm), # type: ignore ) # NOTE: Here we need to add the apply_dof_transformation and apply_dof_transformation transpose functions # Extract local blocks of dofs local_blocks = dofmap[cell_index] # Remove all contributions for dofs that are in the Dirichlet bcs for j in range(num_dofs_per_element): for k in range(block_size): if is_bc[local_blocks[j] * block_size + k]: A_local[j * block_size + k, :] = 0 A_local[:, j * block_size + k] = 0 A_local_copy: numpy.typing.NDArray[_PETSc.ScalarType] = A_local.copy() # type: ignore slaves = c_to_s[c_to_s_off[cell_index] : c_to_s_off[cell_index + 1]] mpc_cell = (slaves, masters, coefficients, offsets, is_slave) modify_mpc_cell(A, num_dofs_per_element, block_size, A_local, local_blocks, mpc_cell) # Remove already assembled contribution to matrix A_contribution = A_local - A_local_copy # Expand local blocks to dofs for i in range(num_dofs_per_element): for j in range(block_size): local_dofs[i * block_size + j] = local_blocks[i] * block_size + j # Insert local contribution ierr_loc = set_values_local( A, block_size * num_dofs_per_element, ffi.from_buffer(local_dofs), # type: ignore block_size * num_dofs_per_element, ffi.from_buffer(local_dofs), # type: ignore ffi.from_buffer(A_contribution), # type: ignore mode, ) assert ierr_loc == 0 sink(A_contribution, local_dofs) dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/assemble_vector.py000066400000000000000000000317061476141270300245130ustar00rootroot00000000000000# Copyright (C) 2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from typing import Optional, Tuple from petsc4py import PETSc as _PETSc import cffi import dolfinx import dolfinx.cpp as _cpp import dolfinx.fem as _fem import dolfinx.la as _la import dolfinx.log as _log import numpy import numpy.typing as npt from dolfinx.common import Timer import numba from dolfinx_mpc.multipointconstraint import MultiPointConstraint from .helpers import _forms, extract_slave_cells, pack_slave_facet_info from .numba_setup import initialize_petsc ffi, _ = initialize_petsc() def assemble_vector(form: _forms, constraint: MultiPointConstraint, b: Optional[_PETSc.Vec] = None) -> _PETSc.Vec: # type: ignore """ Assemble a compiled DOLFINx form into vector b. Args: form: The complied linear form constraint: The multi point constraint b: PETSc vector to assemble into (optional) """ _log.log(_log.LogLevel.INFO, "Assemble MPC vector") timer_vector = Timer("~MPC: Assemble vector (numba)") # Unpack Function space data V = form.function_spaces[0] x_dofs = V.mesh.geometry.dofmap x = V.mesh.geometry.x dofs = V.dofmap.map() block_size = V.dofmap.index_map_bs # Data from multipointconstraint coefficients = constraint.coefficients()[0] masters_adj = constraint.masters c_to_s_adj = constraint.cell_to_slaves cell_to_slave = c_to_s_adj.array c_to_s_off = c_to_s_adj.offsets is_slave = constraint.is_slave mpc_data = ( masters_adj.array, coefficients, masters_adj.offsets, cell_to_slave, c_to_s_off, is_slave, ) slave_cells = extract_slave_cells(c_to_s_off) # Get index map and ghost info if b is None: index_map = constraint.function_space.dofmap.index_map vector = _la.create_petsc_vector(index_map, block_size) else: vector = b # Pack constants and coefficients form_coeffs = _cpp.fem.pack_coefficients(form._cpp_object) form_consts = _cpp.fem.pack_constants(form._cpp_object) tdim = V.mesh.topology.dim num_dofs_per_element = V.dofmap.dof_layout.num_dofs # Assemble vector with all entries with vector.localForm() as b_local: _cpp.fem.assemble_vector(b_local.array_w, form._cpp_object, form_consts, form_coeffs) # Check if we need facet permutations # FIXME: access apply_dof_transformations here e0 = form.function_spaces[0].element needs_transformation_data = e0.needs_dof_transformations or form._cpp_object.needs_facet_permutations cell_perms = numpy.array([], dtype=numpy.uint32) if needs_transformation_data: V.mesh.topology.create_entity_permutations() cell_perms = V.mesh.topology.get_cell_permutation_info() if e0.needs_dof_transformations: raise NotImplementedError("Dof transformations not implemented") # Assemble over cells subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.cell) num_cell_integrals = len(subdomain_ids) if _PETSc.ScalarType == numpy.float32: # type: ignore nptype = "float32" elif _PETSc.ScalarType == numpy.float64: # type: ignore nptype = "float64" elif _PETSc.ScalarType == numpy.complex64: # type: ignore nptype = "complex64" elif _PETSc.ScalarType == numpy.complex128: # type: ignore nptype = "complex128" else: raise RuntimeError(f"Unsupported scalar type {_PETSc.ScalarType}.") # type: ignore ufcx_form = form.ufcx_form if num_cell_integrals > 0: V.mesh.topology.create_entity_permutations() # NOTE: This depends on enum ordering in ufcx.h cell_form_pos = ufcx_form.form_integral_offsets[0] for i, id in enumerate(subdomain_ids): cell_kernel = getattr(ufcx_form.form_integrals[cell_form_pos + i], f"tabulate_tensor_{nptype}") active_cells = form._cpp_object.domains(_fem.IntegralType.cell, id) coeffs_i = form_coeffs[(_fem.IntegralType.cell, id)] with vector.localForm() as b: assemble_cells( numpy.asarray(b), cell_kernel, active_cells[numpy.isin(active_cells, slave_cells)], (x_dofs, x), coeffs_i, form_consts, cell_perms, dofs, block_size, num_dofs_per_element, mpc_data, ) # Assemble exterior facet integrals subdomain_ids = form._cpp_object.integral_ids(_fem.IntegralType.exterior_facet) num_exterior_integrals = len(subdomain_ids) if num_exterior_integrals > 0: V.mesh.topology.create_entities(tdim - 1) V.mesh.topology.create_connectivity(tdim - 1, tdim) # Get facet permutations if required facet_perms = numpy.array([], dtype=numpy.uint8) if form._cpp_object.needs_facet_permutations: facet_perms = V.mesh.topology.get_facet_permutations() perm = (cell_perms, form._cpp_object.needs_facet_permutations, facet_perms) # NOTE: This depends on enum ordering in ufcx.h ext_facet_pos = ufcx_form.form_integral_offsets[1] for i, id in enumerate(subdomain_ids): facet_kernel = getattr(ufcx_form.form_integrals[ext_facet_pos + i], f"tabulate_tensor_{nptype}") coeffs_i = form_coeffs[(_fem.IntegralType.exterior_facet, id)] facets = form._cpp_object.domains(_fem.IntegralType.exterior_facet, id) facet_info = pack_slave_facet_info(facets, slave_cells) num_facets_per_cell = len(V.mesh.topology.connectivity(tdim, tdim - 1).links(0)) with vector.localForm() as b: assemble_exterior_slave_facets( numpy.asarray(b), facet_kernel, facet_info, (x_dofs, x), coeffs_i, form_consts, perm, dofs, block_size, num_dofs_per_element, mpc_data, num_facets_per_cell, ) timer_vector.stop() return vector @numba.njit def assemble_cells( b: npt.NDArray[_PETSc.ScalarType], # type: ignore kernel: cffi.FFI.CData, active_cells: npt.NDArray[numpy.int32], mesh: Tuple[npt.NDArray[numpy.int32], npt.NDArray[dolfinx.default_real_type]], coeffs: npt.NDArray[_PETSc.ScalarType], # type: ignore constants: npt.NDArray[_PETSc.ScalarType], # type: ignore permutation_info: npt.NDArray[numpy.uint32], dofmap: npt.NDArray[numpy.int32], block_size: int, num_dofs_per_element: int, mpc: Tuple[ # type: ignore npt.NDArray[numpy.int32], npt.NDArray[_PETSc.ScalarType], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], ], ): """Assemble additional MPC contributions for cell integrals""" ffi_fb = ffi.from_buffer # Empty arrays mimicking Nullpointers facet_index = numpy.zeros(0, dtype=numpy.int32) facet_perm = numpy.zeros(0, dtype=numpy.uint8) # Unpack mesh data x_dofmap, x = mesh # NOTE: All cells are assumed to be of the same typecd geometry = numpy.zeros((x_dofmap.shape[1], 3), dtype=dolfinx.default_real_type) b_local = numpy.zeros(block_size * num_dofs_per_element, dtype=_PETSc.ScalarType) # type: ignore for cell_index in active_cells: # Compute mesh geometry for cell geometry[:, :] = x[x_dofmap[cell_index]] # Assemble local element vector b_local.fill(0.0) kernel( ffi_fb(b_local), # type: ignore ffi_fb(coeffs[cell_index, :]), # type: ignore ffi_fb(constants), # type: ignore ffi_fb(geometry), # type: ignore ffi_fb(facet_index), # type: ignore ffi_fb(facet_perm), # type: ignore ) # NOTE: Here we need to add the apply_dof_transformation function # Modify global vector and local cell contributions b_local_copy = b_local.copy() modify_mpc_contributions(b, cell_index, b_local, b_local_copy, mpc, dofmap, block_size, num_dofs_per_element) for j in range(num_dofs_per_element): for k in range(block_size): position = dofmap[cell_index, j] * block_size + k b[position] += b_local[j * block_size + k] - b_local_copy[j * block_size + k] @numba.njit def assemble_exterior_slave_facets( b: npt.NDArray[_PETSc.ScalarType], # type: ignore kernel: cffi.FFI.CData, facet_info: npt.NDArray[numpy.int32], mesh: Tuple[npt.NDArray[numpy.int32], npt.NDArray[numpy.float64]], coeffs: npt.NDArray[_PETSc.ScalarType], # type: ignore constants: npt.NDArray[_PETSc.ScalarType], # type: ignore permutation_info: npt.NDArray[numpy.uint32], dofmap: npt.NDArray[numpy.int32], block_size: int, num_dofs_per_element: int, mpc: Tuple[ # type: ignore npt.NDArray[numpy.int32], npt.NDArray[_PETSc.ScalarType], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], ], num_facets_per_cell: int, ): """Assemble additional MPC contributions for facets""" ffi_fb = ffi.from_buffer # Unpack facet permutation info cell_perms, needs_facet_perm, facet_perms = permutation_info facet_index = numpy.zeros(1, dtype=numpy.int32) facet_perm = numpy.zeros(1, dtype=numpy.uint8) # Unpack mesh data x_dofmap, x = mesh geometry = numpy.zeros((x_dofmap.shape[1], 3), dtype=x.dtype) b_local = numpy.zeros(block_size * num_dofs_per_element, dtype=_PETSc.ScalarType) # type: ignore for i in range(facet_info.shape[0]): # Extract cell index (local to process) and facet index (local to cell) for kernel cell_index, local_facet = facet_info[i] facet_index[0] = local_facet # Extract cell geometry geometry[:, :] = x[x_dofmap[cell_index]] # Compute local facet kernel if needs_facet_perm: facet_perm[0] = facet_perms[cell_index * num_facets_per_cell + local_facet] b_local.fill(0.0) kernel( ffi_fb(b_local), # type: ignore ffi_fb(coeffs[cell_index, :]), # type: ignore ffi_fb(constants), # type: ignore ffi_fb(geometry), # type: ignore ffi_fb(facet_index), # type: ignore ffi_fb(facet_perm), # type: ignore ) # NOTE: Here we need to add the apply_dof_transformation # Modify local contributions and add global MPC contributions b_local_copy = b_local.copy() modify_mpc_contributions(b, cell_index, b_local, b_local_copy, mpc, dofmap, block_size, num_dofs_per_element) for j in range(num_dofs_per_element): for k in range(block_size): position = dofmap[cell_index, j] * block_size + k b[position] += b_local[j * block_size + k] - b_local_copy[j * block_size + k] @numba.njit(cache=True) def modify_mpc_contributions( b: npt.NDArray[_PETSc.ScalarType], # type: ignore cell_index: int, # type: ignore b_local: npt.NDArray[_PETSc.ScalarType], # type: ignore b_copy: npt.NDArray[_PETSc.ScalarType], # type: ignore mpc: Tuple[ # type: ignore npt.NDArray[numpy.int32], npt.NDArray[_PETSc.ScalarType], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], npt.NDArray[numpy.int32], ], dofmap: npt.NDArray[numpy.int32], block_size: int, num_dofs_per_element: int, ): """ Modify local entries of b_local with MPC info and add modified entries to global vector b. """ # Unwrap MPC data masters, coefficients, offsets, cell_to_slave, cell_to_slave_offset, is_slave = mpc # Determine which slaves are in this cell, # and which global index they have in 1D arrays cell_slaves = cell_to_slave[cell_to_slave_offset[cell_index] : cell_to_slave_offset[cell_index + 1]] # Get local index of slaves in cell cell_blocks = dofmap[cell_index] local_index = numpy.empty(len(cell_slaves), dtype=numpy.int32) for i in range(num_dofs_per_element): for j in range(block_size): dof = cell_blocks[i] * block_size + j if is_slave[dof]: location = numpy.flatnonzero(cell_slaves == dof)[0] local_index[location] = i * block_size + j # Move contribution from each slave to the corresponding master dof # and zero out local b for local, slave in zip(local_index, cell_slaves): cell_masters = masters[offsets[slave] : offsets[slave + 1]] cell_coeffs = coefficients[offsets[slave] : offsets[slave + 1]] for m0, c0 in zip(cell_masters, cell_coeffs): b[m0] += c0 * b_copy[local] b_local[local] = 0 dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/helpers.py000066400000000000000000000030301476141270300227650ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from typing import Union import dolfinx.cpp as _cpp import numpy import numpy.typing as npt import numba _forms = Union[_cpp.fem.Form_float32, _cpp.fem.Form_float64, _cpp.fem.Form_complex128] _bcs = Union[ _cpp.fem.DirichletBC_float32, _cpp.fem.DirichletBC_float64, _cpp.fem.DirichletBC_complex64, _cpp.fem.DirichletBC_complex128, ] @numba.njit(fastmath=True, cache=True) def extract_slave_cells(cell_offset: npt.NDArray[numpy.int32]) -> npt.NDArray[numpy.int32]: """From an offset determine which entries are nonzero""" slave_cells = numpy.zeros(len(cell_offset) - 1, dtype=numpy.int32) c = 0 for cell in range(len(cell_offset) - 1): num_cells = cell_offset[cell + 1] - cell_offset[cell] if num_cells > 0: slave_cells[c] = cell c += 1 return slave_cells[:c] @numba.njit(fastmath=True, cache=True) def pack_slave_facet_info( facets: npt.NDArray[numpy.int32], slave_cells: npt.NDArray[numpy.int32] ) -> npt.NDArray[numpy.int32]: """ Given an MPC and a set of facets (cell index, local_facet_index), compress the set to those that only contain slave cells """ facet_info = numpy.zeros((len(facets), 2), dtype=numpy.int32) i = 0 for facet in facets: if sum(slave_cells == facet[0]) > 0: facet_info[i, :] = [facet[0], facet[1]] i += 1 return facet_info[:i, :] dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/numba_setup.py000066400000000000000000000125441476141270300236570ustar00rootroot00000000000000# Copyright (C) 2020-2021 Garth Wells and Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import ctypes import ctypes.util import importlib import os import typing import petsc4py.lib from mpi4py import MPI from petsc4py import PETSc from petsc4py import get_config as PETSc_get_config import cffi import numpy as np import numba import numba.core.typing.cffi_utils as cffi_support def initialize_petsc() -> typing.Tuple[cffi.FFI, typing.Any]: """ Initialize petsc and CFFI for usage in numba """ # Get details of PETSc install petsc_dir = PETSc_get_config()["PETSC_DIR"] petsc_arch = petsc4py.lib.getPathArchPETSc()[1] # Get PETSc int and scalar types cmplx = True if np.dtype(PETSc.ScalarType).kind == "c" else False # type: ignore scalar_size = np.dtype(PETSc.ScalarType).itemsize # type: ignore index_size = np.dtype(PETSc.IntType).itemsize # type: ignore if index_size == 8: c_int_t = "int64_t" ctypes_index = ctypes.c_int64 # type: ignore elif index_size == 4: c_int_t = "int32_t" ctypes_index = ctypes.c_int32 # type: ignore else: raise RuntimeError("Cannot translate PETSc index size into a C type, index_size: {}.".format(index_size)) if cmplx and scalar_size == 16: c_scalar_t = "double _Complex" numba_scalar_t = numba.types.complex128 elif cmplx and scalar_size == 8: c_scalar_t = "float _Complex" numba_scalar_t = numba.types.complex64 elif not cmplx and scalar_size == 8: c_scalar_t = "double" numba_scalar_t = numba.types.float64 elif not cmplx and scalar_size == 4: c_scalar_t = "float" numba_scalar_t = numba.types.float32 else: raise RuntimeError( "Cannot translate PETSc scalar type to a C type, complex: {} size: {}.".format(complex, scalar_size) ) # Load PETSc library via ctypes petsc_lib_name = ctypes.util.find_library("petsc") if petsc_lib_name is not None: petsc_lib_ctypes = ctypes.CDLL(petsc_lib_name) else: try: petsc_lib_ctypes = ctypes.CDLL(os.path.join(petsc_dir, petsc_arch, "lib", "libpetsc.so")) except OSError: try: petsc_lib_ctypes = ctypes.CDLL(os.path.join(petsc_dir, petsc_arch, "lib", "libpetsc.dylib")) except OSError: raise RuntimeError("Could not load PETSc library for CFFI (ABI mode).") # Get the PETSc MatSetValuesLocal function via ctypes MatSetValues_ctypes = petsc_lib_ctypes.MatSetValuesLocal MatSetValues_ctypes.argtypes = ( ctypes.c_void_p, ctypes_index, ctypes.POINTER(ctypes_index), ctypes_index, ctypes.POINTER(ctypes_index), ctypes.c_void_p, ctypes.c_int, ) del petsc_lib_ctypes # CFFI - register complex types ffi = cffi.FFI() cffi_support.register_type(ffi.typeof("double _Complex"), numba.types.complex128) cffi_support.register_type(ffi.typeof("float _Complex"), numba.types.complex64) # Get MatSetValuesLocal from PETSc available via cffi in ABI mode ffi.cdef( """int MatSetValuesLocal(void* mat, {0} nrow, const {0}* irow, {0} ncol, const {0}* icol, const {1}* y, int addv); """.format(c_int_t, c_scalar_t) ) if petsc_lib_name is not None: ffi.dlopen(petsc_lib_name) else: try: ffi.dlopen(os.path.join(petsc_dir, petsc_arch, "lib", "libpetsc.so")) except OSError: try: ffi.dlopen(os.path.join(petsc_dir, petsc_arch, "lib", "libpetsc.dylib")) except OSError: raise RuntimeError("Could not load PETSc library for CFFI (ABI mode).") # Make MatSetValuesLocal from PETSc available via cffi in API mode worker = os.getenv("ASSEMBLE_XDIST_WORKER", None) module_name = "_petsc_cffi_{}".format(worker) if MPI.COMM_WORLD.Get_rank() == 0: os.environ["CC"] = "mpicc" ffibuilder = cffi.FFI() ffibuilder.cdef( """ typedef int... PetscInt; typedef ... PetscScalar; typedef int... InsertMode; int MatSetValuesLocal(void* mat, PetscInt nrow, const PetscInt* irow, PetscInt ncol, const PetscInt* icol, const PetscScalar* y, InsertMode addv); """ ) ffibuilder.set_source( module_name, """ # include "petscmat.h" """, libraries=["petsc"], include_dirs=[ os.path.join(petsc_dir, petsc_arch, "include"), os.path.join(petsc_dir, "include"), ], library_dirs=[os.path.join(petsc_dir, petsc_arch, "lib")], extra_compile_args=[], ) # Build module in same directory as python script ffibuilder.compile(".", verbose=False) MPI.COMM_WORLD.Barrier() module = importlib.import_module(module_name, ".") cffi_support.register_module(module) MatSetValuesLocal_api = module.lib.MatSetValuesLocal cffi_support.register_type(module.ffi.typeof("PetscScalar"), numba_scalar_t) return ffi, MatSetValuesLocal_api @numba.njit def sink(*args): # See https://github.com/numba/numba/issues/4036 for why we need 'sink' pass dolfinx_mpc-0.9.1/python/dolfinx_mpc/numba/py.typed000066400000000000000000000000001476141270300224420ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/problem.py000066400000000000000000000137171476141270300216760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright (C) 2021 Jørgen S. Dokken # # This file is part of DOLFINx MPC # # SPDX-License-Identifier: MIT from __future__ import annotations import typing from petsc4py import PETSc import dolfinx.fem.petsc import ufl from dolfinx import cpp as _cpp from dolfinx import fem as _fem from dolfinx import la as _la from .assemble_matrix import assemble_matrix, create_sparsity_pattern from .assemble_vector import apply_lifting, assemble_vector from .multipointconstraint import MultiPointConstraint class LinearProblem(dolfinx.fem.petsc.LinearProblem): """ Class for solving a linear variational problem with multi point constraints of the form a(u, v) = L(v) for all v using PETSc as a linear algebra backend. Args: a: A bilinear UFL form, the left hand side of the variational problem. L: A linear UFL form, the right hand side of the variational problem. mpc: The multi point constraint. bcs: A list of Dirichlet boundary conditions. u: The solution function. It will be created if not provided. The function has to be based on the functionspace in the mpc, i.e. .. highlight:: python .. code-block:: python u = dolfinx.fem.Function(mpc.function_space) petsc_options: Parameters that is passed to the linear algebra backend PETSc. #type: ignore For available choices for the 'petsc_options' kwarg, see the PETSc-documentation https://www.mcs.anl.gov/petsc/documentation/index.html. form_compiler_options: Parameters used in FFCx compilation of this form. Run `ffcx --help` at the commandline to see all available options. Takes priority over all other parameter values, except for `scalar_type` which is determined by DOLFINx. jit_options: Parameters used in CFFI JIT compilation of C code generated by FFCx. See https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py#L22-L37 for all available parameters. Takes priority over all other parameter values. Examples: Example usage: .. highlight:: python .. code-block:: python problem = LinearProblem(a, L, mpc, [bc0, bc1], petsc_options={"ksp_type": "preonly", "pc_type": "lu"}) """ u: _fem.Function _a: _fem.Form _L: _fem.Form _mpc: MultiPointConstraint _A: PETSc.Mat # type: ignore _b: PETSc.Vec # type: ignore _solver: PETSc.KSP # type: ignore _x: PETSc.Vec # type: ignore bcs: typing.List[_fem.DirichletBC] __slots__ = tuple(__annotations__) def __init__( self, a: ufl.Form, L: ufl.Form, mpc: MultiPointConstraint, bcs: typing.Optional[typing.List[_fem.DirichletBC]] = None, u: typing.Optional[_fem.Function] = None, petsc_options: typing.Optional[dict] = None, form_compiler_options: typing.Optional[dict] = None, jit_options: typing.Optional[dict] = None, ): # Compile forms form_compiler_options = {} if form_compiler_options is None else form_compiler_options jit_options = {} if jit_options is None else jit_options self._a = _fem.form(a, jit_options=jit_options, form_compiler_options=form_compiler_options) self._L = _fem.form(L, jit_options=jit_options, form_compiler_options=form_compiler_options) if not mpc.finalized: raise RuntimeError("The multi point constraint has to be finalized before calling initializer") self._mpc = mpc # Create function containing solution vector if u is None: self.u = _fem.Function(self._mpc.function_space) else: if u.function_space is self._mpc.function_space: self.u = u else: raise ValueError( "The input function has to be in the function space in the multi-point constraint", "i.e. u = dolfinx.fem.Function(mpc.function_space)", ) self._x = self.u.x.petsc_vec # Create MPC matrix pattern = create_sparsity_pattern(self._a, self._mpc) pattern.finalize() self._A = _cpp.la.petsc.create_matrix(self._mpc.function_space.mesh.comm, pattern) self._b = _la.create_petsc_vector( self._mpc.function_space.dofmap.index_map, self._mpc.function_space.dofmap.index_map_bs ) self.bcs = [] if bcs is None else bcs self._solver = PETSc.KSP().create(self.u.function_space.mesh.comm) # type: ignore self._solver.setOperators(self._A) # Give PETSc solver options a unique prefix solver_prefix = "dolfinx_mpc_solve_{}".format(id(self)) self._solver.setOptionsPrefix(solver_prefix) # Set PETSc options opts = PETSc.Options() # type: ignore opts.prefixPush(solver_prefix) if petsc_options is not None: for k, v in petsc_options.items(): opts[k] = v opts.prefixPop() self._solver.setFromOptions() def solve(self) -> _fem.Function: """Solve the problem. Returns: Function containing the solution""" # Assemble lhs self._A.zeroEntries() assemble_matrix(self._a, self._mpc, bcs=self.bcs, A=self._A) self._A.assemble() assert self._A.assembled # Assemble rhs with self._b.localForm() as b_loc: b_loc.set(0) assemble_vector(self._L, self._mpc, b=self._b) # Apply boundary conditions to the rhs apply_lifting(self._b, [self._a], [self.bcs], self._mpc) self._b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) # type: ignore _fem.petsc.set_bc(self._b, self.bcs) # Solve linear system and update ghost values in the solution self._solver.solve(self._b, self._x) self.u.x.scatter_forward() self._mpc.backsubstitution(self.u) return self.u dolfinx_mpc-0.9.1/python/dolfinx_mpc/py.typed000066400000000000000000000000001476141270300213400ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/utils/000077500000000000000000000000001476141270300210135ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/utils/__init__.py000066400000000000000000000020561476141270300231270ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen Schartum Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT """Helper functions for tests in Dolfinx mpc""" # flake8: noqa from __future__ import annotations from .mpc_utils import ( create_normal_approximation, create_point_to_point_constraint, determine_closest_block, facet_normal_approximation, log_info, rigid_motions_nullspace, rotation_matrix, ) from .test import ( compare_CSR, compare_mpc_lhs, compare_mpc_rhs, gather_constants, gather_PETScMatrix, gather_PETScVector, gather_transformation_matrix, get_assemblers, ) __all__ = [ "get_assemblers", "gather_PETScVector", "gather_PETScMatrix", "compare_mpc_lhs", "compare_mpc_rhs", "gather_transformation_matrix", "compare_CSR", "gather_constants", "rotation_matrix", "facet_normal_approximation", "log_info", "rigid_motions_nullspace", "determine_closest_block", "create_normal_approximation", "create_point_to_point_constraint", ] dolfinx_mpc-0.9.1/python/dolfinx_mpc/utils/mpc_utils.py000066400000000000000000000432401476141270300233670ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import dolfinx.common as _common import dolfinx.cpp as _cpp import dolfinx.fem as _fem import dolfinx.geometry as _geometry import dolfinx.la as _la import dolfinx.log as _log import dolfinx.mesh as _mesh import numpy as np import ufl from dolfinx import default_scalar_type as _dt import dolfinx_mpc.cpp __all__ = [ "rotation_matrix", "facet_normal_approximation", "log_info", "rigid_motions_nullspace", "determine_closest_block", "create_normal_approximation", "create_point_to_point_constraint", ] def rotation_matrix(axis, angle): # See https://en.wikipedia.org/wiki/Rotation_matrix, # Subsection: Rotation_matrix_from_axis_and_angle. if np.isclose(np.inner(axis, axis), 1): n_axis = axis else: # Normalize axis n_axis = axis / np.sqrt(np.inner(axis, axis)) # Define cross product matrix of axis axis_x = np.array([[0, -n_axis[2], n_axis[1]], [n_axis[2], 0, -n_axis[0]], [-n_axis[1], n_axis[0], 0]]) identity = np.cos(angle) * np.eye(3) outer = (1 - np.cos(angle)) * np.outer(n_axis, n_axis) return np.sin(angle) * axis_x + identity + outer def facet_normal_approximation( V, mt: _mesh.MeshTags, mt_id: int, tangent=False, jit_options: dict = {}, form_compiler_options: dict = {}, ): """ Approximate the facet normal by projecting it into the function space for a set of facets Args: V: The function space to project into mt: The `dolfinx.mesh.MeshTagsMetaClass` containing facet markers mt_id: The id for the facets in `mt` we want to represent the normal at tangent: To approximate the tangent to the facet set this flag to `True` jit_options: Parameters used in CFFI JIT compilation of C code generated by FFCx. See https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py#L22-L37 for all available parameters. Takes priority over all other parameter values. form_compiler_options: Parameters used in FFCx compilation of this form. Run `ffcx - -help` at the commandline to see all available options. Takes priority over all other parameter values, except for `scalar_type` which is determined by DOLFINx. """ timer = _common.Timer("~MPC: Facet normal projection") comm = V.mesh.comm n = ufl.FacetNormal(V.mesh) nh = _fem.Function(V) u, v = ufl.TrialFunction(V), ufl.TestFunction(V) ds = ufl.ds(domain=V.mesh, subdomain_data=mt, subdomain_id=mt_id) if tangent: if V.mesh.geometry.dim == 1: raise ValueError("Tangent not defined for 1D problem") elif V.mesh.geometry.dim == 2: a = ufl.inner(u, v) * ds L = ufl.inner(ufl.as_vector([-n[1], n[0]]), v) * ds else: def tangential_proj(u, n): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u c = _fem.Constant(V.mesh, [1, 1, 1]) a = ufl.inner(u, v) * ds L = ufl.inner(tangential_proj(c, n), v) * ds else: a = ufl.inner(u, v) * ds L = ufl.inner(n, v) * ds # Find all dofs that are not boundary dofs imap = V.dofmap.index_map all_blocks = np.arange(imap.size_local, dtype=np.int32) top_blocks = _fem.locate_dofs_topological(V, V.mesh.topology.dim - 1, mt.find(mt_id)) deac_blocks = all_blocks[np.isin(all_blocks, top_blocks, invert=True)] # Note there should be a better way to do this # Create sparsity pattern only for constraint + bc bilinear_form = _fem.form(a, jit_options=jit_options, form_compiler_options=form_compiler_options) pattern = _fem.create_sparsity_pattern(bilinear_form) pattern.insert_diagonal(deac_blocks) pattern.finalize() u_0 = _fem.Function(V) u_0.x.petsc_vec.set(0) bc_deac = _fem.dirichletbc(u_0, deac_blocks) A = _cpp.la.petsc.create_matrix(comm, pattern) A.zeroEntries() # Assemble the matrix with all entries form_coeffs = _cpp.fem.pack_coefficients(bilinear_form._cpp_object) form_consts = _cpp.fem.pack_constants(bilinear_form._cpp_object) _cpp.fem.petsc.assemble_matrix(A, bilinear_form._cpp_object, form_consts, form_coeffs, [bc_deac._cpp_object]) if bilinear_form.function_spaces[0] is bilinear_form.function_spaces[1]: A.assemblyBegin(PETSc.Mat.AssemblyType.FLUSH) # type: ignore A.assemblyEnd(PETSc.Mat.AssemblyType.FLUSH) # type: ignore _cpp.fem.petsc.insert_diagonal(A, bilinear_form.function_spaces[0], [bc_deac._cpp_object], 1.0) A.assemble() linear_form = _fem.form(L, jit_options=jit_options, form_compiler_options=form_compiler_options) b = _fem.petsc.assemble_vector(linear_form) _fem.petsc.apply_lifting(b, [bilinear_form], [[bc_deac]]) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # type: ignore _fem.petsc.set_bc(b, [bc_deac]) # Solve Linear problem solver = PETSc.KSP().create(V.mesh.comm) # type: ignore solver.setType("cg") solver.rtol = 1e-8 solver.setOperators(A) solver.solve(b, nh.x.petsc_vec) nh.x.petsc_vec.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # type: ignore timer.stop() solver.destroy() b.destroy() return nh def log_info(message): """ Wrapper for logging a simple string on the zeroth communicator Reverting the log level """ old_level = _log.get_log_level() if MPI.COMM_WORLD.rank == 0: _log.set_log_level(_log.LogLevel.INFO) _log.log(_log.LogLevel.INFO, message) _log.set_log_level(old_level) def rigid_motions_nullspace(V: _fem.FunctionSpace): """ Function to build nullspace for 2D/3D elasticity. Args: V: The function space """ _x = _fem.Function(V) # Get geometric dim gdim = V.mesh.geometry.dim assert gdim == 2 or gdim == 3 # Set dimension of nullspace dim = 3 if gdim == 2 else 6 # Create list of vectors for null space nullspace_basis = [ _la.vector(V.dofmap.index_map, bs=V.dofmap.index_map_bs, dtype=PETSc.ScalarType) # type: ignore for i in range(dim) ] basis = [b.array for b in nullspace_basis] dofs = [V.sub(i).dofmap.list.reshape(-1) for i in range(gdim)] # Build translational null space basis for i in range(gdim): basis[i][dofs[i]] = 1.0 # Build rotational null space basis x = V.tabulate_dof_coordinates() dofs_block = V.dofmap.list.reshape(-1) x0, x1, x2 = x[dofs_block, 0], x[dofs_block, 1], x[dofs_block, 2] if gdim == 2: basis[2][dofs[0]] = -x1 basis[2][dofs[1]] = x0 elif gdim == 3: basis[3][dofs[0]] = -x1 basis[3][dofs[1]] = x0 basis[4][dofs[0]] = x2 basis[4][dofs[2]] = -x0 basis[5][dofs[2]] = x1 basis[5][dofs[1]] = -x2 for b in nullspace_basis: b.scatter_forward() _la.orthonormalize(nullspace_basis) assert _la.is_orthonormal(nullspace_basis, float(np.finfo(_x.x.array.dtype).eps)) local_size = V.dofmap.index_map.size_local * V.dofmap.index_map_bs basis_petsc = [ PETSc.Vec().createWithArray(x[:local_size], bsize=gdim, comm=V.mesh.comm) # type: ignore for x in basis ] return PETSc.NullSpace().create(comm=V.mesh.comm, vectors=basis_petsc) # type: ignore def determine_closest_block(V, point): """ Determine the closest dofs (in a single block) to a point and the distance """ # Create boundingboxtree of cells connected to boundary facets tdim = V.mesh.topology.dim boundary_facets = _mesh.exterior_facet_indices(V.mesh.topology) V.mesh.topology.create_connectivity(tdim - 1, tdim) f_to_c = V.mesh.topology.connectivity(tdim - 1, tdim) boundary_cells = [] for facet in boundary_facets: boundary_cells.extend(f_to_c.links(facet)) cell_imap = V.mesh.topology.index_map(tdim) boundary_cells = np.array(np.unique(boundary_cells), dtype=np.int32) boundary_cells = boundary_cells[boundary_cells < cell_imap.size_local] bb_tree = _geometry.bb_tree(V.mesh, tdim, boundary_cells) midpoint_tree = _geometry.create_midpoint_tree(V.mesh, tdim, boundary_cells) # Find facet closest point = np.reshape(point, (1, 3)).astype(V.mesh.geometry.x.dtype) closest_cell = _geometry.compute_closest_entity(bb_tree, midpoint_tree, V.mesh, point)[0] # Set distance high if cell is not owned if cell_imap.size_local < closest_cell or closest_cell == -1: R = 1e5 else: # Get cell geometry p = V.mesh.geometry.x V.mesh.topology.create_connectivity(tdim, tdim) entities = _cpp.mesh.entities_to_geometry( V.mesh._cpp_object, tdim, np.array([closest_cell], dtype=np.int32), False ) R = np.linalg.norm(_cpp.geometry.compute_distance_gjk(point, p[entities[0]])) # Find processor with cell closest to point global_distances = MPI.COMM_WORLD.allgather(R) owning_processor = np.argmin(global_distances) dofmap = V.dofmap imap = dofmap.index_map ghost_owner = imap.owners local_max = imap.size_local # Determine which block of dofs is closest min_distance = max(R, 1e5) minimal_distance_block = None min_dof_owner = owning_processor if MPI.COMM_WORLD.rank == owning_processor: x = V.tabulate_dof_coordinates() cell_blocks = dofmap.cell_dofs(closest_cell) for block in cell_blocks: distance = np.linalg.norm(_cpp.geometry.compute_distance_gjk(point, x[block])) if distance < min_distance: # If cell owned by processor, but not the closest dof if block < local_max: min_dof_owner = MPI.COMM_WORLD.rank else: min_dof_owner = ghost_owner[block - local_max] minimal_distance_block = block min_distance = distance min_dof_owner = MPI.COMM_WORLD.bcast(min_dof_owner, root=owning_processor) # If dofs not owned by cell if owning_processor != min_dof_owner: owning_processor = min_dof_owner if MPI.COMM_WORLD.rank == min_dof_owner: # Re-search using the closest cell x = V.tabulate_dof_coordinates() cell_blocks = dofmap.cell_dofs(closest_cell) for block in cell_blocks: distance = np.linalg.norm(_cpp.geometry.compute_distance_gjk(point, x[block])) if distance < min_distance: # If cell owned by processor, but not the closest dof if block < local_max: min_dof_owner = MPI.COMM_WORLD.rank else: min_dof_owner = ghost_owner[block - local_max] minimal_distance_block = block min_distance = distance assert min_dof_owner == owning_processor return owning_processor, [minimal_distance_block] else: return owning_processor, [] def create_point_to_point_constraint(V, slave_point, master_point, vector=None): # Determine which processor owns the dof closest to the slave and master point slave_proc, slave_block = determine_closest_block(V, slave_point) master_proc, master_block = determine_closest_block(V, master_point) is_master_proc = MPI.COMM_WORLD.rank == master_proc is_slave_proc = MPI.COMM_WORLD.rank == slave_proc block_size = V.dofmap.index_map_bs imap = V.dofmap.index_map # Output structures slaves, masters, coeffs, owners, offsets = [], [], [], [], [] # Information required to handle vector as input zero_indices, slave_index = None, None if vector is not None: zero_indices = np.argwhere(np.isclose(vector, 0)).T[0] slave_index = np.argmax(np.abs(vector)) if is_slave_proc: assert len(slave_block) == 1 slave_block_g = imap.local_to_global(np.asarray(slave_block, dtype=np.int32))[0] if vector is None: slaves = np.arange( slave_block[0] * block_size, slave_block[0] * block_size + block_size, dtype=np.int32, ) else: assert len(vector) == block_size # Check for input vector (Should be of same length as number of slaves) # All entries should not be zero assert not np.isin(slave_index, zero_indices) # Check vector for zero contributions slaves = np.array([slave_block[0] * block_size + slave_index], dtype=np.int32) for i in range(block_size): if i != slave_index and not np.isin(i, zero_indices): masters.append(slave_block_g * block_size + i) owners.append(slave_proc) coeffs.append(-vector[i] / vector[slave_index]) global_masters = None if is_master_proc: assert len(master_block) == 1 master_block_g = imap.local_to_global(np.asarray(master_block, dtype=np.int32))[0] masters_as_glob = np.arange( master_block_g * block_size, master_block_g * block_size + block_size, dtype=np.int64 ) else: masters_as_glob = np.array([], dtype=np.int64) ghost_processors = [] shared_indices = dolfinx_mpc.cpp.mpc.compute_shared_indices(V._cpp_object) if is_master_proc and is_slave_proc: # If slaves and masters are on the same processor finalize local work if vector is None: masters = masters_as_glob owners = np.full(len(masters), master_proc, dtype=np.int32) coeffs = np.ones(len(masters), dtype=_dt) offsets = np.arange(0, len(masters) + 1, dtype=np.int32) else: for i in range(len(masters_as_glob)): if not np.isin(i, zero_indices): masters.append(masters_as_glob[i]) owners.append(master_proc) coeffs.append(vector[i] / vector[slave_index]) offsets = [0, len(masters)] else: # Send/Recv masters from other processor if is_master_proc: MPI.COMM_WORLD.send(masters_as_glob, dest=slave_proc, tag=10) if is_slave_proc: global_masters = MPI.COMM_WORLD.recv(source=master_proc, tag=10) for i, master in enumerate(global_masters): if not np.isin(i, zero_indices): masters.append(master) owners.append(master_proc) if vector is None: coeffs.append(1) else: coeffs.append(vector[i] / vector[slave_index]) if vector is None: offsets = np.arange(0, len(slaves) + 1, dtype=np.int32) else: offsets = np.array([0, len(masters)], dtype=np.int32) ghost_processors = shared_indices.links(slave_block[0]) # Broadcast processors containg slave ghost_processors = MPI.COMM_WORLD.bcast(ghost_processors, root=slave_proc) if is_slave_proc: for proc in ghost_processors: MPI.COMM_WORLD.send(slave_block_g * block_size + slaves % block_size, dest=proc, tag=20 + proc) MPI.COMM_WORLD.send(coeffs, dest=proc, tag=30 + proc) MPI.COMM_WORLD.send(owners, dest=proc, tag=40 + proc) MPI.COMM_WORLD.send(masters, dest=proc, tag=50 + proc) MPI.COMM_WORLD.send(offsets, dest=proc, tag=60 + proc) # Receive data for ghost slaves ghost_slaves, ghost_masters, ghost_coeffs, ghost_owners, ghost_offsets = [], [], [], [], [] if np.isin(MPI.COMM_WORLD.rank, ghost_processors): # Convert recieved slaves to the corresponding ghost index recv_slaves = MPI.COMM_WORLD.recv(source=slave_proc, tag=20 + MPI.COMM_WORLD.rank) ghost_coeffs = MPI.COMM_WORLD.recv(source=slave_proc, tag=30 + MPI.COMM_WORLD.rank) ghost_owners = MPI.COMM_WORLD.recv(source=slave_proc, tag=40 + MPI.COMM_WORLD.rank) ghost_masters = MPI.COMM_WORLD.recv(source=slave_proc, tag=50 + MPI.COMM_WORLD.rank) ghost_offsets = MPI.COMM_WORLD.recv(source=slave_proc, tag=60 + MPI.COMM_WORLD.rank) # Unroll ghost blocks ghosts = imap.ghosts ghost_dofs = [g * block_size + i for g in ghosts for i in range(block_size)] ghost_slaves = np.zeros(len(recv_slaves), dtype=np.int32) local_size = imap.size_local for i, slave in enumerate(recv_slaves): idx = np.argwhere(ghost_dofs == slave)[0, 0] ghost_slaves[i] = local_size * block_size + idx slaves = np.asarray(np.append(slaves, ghost_slaves), dtype=np.int32) masters = np.asarray(np.append(masters, ghost_masters), dtype=np.int64) coeffs = np.asarray(np.append(coeffs, ghost_coeffs), dtype=_dt) owners = np.asarray(np.append(owners, ghost_owners), dtype=np.int32) offsets = np.asarray(np.append(offsets, ghost_offsets), dtype=np.int32) return slaves, masters, coeffs, owners, offsets def create_normal_approximation(V: _fem.FunctionSpace, mt: _cpp.mesh.MeshTags_int32, value: int): """ Creates a normal approximation for the dofs in the closure of the attached entities. Where a dof is attached to entities facets, an average is computed Args: V: The function space mt: The meshtag containing the indices value: Value for the entities in the mesh tag to compute normal on Returns: nh: The normal vector """ nh = _fem.Function(V) n_cpp = dolfinx_mpc.cpp.mpc.create_normal_approximation(V._cpp_object, mt.dim, mt.find(value)) nh._cpp_object = n_cpp return nh dolfinx_mpc-0.9.1/python/dolfinx_mpc/utils/py.typed000066400000000000000000000000001476141270300225000ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/dolfinx_mpc/utils/test.py000066400000000000000000000243321476141270300223500ustar00rootroot00000000000000# Copyright (C) 2021-2022 Jørgen Schartum Dokken and Connor D. Pierce # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations __all__ = [ "gather_PETScVector", "gather_PETScMatrix", "compare_mpc_lhs", "compare_mpc_rhs", "gather_transformation_matrix", "compare_CSR", ] from typing import Any from mpi4py import MPI from petsc4py import PETSc import dolfinx.common import numpy as np import pytest import scipy.sparse import dolfinx_mpc @pytest.fixture def get_assemblers(request): """ Get eiher numba assembler or C++ assembler depending on the request """ if request.param == "numba": try: import numba # noqa: F401 except ModuleNotFoundError: pytest.skip("Numba not installed") from dolfinx_mpc.numba import assemble_matrix, assemble_vector return (assemble_matrix, assemble_vector) elif request.param == "C++": from dolfinx_mpc import assemble_matrix, assemble_vector return (assemble_matrix, assemble_vector) else: raise RuntimeError(f"Undefined assembler type: {request.param}.\n" + "Options are 'numba' or 'C++'") def _gather_slaves_global(constraint): """ Given a multi point constraint, return slaves for all processors with global dof numbering """ imap = constraint.function_space.dofmap.index_map num_local_slaves = constraint.num_local_slaves block_size = constraint.function_space.dofmap.index_map_bs _slaves = constraint.slaves if num_local_slaves > 0: slave_blocks = _slaves[:num_local_slaves] // block_size slave_rems = _slaves[:num_local_slaves] % block_size glob_slaves = np.asarray(imap.local_to_global(slave_blocks), dtype=np.int64) * block_size + slave_rems else: glob_slaves = np.array([], dtype=np.int64) slaves = np.hstack(MPI.COMM_WORLD.allgather(glob_slaves)) return slaves def gather_constants(constraint, root=0): """ Given a multi-point constraint, gather all constants """ imap = constraint.index_map() constants = constraint._cpp_object.constants l_range = imap.local_range ranges = MPI.COMM_WORLD.gather(np.asarray(l_range, dtype=np.int64), root=root) g_consts = MPI.COMM_WORLD.gather(constants[: l_range[1] - l_range[0]], root=root) if MPI.COMM_WORLD.rank == root: block_size = constraint.function_space().dofmap.index_map_bs global_consts = np.zeros(imap.size_global * block_size, dtype=constraint.coefficients()[0].dtype) for r, vals in zip(ranges, g_consts): global_consts[r[0] : r[1]] = vals return global_consts else: return def gather_transformation_matrix(constraint, root=0): """ Creates the transformation matrix K (dim x dim-len(slaves)) for a given MPC and gathers it as a scipy CSR matrix on process 'root'. Example: For dim=3, where: u_1 = alpha u_0 + beta u_2 Input: slaves = [1] masters = [0, 2] coeffs = [alpha, beta] offsets = [0, 1] Output: K = [[1,0], [alpha beta], [0,1]] """ # Gather slaves from all procs V = constraint.V imap = constraint.function_space.dofmap.index_map block_size = V.dofmap.index_map_bs num_local_slaves = constraint.num_local_slaves # Gather all global_slaves slaves = constraint.slaves[:num_local_slaves] if num_local_slaves > 0: local_blocks = slaves // block_size local_rems = slaves % block_size glob_slaves = np.asarray(imap.local_to_global(local_blocks), dtype=np.int64) * block_size + local_rems else: glob_slaves = np.array([], dtype=np.int64) all_slaves = np.hstack(MPI.COMM_WORLD.allgather(glob_slaves)) masters = constraint.masters.array master_blocks = masters // block_size master_rems = masters % block_size coeffs = constraint.coefficients()[0] offsets = constraint.masters.offsets # Create sparse K matrix K_val, rows, cols = [], [], [] # Add local contributions to K from local slaves for slave, global_slave in zip(slaves, glob_slaves): masters_index = ( np.asarray( imap.local_to_global(master_blocks[offsets[slave] : offsets[slave + 1]]), dtype=np.int64, ) * block_size + master_rems[offsets[slave] : offsets[slave + 1]] ) coeffs_index = coeffs[offsets[slave] : offsets[slave + 1]] # If we have a simply equality constraint (dirichletbc) if len(masters_index) > 0: for master, coeff in zip(masters_index, coeffs_index): count = sum(master > all_slaves) K_val.append(coeff) rows.append(global_slave) cols.append(master - count) else: K_val.append(1) count = sum(global_slave > all_slaves) rows.append(global_slave) cols.append(global_slave - count) # Add identity for all dofs on diagonal l_range = V.dofmap.index_map.local_range global_dofs = np.arange(l_range[0] * block_size, l_range[1] * block_size) is_slave = np.isin(global_dofs, glob_slaves) for i, dof in enumerate(global_dofs): if not is_slave[i]: K_val.append(1) rows.append(dof) cols.append(dof - sum(dof > all_slaves)) # Gather K to root K_vals = MPI.COMM_WORLD.gather(np.asarray(K_val, dtype=coeffs.dtype), root=root) rows_g = MPI.COMM_WORLD.gather(np.asarray(rows, dtype=np.int64), root=root) cols_g = MPI.COMM_WORLD.gather(np.asarray(cols, dtype=np.int64), root=root) if MPI.COMM_WORLD.rank == root: K_sparse = scipy.sparse.coo_matrix((np.hstack(K_vals), (np.hstack(rows_g), np.hstack(cols_g)))).tocsr() return K_sparse def petsc_to_local_CSR(A: PETSc.Mat, mpc: dolfinx_mpc.MultiPointConstraint): # type: ignore """ Convert a PETSc matrix to a local CSR matrix (scipy) including ghost entries """ global_indices = np.asarray(mpc.function_space.dofmap.index_map.global_indices(), dtype=PETSc.IntType) # type: ignore sort_index = np.argsort(global_indices) is_A = PETSc.IS().createGeneral(global_indices[sort_index]) # type: ignore A_loc = A.createSubMatrices(is_A)[0] ai, aj, av = A_loc.getValuesCSR() A_csr = scipy.sparse.csr_matrix((av, aj, ai)) return A_csr[global_indices[:, None], global_indices] def gather_PETScMatrix(A: PETSc.Mat, root=0) -> scipy.sparse.csr_matrix: # type: ignore """ Given a distributed PETSc matrix, gather in on process 'root' in a scipy CSR matrix """ ai, aj, av = A.getValuesCSR() aj_all = MPI.COMM_WORLD.gather(aj, root=root) # type: ignore av_all = MPI.COMM_WORLD.gather(av, root=root) # type: ignore ai_all = MPI.COMM_WORLD.gather(ai, root=root) # type: ignore if MPI.COMM_WORLD.rank == root: ai_cum = [0] for ai in ai_all: # type: ignore offsets = ai[1:] + ai_cum[-1] ai_cum.extend(offsets) return scipy.sparse.csr_matrix((np.hstack(av_all), np.hstack(aj_all), ai_cum), shape=A.getSize()) # type: ignore def gather_PETScVector(vector: PETSc.Vec, root=0) -> np.ndarray: # type: ignore """ Gather a PETScVector from different processors on process 'root' as an numpy array """ if vector.handle == 0: raise RuntimeError("Vector has been destroyed prior to this call") numpy_vec = np.zeros(vector.size, dtype=vector.array.dtype) l_min = vector.owner_range[0] l_max = vector.owner_range[1] numpy_vec[l_min:l_max] += vector.array return np.asarray(sum(MPI.COMM_WORLD.allgather(numpy_vec))) def compare_CSR(A: scipy.sparse.csr_matrix, B: scipy.sparse.csr_matrix, atol=1e-10): """Compare CSR matrices A and B""" diff = np.abs(A - B) assert diff.max() < atol def compare_mpc_lhs( A_org: PETSc.Mat, # type: ignore A_mpc: PETSc.Mat, # type: ignore mpc: dolfinx_mpc.MultiPointConstraint, root: int = 0, atol: np.floating[Any] = 5e3 * np.finfo(dolfinx.default_scalar_type).resolution, ): """ Compare an unmodified matrix for the problem with the one assembled with a multi point constraint. The unmodified matrix is multiplied with K^T A K, where K is the global transformation matrix. """ timer = dolfinx.common.Timer("~MPC: Compare matrices") comm = mpc.V.mesh.comm V = mpc.V assert root < comm.size is_complex = np.issubdtype(mpc.coefficients()[0].dtype, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 K = gather_transformation_matrix(mpc, root=root) A_csr = gather_PETScMatrix(A_org, root=root) # Get global slaves glob_slaves = _gather_slaves_global(mpc) A_mpc_csr = gather_PETScMatrix(A_mpc, root=root) if MPI.COMM_WORLD.rank == root: K = K.astype(scipy_dtype) A_csr = A_csr.astype(scipy_dtype) KTAK = np.conj(K.T) * A_csr * K # Remove identity rows of MPC matrix all_cols = np.arange(V.dofmap.index_map.size_global * V.dofmap.index_map_bs) cols_except_slaves = np.flatnonzero(np.isin(all_cols, glob_slaves, invert=True).astype(np.int32)) mpc_without_slaves = A_mpc_csr[cols_except_slaves[:, None], cols_except_slaves] # Compute difference compare_CSR(KTAK, mpc_without_slaves, atol=atol) timer.stop() def compare_mpc_rhs( b_org: PETSc.Vec, # type: ignore b: PETSc.Vec, # type: ignore constraint: dolfinx_mpc.MultiPointConstraint, root: int = 0, ): """ Compare an unconstrained RHS with an MPC rhs. """ glob_slaves = _gather_slaves_global(constraint) b_org_np = gather_PETScVector(b_org, root=root) b_np = gather_PETScVector(b, root=root) K = gather_transformation_matrix(constraint, root=root) # constants = gather_constants(constraint) comm = constraint.V.mesh.comm if comm.rank == root: reduced_b = np.conj(K.T) @ b_org_np # - constants for RHS mpc all_cols = np.arange(constraint.V.dofmap.index_map.size_global * constraint.V.dofmap.index_map_bs) cols_except_slaves = np.flatnonzero(np.isin(all_cols, glob_slaves, invert=True).astype(np.int32)) assert np.allclose(b_np[glob_slaves], 0) assert np.allclose(b_np[cols_except_slaves], reduced_b) dolfinx_mpc-0.9.1/python/pyproject.toml000066400000000000000000000033021476141270300202630ustar00rootroot00000000000000[build-system] requires = [ "scikit-build-core[pyproject]", "nanobind>=2.0.0", "petsc4py", "mpi4py", ] build-backend = "scikit_build_core.build" [project] name = "dolfinx_mpc" version = "0.9.0" description = "DOLFINx_MPC Python interface" readme = "README.md" requires-python = ">=3.8.0" license = { file = "../LICENSE" } authors = [{ email = "dokken@simula.no", name = "Jørgen S. Dokken" }] dependencies = [ "numpy>=1.21", "cffi", "petsc4py", "mpi4py", "fenics-dolfinx>=0.9.0", ] [project.optional-dependencies] docs = ['jupyter-book', 'jupytext', "scipy"] lint = ["ruff", "mypy"] optional = ["numba"] test = ["pytest", "coverage", "scipy"] all = [ "dolfinx_mpc[docs]", "dolfinx_mpc[optional]", "dolfinx_mpc[lint]", "dolfinx_mpc[test]", ] [tool.scikit-build] wheel.packages = ["dolfinx_mpc"] sdist.exclude = ["*.cpp"] cmake.build-type = "Release" [tool.pytest] junit_family = "xunit2" [tool.mypy] ignore_missing_imports = true # Folders to exclude exclude = ["docs/", "build/"] # Folder to check with mypy files = ["src", "tests"] [tool.ruff] src = ["benchmarks", "demos", "dolfinx_mpc"] line-length = 120 indent-width = 4 [tool.ruff.lint] select = [ # Pyflakes "F", # Pycodestyle "E", "W", # isort "I001", ] [tool.ruff.lint.isort] known-first-party = ["dolfinx_mpc"] known-third-party = [ "basix", "dolfinx", "ffcx", "ufl", "gmsh", "numpy", "pytest", ] section-order = [ "future", "standard-library", "mpi", "third-party", "first-party", "local-folder", ] [tool.ruff.lint.isort.sections] "mpi" = ["mpi4py", "petsc4py"] dolfinx_mpc-0.9.1/python/tests/000077500000000000000000000000001476141270300165135ustar00rootroot00000000000000dolfinx_mpc-0.9.1/python/tests/mwe123.py000066400000000000000000000016151476141270300201060ustar00rootroot00000000000000from __future__ import annotations from mpi4py import MPI import numpy as np from dolfinx import default_scalar_type, fem from dolfinx.mesh import create_unit_square, locate_entities_boundary, meshtags import dolfinx_mpc mesh = create_unit_square(MPI.COMM_WORLD, 1, 1) V = fem.functionspace(mesh, ("Lagrange", 1)) dolfinx_mpc.cpp.mpc.test_tabulate(V._cpp_object) # Create multipoint constraint def periodic_relation(x): out_x = np.copy(x) out_x[0] = 1 - x[0] return out_x def PeriodicBoundary(x): return np.isclose(x[0], 1) facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, PeriodicBoundary) arg_sort = np.argsort(facets) mt = meshtags(mesh, mesh.topology.dim - 1, facets[arg_sort], np.full(len(facets), 2, dtype=np.int32)) mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_periodic_constraint_topological(V, mt, 2, periodic_relation, [], default_scalar_type(1.0)) dolfinx_mpc-0.9.1/python/tests/nitsche_ufl.py000066400000000000000000000227651476141270300214040ustar00rootroot00000000000000# Copyright (C) 2021 Jørgen S. Dokken and Sarah Roggendorf # # SPDX-License-Identifier: MIT from __future__ import annotations from typing import Dict, Tuple from petsc4py import PETSc as _PETSc import numpy as np import ufl from dolfinx import common as _common from dolfinx import fem as _fem from dolfinx import log as _log from dolfinx import mesh as dmesh from dolfinx import nls as _nls from dolfinx_contact.helpers import ( R_minus, epsilon, lame_parameters, rigid_motions_nullspace, sigma_func, ) __all__ = ["nitsche_ufl"] def nitsche_ufl( mesh: dmesh.Mesh, mesh_data: Tuple[dmesh.MeshTags, int, int], physical_parameters: dict = {}, nitsche_parameters: Dict[str, float] = {}, plane_loc: float = 0.0, vertical_displacement: float = -0.1, nitsche_bc: bool = True, quadrature_degree: int = 5, form_compiler_options: Dict = {}, jit_options: Dict = {}, petsc_options: Dict = {}, newton_options: Dict = {}, ) -> _fem.Function: """ Use UFL to compute the one sided contact problem with a mesh coming into contact with a rigid surface (not meshed). Parameters ========== mesh The input mesh mesh_data A triplet with a mesh tag for facets and values v0, v1. v0 should be the value in the mesh tags for facets to apply a Dirichlet condition on. v1 is the value for facets which should have applied a contact condition on physical_parameters Optional dictionary with information about the linear elasticity problem. Valid (key, value) tuples are: ('E': float), ('nu', float), ('strain', bool) nitsche_parameters Optional dictionary with information about the Nitsche configuration. Valid (keu, value) tuples are: ('gamma', float), ('theta', float) where theta can be -1, 0 or 1 for skew-symmetric, penalty like or symmetric enforcement of Nitsche conditions plane_loc The location of the plane in y-coordinate (2D) and z-coordinate (3D) vertical_displacement The amount of verticial displacment enforced on Dirichlet boundary nitsche_bc Use Nitche's method to enforce Dirichlet boundary conditions quadrature_degree The quadrature degree to use for the custom contact kernels form_compiler_options Parameters used in FFCX compilation of this form. Run `ffcx --help` at the commandline to see all available options. Takes priority over all other parameter values, except for `scalar_type` which is determined by DOLFINX. jit_options Parameters used in CFFI JIT compilation of C code generated by FFCX. See https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py for all available parameters. Takes priority over all other parameter values. petsc_options Parameters that is passed to the linear algebra backend PETSc. For available choices for the 'petsc_options' kwarg, see the `PETSc-documentation ` newton_options Dictionary with Newton-solver options. Valid (key, item) tuples are: ("atol", float), ("rtol", float), ("convergence_criterion", "str"), ("max_it", int), ("error_on_nonconvergence", bool), ("relaxation_parameter", float) """ # Compute lame parameters plane_strain = physical_parameters.get("strain", False) E = physical_parameters.get("E", 1e3) nu = physical_parameters.get("nu", 0.1) mu_func, lambda_func = lame_parameters(plane_strain) mu = mu_func(E, nu) lmbda = lambda_func(E, nu) sigma = sigma_func(mu, lmbda) # Nitche parameters and variables theta = nitsche_parameters.get("theta", 1) gamma = nitsche_parameters.get("gamma", 1) (facet_marker, top_value, bottom_value) = mesh_data assert facet_marker.dim == mesh.topology.dim - 1 # Normal vector pointing into plane (but outward of the body coming into contact) # Similar to computing the normal by finding the gap vector between two meshes n_vec = np.zeros(mesh.geometry.dim) n_vec[mesh.geometry.dim - 1] = -1 n_2 = ufl.as_vector(n_vec) # Normal of plane (projection onto other body) # Scaled Nitsche parameter h = ufl.CellDiameter(mesh) gamma_scaled = gamma * E / h # Mimicking the plane y=-plane_loc x = ufl.SpatialCoordinate(mesh) gap = x[mesh.geometry.dim - 1] + plane_loc g_vec = [i for i in range(mesh.geometry.dim)] g_vec[mesh.geometry.dim - 1] = gap V = _fem.functionspace(mesh, ("CG", 1)) u = _fem.Function(V) v = ufl.TestFunction(V) metadata = {"quadrature_degree": quadrature_degree} dx = ufl.Measure("dx", domain=mesh) ds = ufl.Measure("ds", domain=mesh, metadata=metadata, subdomain_data=facet_marker) a = ufl.inner(sigma(u), epsilon(v)) * dx zero = np.full( mesh.geometry.dim, 0, dtype=_PETSc.ScalarType, # type: ignore ) L = ufl.inner(_fem.Constant(mesh, zero), v) * dx # Derivation of one sided Nitsche with gap function n = ufl.FacetNormal(mesh) def sigma_n(v): # NOTE: Different normals, see summary paper return ufl.dot(sigma(v) * n, n_2) F = a - theta / gamma_scaled * sigma_n(u) * sigma_n(v) * ds(bottom_value) - L F += ( 1 / gamma_scaled * R_minus(sigma_n(u) + gamma_scaled * (gap - ufl.dot(u, n_2))) * (theta * sigma_n(v) - gamma_scaled * ufl.dot(v, n_2)) * ds(bottom_value) ) # Compute corresponding Jacobian du = ufl.TrialFunction(V) q = sigma_n(u) + gamma_scaled * (gap - ufl.dot(u, n_2)) J = ufl.inner(sigma(du), epsilon(v)) * ufl.dx - theta / gamma_scaled * sigma_n(du) * sigma_n(v) * ds(bottom_value) J += ( 1 / gamma_scaled * 0.5 * (1 - ufl.sign(q)) * (sigma_n(du) - gamma_scaled * ufl.dot(du, n_2)) * (theta * sigma_n(v) - gamma_scaled * ufl.dot(v, n_2)) * ds(bottom_value) ) # Nitsche for Dirichlet, another theta-scheme. # https://doi.org/10.1016/j.cma.2018.05.024 if nitsche_bc: disp_vec = np.zeros(mesh.geometry.dim) disp_vec[mesh.geometry.dim - 1] = vertical_displacement u_D = ufl.as_vector(disp_vec) F += ( -ufl.inner(sigma(u) * n, v) * ds(top_value) - theta * ufl.inner(sigma(v) * n, u - u_D) * ds(top_value) + gamma_scaled / h * ufl.inner(u - u_D, v) * ds(top_value) ) bcs = [] J += ( -ufl.inner(sigma(du) * n, v) * ds(top_value) - theta * ufl.inner(sigma(v) * n, du) * ds(top_value) + gamma_scaled / h * ufl.inner(du, v) * ds(top_value) ) else: # strong Dirichlet boundary conditions def _u_D(x): values = np.zeros((mesh.geometry.dim, x.shape[1])) values[mesh.geometry.dim - 1] = vertical_displacement return values u_D = _fem.Function(V) u_D.interpolate(_u_D) u_D.name = "u_D" u_D.x.scatter_forward() tdim = mesh.topology.dim dirichlet_dofs = _fem.locate_dofs_topological(V, tdim - 1, facet_marker.find(top_value)) bc = _fem.dirichletbc(u_D, dirichlet_dofs) bcs = [bc] # DEBUG: Write each step of Newton iterations # Create nonlinear problem and Newton solver # def form(self, x: _PETSc.Vec): # x.ghostUpdate(addv=_PETSc.InsertMode.INSERT, mode=_PETSc.ScatterMode.FORWARD) # self.i += 1 # xdmf.write_function(u, self.i) # setattr(_fem.petsc.NonlinearProblem, "form", form) problem = _fem.petsc.NonlinearProblem( F, u, bcs, J=J, jit_options=jit_options, form_compiler_options=form_compiler_options ) # DEBUG: Write each step of Newton iterations # problem.i = 0 # from pathlib import Path # outdir = Path("results") # outdir.mkdir(exist_ok=True, parents=True) # xdmf = _io.XDMFFile(mesh.comm, outdir / "tmp_sol.xdmf", "w") # xdmf.write_mesh(mesh) # xdmf.close() solver = _nls.petsc.NewtonSolver(mesh.comm, problem) # type: ignore null_space = rigid_motions_nullspace(V) solver.A.setNearNullSpace(null_space) # Set Newton solver options solver.atol = newton_options.get("atol", 1e-9) solver.rtol = newton_options.get("rtol", 1e-9) solver.convergence_criterion = newton_options.get("convergence_criterion", "incremental") solver.max_it = newton_options.get("max_it", 50) solver.error_on_nonconvergence = newton_options.get("error_on_nonconvergence", True) solver.relaxation_parameter = newton_options.get("relaxation_parameter", 0.8) def _u_initial(x): values = np.zeros((mesh.geometry.dim, x.shape[1])) values[-1] = -0.01 - plane_loc return values # Set initial_condition: u.interpolate(_u_initial) # Define solver and options ksp = solver.krylov_solver opts = _PETSc.Options() # type: ignore option_prefix = ksp.getOptionsPrefix() # type: ignore # Set PETSc options opts = _PETSc.Options() # type: ignore opts.prefixPush(option_prefix) for k, v in petsc_options.items(): opts[k] = v opts.prefixPop() ksp.setFromOptions() # Solve non-linear problem _log.set_log_level(_log.LogLevel.INFO) num_dofs_global = V.dofmap.index_map_bs * V.dofmap.index_map.size_global with _common.Timer(f"{num_dofs_global} Solve Nitsche"): n, converged = solver.solve(u) u.x.scatter_forward() if solver.error_on_nonconvergence: assert converged print(f"{num_dofs_global}, Number of interations: {n:d}") return u dolfinx_mpc-0.9.1/python/tests/test_cube_contact.py000066400000000000000000000314641476141270300225650ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT # # Multi point constraint problem for linear elasticity with slip conditions # between two cubes. from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import gmsh import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.io import gmshio import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 theta = np.pi / 5 @pytest.fixture def generate_hex_boxes(): """ Generate the stacked boxes [x0,y0,z0]x[y1,y1,z1] and [x0,y0,z1] x [x1,y1,z2] with different resolution in each box. The markers are is a list of arrays containing markers array of markers for [back, bottom, right, left, top, front] per box volume_markers a list of marker per volume """ res = 0.2 x0, y0, z0, x1, y1, z1, z2 = 0, 0, 0, 1, 1, 1, 2 facet_markers = [[11, 5, 12, 13, 4, 14], [21, 9, 22, 23, 3, 24]] volume_markers = [1, 2] r_matrix = dolfinx_mpc.utils.rotation_matrix([1, 1, 0], -theta) # Check if GMSH is initialized gmsh.initialize() gmsh.clear() if MPI.COMM_WORLD.rank == 0: gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2) gmsh.option.setNumber("Mesh.RecombineAll", 2) gmsh.option.setNumber("General.Terminal", 0) bottom = gmsh.model.occ.addRectangle(x0, y0, z0, x1 - x0, y1 - y0) top = gmsh.model.occ.addRectangle(x0, y0, z2, x1 - x0, y1 - y0) # Set mesh size at point gmsh.model.occ.extrude([(2, bottom)], 0, 0, z1 - z0, numElements=[int(1 / (2 * res))], recombine=True) gmsh.model.occ.extrude([(2, top)], 0, 0, z1 - z2 - 1e-12, numElements=[int(1 / (2 * res))], recombine=True) # Syncronize to be able to fetch entities gmsh.model.occ.synchronize() # Create entity -> marker map (to be used after rotation) volumes = gmsh.model.getEntities(3) volume_entities = {"Top": [None, volume_markers[1]], "Bottom": [None, volume_markers[0]]} for i, volume in enumerate(volumes): com = gmsh.model.occ.getCenterOfMass(volume[0], volume[1]) if np.isclose(com[2], (z1 - z0) / 2): bottom_index = i volume_entities["Bottom"][0] = volume elif np.isclose(com[2], (z2 - z1) / 2 + z1): top_index = i volume_entities["Top"][0] = volume surfaces = ["Top", "Bottom", "Left", "Right", "Front", "Back"] entities = { "Bottom": {key: [[], None] for key in surfaces}, "Top": {key: [[], None] for key in surfaces}, } # Identitfy entities for each surface of top and bottom cube # Physical markers for bottom cube bottom_surfaces = gmsh.model.getBoundary([volumes[bottom_index]], recursive=False, oriented=False) for entity in bottom_surfaces: com = gmsh.model.occ.getCenterOfMass(entity[0], entity[1]) if np.allclose(com, [(x1 - x0) / 2, y1, (z1 - z0) / 2]): entities["Bottom"]["Back"][0].append(entity[1]) entities["Bottom"]["Back"][1] = facet_markers[0][0] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z0]): entities["Bottom"]["Bottom"][0].append(entity[1]) entities["Bottom"]["Bottom"][1] = facet_markers[0][1] elif np.allclose(com, [x1, (y1 - y0) / 2, (z1 - z0) / 2]): entities["Bottom"]["Right"][0].append(entity[1]) entities["Bottom"]["Right"][1] = facet_markers[0][2] elif np.allclose(com, [x0, (y1 - y0) / 2, (z1 - z0) / 2]): entities["Bottom"]["Left"][0].append(entity[1]) entities["Bottom"]["Left"][1] = facet_markers[0][3] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z1]): entities["Bottom"]["Top"][0].append(entity[1]) entities["Bottom"]["Top"][1] = facet_markers[0][4] elif np.allclose(com, [(x1 - x0) / 2, y0, (z1 - z0) / 2]): entities["Bottom"]["Front"][0].append(entity[1]) entities["Bottom"]["Front"][1] = facet_markers[0][5] # Physical markers for top top_surfaces = gmsh.model.getBoundary([volumes[top_index]], recursive=False, oriented=False) for entity in top_surfaces: com = gmsh.model.occ.getCenterOfMass(entity[0], entity[1]) if np.allclose(com, [(x1 - x0) / 2, y1, (z2 - z1) / 2 + z1]): entities["Top"]["Back"][0].append(entity[1]) entities["Top"]["Back"][1] = facet_markers[1][0] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z1]): entities["Top"]["Bottom"][0].append(entity[1]) entities["Top"]["Bottom"][1] = facet_markers[1][1] elif np.allclose(com, [x1, (y1 - y0) / 2, (z2 - z1) / 2 + z1]): entities["Top"]["Right"][0].append(entity[1]) entities["Top"]["Right"][1] = facet_markers[1][2] elif np.allclose(com, [x0, (y1 - y0) / 2, (z2 - z1) / 2 + z1]): entities["Top"]["Left"][0].append(entity[1]) entities["Top"]["Left"][1] = facet_markers[1][3] elif np.allclose(com, [(x1 - x0) / 2, (y1 - y0) / 2, z2]): entities["Top"]["Top"][0].append(entity[1]) entities["Top"]["Top"][1] = facet_markers[1][4] elif np.allclose(com, [(x1 - x0) / 2, y0, (z2 - z1) / 2 + z1]): entities["Top"]["Front"][0].append(entity[1]) entities["Top"]["Front"][1] = facet_markers[1][5] # gmsh.model.occ.rotate(volumes, 0, 0, 0, # 1 / np.sqrt(2), 1 / np.sqrt(2), 0, theta) # Note: Rotation cannot be used on recombined surfaces gmsh.model.occ.synchronize() for volume in volume_entities.keys(): gmsh.model.addPhysicalGroup( volume_entities[volume][0][0], [volume_entities[volume][0][1]], tag=volume_entities[volume][1], ) gmsh.model.setPhysicalName(volume_entities[volume][0][0], volume_entities[volume][1], volume) for box in entities.keys(): for surface in entities[box].keys(): gmsh.model.addPhysicalGroup(2, entities[box][surface][0], tag=entities[box][surface][1]) gmsh.model.setPhysicalName(2, entities[box][surface][1], box + ":" + surface) # Set mesh sizes on the points from the surface we are extruding bottom_nodes = gmsh.model.getBoundary([(2, bottom)], recursive=True, oriented=False) gmsh.model.occ.mesh.setSize(bottom_nodes, res) top_nodes = gmsh.model.getBoundary([(2, top)], recursive=True, oriented=False) gmsh.model.occ.mesh.setSize(top_nodes, 2 * res) # NOTE: Need to synchronize after setting mesh sizes gmsh.model.occ.synchronize() # Generate mesh gmsh.option.setNumber("Mesh.MaxNumThreads1D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads2D", MPI.COMM_WORLD.size) gmsh.option.setNumber("Mesh.MaxNumThreads3D", MPI.COMM_WORLD.size) gmsh.model.mesh.generate(3) gmsh.model.mesh.setOrder(1) mesh, _, ft = gmshio.model_to_mesh(gmsh.model, MPI.COMM_WORLD, 0) gmsh.clear() gmsh.finalize() # NOTE: Hex mesh must be rotated after generation due to gmsh API mesh.geometry.x[:] = np.dot(r_matrix, mesh.geometry.x.T).T return (mesh, ft) @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("nonslip", [True, False]) def test_cube_contact(generate_hex_boxes, nonslip, get_assemblers): # noqa: F811 assemble_matrix, assemble_vector = get_assemblers comm = MPI.COMM_WORLD root = 0 # Generate mesh mesh_data = generate_hex_boxes mesh, mt = mesh_data fdim = mesh.topology.dim - 1 # Create functionspaces V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) # Helper for orienting traction # Bottom boundary is fixed in all directions u_bc = fem.Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(0.0) u_bc.x.petsc_vec.destroy() bottom_dofs = fem.locate_dofs_topological(V, fdim, mt.find(5)) bc_bottom = fem.dirichletbc(u_bc, bottom_dofs) g_vec = [0, 0, -4.25e-1] if not nonslip: # Helper for orienting traction r_matrix = dolfinx_mpc.utils.rotation_matrix([1 / np.sqrt(2), 1 / np.sqrt(2), 0], -theta) # Top boundary has a given deformation normal to the interface g_vec = np.dot(r_matrix, [0, 0, -4.25e-1]) # Top boundary has a given deformation normal to the interface def top_v(x): values = np.empty((3, x.shape[1])) values[0] = g_vec[0] values[1] = g_vec[1] values[2] = g_vec[2] return values u_top = fem.Function(V) u_top.interpolate(top_v) top_dofs = fem.locate_dofs_topological(V, fdim, mt.find(3)) bc_top = fem.dirichletbc(u_top, top_dofs) bcs = [bc_bottom, bc_top] # Elasticity parameters E = 1.0e3 nu = 0 mu = fem.Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = fem.Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) # Stress computation def sigma(v): return 2.0 * mu * ufl.sym(ufl.grad(v)) + lmbda * ufl.tr(ufl.sym(ufl.grad(v))) * ufl.Identity(len(v)) # Define variational problem u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(sigma(u), ufl.grad(v)) * ufl.dx rhs = ufl.inner(fem.Constant(mesh, default_scalar_type((0, 0, 0))), v) * ufl.dx bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Create LU solver solver = PETSc.KSP().create(comm) solver.setType("preonly") solver.setTolerances(rtol=1.0e-14) solver.getPC().setType("lu") # Create MPC contact condition and assemble matrices mpc = dolfinx_mpc.MultiPointConstraint(V) if nonslip: with Timer("~Contact: Create non-elastic constraint"): mpc.create_contact_inelastic_condition(mt, 4, 9, eps2=500 * np.finfo(default_scalar_type).resolution) else: with Timer("~Contact: Create contact constraint"): nh = dolfinx_mpc.utils.create_normal_approximation(V, mt, 4) mpc.create_contact_slip_condition(mt, 4, 9, nh, eps2=500 * np.finfo(default_scalar_type).resolution) mpc.finalize() with Timer("~TEST: Assemble bilinear form"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) with Timer("~TEST: Assemble vector"): b = assemble_vector(linear_form, mpc) dolfinx_mpc.apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(b, bcs) with Timer("~MPC: Solve"): solver.setOperators(A) uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 u_vec = uh.x.petsc_vec solver.solve(b, u_vec) u_vec.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) mpc.backsubstitution(uh) # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values dolfinx_mpc.utils.log_info("Solving reference problem with global matrix (using numpy)") with Timer("~TEST: Assemble bilinear form (unconstrained)"): A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(L_org, bcs) with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(u_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ L_np # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ d atol = 1000 * np.finfo(default_scalar_type).resolution nt.assert_allclose(uh_numpy, u_mpc, atol=atol) L_org.destroy() b.destroy() solver.destroy() list_timings(comm, [TimingType.wall]) dolfinx_mpc-0.9.1/python/tests/test_integration_domains.py000066400000000000000000000113261476141270300241640ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type, fem from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import compute_midpoints, create_unit_square, meshtags import dolfinx_mpc from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) def test_cell_domains(get_assemblers): # noqa: F811 """ Periodic MPC conditions over integral with different cell subdomains """ assemble_matrix, assemble_vector = get_assemblers N = 5 # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 15, N) V = fem.functionspace(mesh, ("Lagrange", 1)) def left_side(x): return x[0] < 0.5 tdim = mesh.topology.dim num_cells = mesh.topology.index_map(tdim).size_local cells = np.arange(num_cells, dtype=np.int32) mesh.topology.create_connectivity(tdim, tdim) cell_midpoints = compute_midpoints(mesh, tdim, cells) values = np.ones_like(cells) # All cells on right side marked one, all other with 1 values += left_side(cell_midpoints.T) ct = meshtags(mesh, mesh.topology.dim, cells, values) # Solve Problem without MPC for reference u = ufl.TrialFunction(V) v = ufl.TestFunction(V) x = ufl.SpatialCoordinate(mesh) c1 = fem.Constant(mesh, default_scalar_type(2)) c2 = fem.Constant(mesh, default_scalar_type(10)) dx = ufl.Measure("dx", domain=mesh, subdomain_data=ct) a = ( c1 * ufl.inner(ufl.grad(u), ufl.grad(v)) * dx(1) + c2 * ufl.inner(ufl.grad(u), ufl.grad(v)) * dx(2) + 0.87 * ufl.inner(u, v) * dx(1) ) rhs = ufl.inner(x[1], v) * dx(1) + ufl.inner(fem.Constant(mesh, default_scalar_type(1)), v) * dx(2) bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Generate reference matrices A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {} for i in range(0, N + 1): s_m_c[l2b([1, i / N])] = {l2b([0, i / N]): 1} mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() # Setup MPC system with Timer("~TEST: Assemble matrix old"): A = assemble_matrix(bilinear_form, mpc) with Timer("~TEST: Assemble vector"): b = assemble_vector(linear_form, mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) pc = solver.getPC() pc.setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) root = 0 comm = mesh.comm is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process (work with high precision ) scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK.astype(scipy_dtype), reduced_L.astype(scipy_dtype)) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d.astype(scipy_dtype) nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, ) solver.destroy() b.destroy() A.destroy() pc.destroy() L_org.destroy() A_org.destroy() list_timings(comm, [TimingType.wall]) dolfinx_mpc-0.9.1/python/tests/test_lifting.py000066400000000000000000000100601476141270300215550ustar00rootroot00000000000000# Copyright (C) 2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import fem from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import CellType, create_unit_square import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.skipif(MPI.COMM_WORLD.size > 1, reason="This test should only be run in serial.") @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) def test_lifting(get_assemblers): # noqa: F811 """ Test MPC lifting operation on a single cell """ assemble_matrix, assemble_vector = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 1, 1, CellType.quadrilateral) V = fem.functionspace(mesh, ("Lagrange", 1)) # Solve Problem without MPC for reference u = ufl.TrialFunction(V) v = ufl.TestFunction(V) x = ufl.SpatialCoordinate(mesh) f = x[1] * ufl.sin(2 * ufl.pi * x[0]) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx rhs = ufl.inner(f, v) * ufl.dx bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Create Dirichlet boundary condition u_bc = fem.Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(2.3) u_bc.x.petsc_vec.destroy() def dirichletboundary(x): return np.isclose(x[0], 1) mesh.topology.create_connectivity(2, 1) geometrical_dofs = fem.locate_dofs_geometrical(V, dirichletboundary) bc = fem.dirichletbc(u_bc, geometrical_dofs) bcs = [bc] # Generate reference matrices A_org = fem.petsc.assemble_matrix(bilinear_form, bcs=bcs) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(L_org, bcs) # Create multipoint constraint def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {l2b([0, 0]): {l2b([0, 1]): 1}} mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() A = assemble_matrix(bilinear_form, mpc, bcs=bcs) b = assemble_vector(linear_form, mpc) dolfinx_mpc.apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(b, bcs) solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) # constants = dolfinx_mpc.utils.gather_contants(mpc, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T * A_csr * K reduced_L = K.T @ (L_np) # - constants) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K @ (d) # + constants) nt.assert_allclose(uh_numpy, u_mpc, rtol=1e-5, atol=1e-8) list_timings(comm, [TimingType.wall]) L_org.destroy() b.destroy() solver.destroy() dolfinx_mpc-0.9.1/python/tests/test_linear_problem.py000066400000000000000000000077121476141270300231250ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type, fem from dolfinx.mesh import create_unit_square, locate_entities_boundary, meshtags import dolfinx_mpc import dolfinx_mpc.utils @pytest.mark.parametrize("u_from_mpc", [True, False]) def test_pipeline(u_from_mpc): # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 5, 5) V = fem.functionspace(mesh, ("Lagrange", 1)) # Solve Problem without MPC for reference u = ufl.TrialFunction(V) v = ufl.TestFunction(V) d = fem.Constant(mesh, default_scalar_type(0.08)) x = ufl.SpatialCoordinate(mesh) f = ufl.sin(2 * ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1]) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx - d * ufl.inner(u, v) * ufl.dx rhs = ufl.inner(f, v) * ufl.dx bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Generate reference matrices A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # Create multipoint constraint def periodic_relation(x): out_x = np.copy(x) out_x[0] = 1 - x[0] return out_x def PeriodicBoundary(x): return np.isclose(x[0], 1) facets = locate_entities_boundary(mesh, mesh.topology.dim - 1, PeriodicBoundary) arg_sort = np.argsort(facets) mt = meshtags(mesh, mesh.topology.dim - 1, facets[arg_sort], np.full(len(facets), 2, dtype=np.int32)) mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_periodic_constraint_topological(V, mt, 2, periodic_relation, [], 1.0) mpc.finalize() if u_from_mpc: uh = fem.Function(mpc.function_space) problem = dolfinx_mpc.LinearProblem( bilinear_form, linear_form, mpc, bcs=[], u=uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"}, ) problem.solve() root = 0 dolfinx_mpc.utils.compare_mpc_lhs(A_org, problem.A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, problem.b, mpc, root=root) # Gather LHS, RHS and solution on one process is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, atol=500 * np.finfo(default_scalar_type).resolution, ) L_org.destroy() A_org.destroy() else: uh = fem.Function(V) with pytest.raises(ValueError): problem = dolfinx_mpc.LinearProblem( bilinear_form, linear_form, mpc, bcs=[], u=uh, petsc_options={"ksp_type": "preonly", "pc_type": "lu"}, ) problem.solve() dolfinx_mpc-0.9.1/python/tests/test_matrix_assembly.py000066400000000000000000000070411476141270300233310ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI import dolfinx.fem as fem import numpy as np import pytest import ufl from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import CellType, create_unit_square import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 root = 0 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("master_point", [[1, 1], [0, 1]]) @pytest.mark.parametrize("degree", range(1, 4)) @pytest.mark.parametrize("celltype", [CellType.quadrilateral, CellType.triangle]) def test_mpc_assembly(master_point, degree, celltype, get_assemblers): # noqa: F811 assemble_matrix, _ = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 5, 3, celltype) V = fem.functionspace(mesh, ("Lagrange", degree)) # Test against generated code and general assembler u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx bilinear_form = fem.form(a) def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = { l2b([1, 0]): {l2b([0, 1]): 0.43, l2b([1, 1]): 0.11}, l2b([0, 0]): {l2b(master_point): 0.69}, } mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() with Timer("~TEST: Assemble matrix"): A_mpc = assemble_matrix(bilinear_form, mpc) with Timer("~TEST: Compare with numpy"): # Create globally reduced system A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() dolfinx_mpc.utils.compare_mpc_lhs(A_org, A_mpc, mpc) # Check if ordering of connected dofs matter @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("master_point", [[1, 1], [0, 1]]) @pytest.mark.parametrize("degree", range(1, 4)) @pytest.mark.parametrize("celltype", [CellType.triangle, CellType.quadrilateral]) def test_slave_on_same_cell(master_point, degree, celltype, get_assemblers): # noqa: F811 assemble_matrix, _ = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 1, 8, celltype) V = fem.functionspace(mesh, ("Lagrange", degree)) # Build master slave map s_m_c = { np.array([1, 0], dtype=mesh.geometry.x.dtype).tobytes(): { np.array([0, 1], dtype=mesh.geometry.x.dtype).tobytes(): 0.43, np.array([1, 1], dtype=mesh.geometry.x.dtype).tobytes(): 0.11, }, np.array([0, 0], dtype=mesh.geometry.x.dtype).tobytes(): { np.array(master_point, dtype=mesh.geometry.x.dtype).tobytes(): 0.69 }, } with Timer("~TEST: MPC INIT"): mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() # Test against generated code and general assembler u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx bilinear_form = fem.form(a) with Timer("~TEST: Assemble matrix"): A_mpc = assemble_matrix(bilinear_form, mpc) with Timer("~TEST: Compare with numpy"): # Create globally reduced system A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() dolfinx_mpc.utils.compare_mpc_lhs(A_org, A_mpc, mpc) list_timings(mesh.comm, [TimingType.wall]) dolfinx_mpc-0.9.1/python/tests/test_mpc_pipeline.py000066400000000000000000000151531476141270300225750ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type, fem from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import create_unit_square import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("master_point", [[1, 1], [0, 1]]) def test_pipeline(master_point, get_assemblers): # noqa: F811 assemble_matrix, assemble_vector = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 3, 5) V = fem.functionspace(mesh, ("Lagrange", 1)) # Solve Problem without MPC for reference u = ufl.TrialFunction(V) v = ufl.TestFunction(V) d = fem.Constant(mesh, default_scalar_type(1.5)) c = fem.Constant(mesh, default_scalar_type(2)) x = ufl.SpatialCoordinate(mesh) f = c * ufl.sin(2 * ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1]) g = fem.Function(V) g.interpolate(lambda x: np.sin(x[0]) * x[1]) h = fem.Function(V) h.interpolate(lambda x: 2 + x[1] * x[0]) a = d * g * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx rhs = h * ufl.inner(f, v) * ufl.dx bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Generate reference matrices A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # Create multipoint constraint def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = { l2b([1, 0]): {l2b([0, 1]): 0.43, l2b([1, 1]): 0.11}, l2b([0, 0]): {l2b(master_point): 0.69}, } mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() A = assemble_matrix(bilinear_form, mpc) b = assemble_vector(linear_form, mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, ) list_timings(comm, [TimingType.wall]) @pytest.mark.parametrize("master_point", [[1, 1], [0, 1]]) def test_linearproblem(master_point): # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 3, 5) V = fem.functionspace(mesh, ("Lagrange", 1)) # Solve Problem without MPC for reference u = ufl.TrialFunction(V) v = ufl.TestFunction(V) d = fem.Constant(mesh, default_scalar_type(1.5)) c = fem.Constant(mesh, default_scalar_type(2)) x = ufl.SpatialCoordinate(mesh) f = c * ufl.sin(2 * ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1]) g = fem.Function(V) g.interpolate(lambda x: np.sin(x[0]) * x[1]) h = fem.Function(V) h.interpolate(lambda x: 2 + x[1] * x[0]) a = d * g * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx rhs = h * ufl.inner(f, v) * ufl.dx # Generate reference matrices A_org = fem.petsc.assemble_matrix(fem.form(a)) A_org.assemble() L_org = fem.petsc.assemble_vector(fem.form(rhs)) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # Create multipoint constraint def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = { l2b([1, 0]): {l2b([0, 1]): 0.43, l2b([1, 1]): 0.11}, l2b([0, 0]): {l2b(master_point): 0.69}, } mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() problem = dolfinx_mpc.LinearProblem(a, rhs, mpc, bcs=[], petsc_options={"ksp_type": "preonly", "pc_type": "lu"}) uh = problem.solve() root = 0 comm = mesh.comm with Timer("~TEST: Compare"): # Gather LHS, RHS and solution on one process is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, ) list_timings(comm, [TimingType.wall]) dolfinx_mpc-0.9.1/python/tests/test_nonlinear_assembly.py000066400000000000000000000210421476141270300240070ustar00rootroot00000000000000# Copyright (C) 2022 Nathan Sime # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import basix import dolfinx import dolfinx.fem.petsc import dolfinx.la as _la import dolfinx.nls.petsc import numpy as np import pytest import ufl import dolfinx_mpc class NonlinearMPCProblem(dolfinx.fem.petsc.NonlinearProblem): def __init__(self, F, u, mpc, bcs=[], J=None, form_compiler_options={}, jit_options={}): self.mpc = mpc super().__init__(F, u, bcs=bcs, J=J, form_compiler_options=form_compiler_options, jit_options=jit_options) def F(self, x: PETSc.Vec, F: PETSc.Vec): # type: ignore with F.localForm() as F_local: F_local.set(0.0) dolfinx_mpc.assemble_vector(self._L, self.mpc, b=F) # Apply boundary condition dolfinx_mpc.apply_lifting( F, [self._a], bcs=[self.bcs], constraint=self.mpc, x0=[x], scale=dolfinx.default_scalar_type(-1.0), ) F.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) # type: ignore dolfinx.fem.petsc.set_bc(F, self.bcs, x, -1.0) def J(self, x: PETSc.Vec, A: PETSc.Mat): # type: ignore A.zeroEntries() dolfinx_mpc.assemble_matrix(self._a, self.mpc, bcs=self.bcs, A=A) A.assemble() class NewtonSolverMPC(dolfinx.cpp.nls.petsc.NewtonSolver): def __init__( self, comm: MPI.Intracomm, problem: NonlinearMPCProblem, mpc: dolfinx_mpc.MultiPointConstraint, ): """A Newton solver for non-linear MPC problems.""" super().__init__(comm) self.mpc = mpc self.u_mpc = dolfinx.fem.Function(mpc.function_space) # Create matrix and vector to be used for assembly of the non-linear # MPC problem self._A = dolfinx_mpc.cpp.mpc.create_matrix(problem.a._cpp_object, mpc._cpp_object) self._b = _la.create_petsc_vector(mpc.function_space.dofmap.index_map, mpc.function_space.dofmap.index_map_bs) self.setF(problem.F, self._b) self.setJ(problem.J, self._A) self.set_form(problem.form) self.set_update(self.update) def update(self, solver: dolfinx.nls.petsc.NewtonSolver, dx: PETSc.Vec, x: PETSc.Vec): # type: ignore # We need to use a vector created on the MPC's space to update ghosts self.u_mpc.x.petsc_vec.array = x.array_r self.u_mpc.x.petsc_vec.axpy(-1.0, dx) self.u_mpc.x.petsc_vec.ghostUpdate( addv=PETSc.InsertMode.INSERT, # type: ignore mode=PETSc.ScatterMode.FORWARD, # type: ignore ) # type: ignore self.mpc.homogenize(self.u_mpc) self.mpc.backsubstitution(self.u_mpc) x.array = self.u_mpc.x.petsc_vec.array_r x.ghostUpdate( addv=PETSc.InsertMode.INSERT, # type: ignore mode=PETSc.ScatterMode.FORWARD, # type: ignore ) # type: ignore def solve(self, u: dolfinx.fem.Function): """Solve non-linear problem into function u. Returns the number of iterations and if the solver converged.""" n, converged = super().solve(u.x.petsc_vec) u.x.scatter_forward() return n, converged @property def A(self) -> PETSc.Mat: # type: ignore """Jacobian matrix""" return self._A @property def b(self) -> PETSc.Vec: # type: ignore """Residual vector""" return self._b @pytest.mark.skipif( np.issubdtype(dolfinx.default_scalar_type, np.complexfloating), reason="This test does not work in complex mode.", ) @pytest.mark.parametrize("poly_order", [1, 2, 3]) def test_nonlinear_poisson(poly_order): # Solve a standard Poisson problem with known solution which has # rotational symmetry of pi/2 at (x, y) = (0.5, 0.5). Therefore we may # impose MPCs on those DoFs which lie on the symmetry plane(s) and test # our numerical approximation. We do not impose any constraints at the # rotationally degenerate point (x, y) = (0.5, 0.5). N_vals = np.array([4, 8, 16], dtype=np.int32) l2_error = np.zeros_like(N_vals, dtype=np.double) for run_no, N in enumerate(N_vals): mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N) V = dolfinx.fem.functionspace(mesh, ("Lagrange", poly_order)) u_bc = dolfinx.fem.Function(V) u_bc.x.array[:] = 0.0 mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim) facets = dolfinx.mesh.exterior_facet_indices(mesh.topology) topological_dofs = dolfinx.fem.locate_dofs_topological(V, 1, facets) zero = np.array(0, dtype=dolfinx.default_scalar_type) bc = dolfinx.fem.dirichletbc(zero, topological_dofs, V) bcs = [bc] # Define variational problem u = dolfinx.fem.Function(V) v = ufl.TestFunction(V) x = ufl.SpatialCoordinate(mesh) u_soln = ufl.sin(ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1]) f = -ufl.div((1 + u_soln**2) * ufl.grad(u_soln)) F = ufl.inner((1 + u**2) * ufl.grad(u), ufl.grad(v)) * ufl.dx - ufl.inner(f, v) * ufl.dx J = ufl.derivative(F, u) # -- Impose the pi/2 rotational symmetry of the solution as a constraint, # -- except at the centre DoF def periodic_boundary(x): eps = 1000 * np.finfo(x.dtype).resolution return np.isclose(x[0], 0.5, atol=eps) & ((x[1] < 0.5 - eps) | (x[1] > 0.5 + eps)) def periodic_relation(x): out_x = np.zeros_like(x) out_x[0] = x[1] out_x[1] = x[0] out_x[2] = x[2] return out_x mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_periodic_constraint_geometrical(V, periodic_boundary, periodic_relation, bcs) mpc.finalize() # Sanity check that the MPC class has some constraints to impose num_slaves_global = mesh.comm.allreduce(len(mpc.slaves), op=MPI.SUM) num_masters_global = mesh.comm.allreduce(len(mpc.masters.array), op=MPI.SUM) assert num_slaves_global > 0 assert num_masters_global == num_slaves_global problem = NonlinearMPCProblem(F, u, mpc, bcs=bcs, J=J) solver = NewtonSolverMPC(mesh.comm, problem, mpc) solver.atol = 1e1 * np.finfo(u.x.array.dtype).resolution solver.rtol = 1e1 * np.finfo(u.x.array.dtype).resolution # Ensure the solver works with nonzero initial guess u.interpolate(lambda x: x[0] ** 2 * x[1] ** 2) solver.solve(u) l2_error_local = dolfinx.fem.assemble_scalar(dolfinx.fem.form((u - u_soln) ** 2 * ufl.dx)) l2_error_global = mesh.comm.allreduce(l2_error_local, op=MPI.SUM) l2_error[run_no] = l2_error_global**0.5 rates = np.log(l2_error[:-1] / l2_error[1:]) / np.log(2.0) assert np.all(rates > poly_order + 0.9) @pytest.mark.parametrize("tensor_order", [0, 1, 2]) @pytest.mark.parametrize("poly_order", [1, 2, 3]) def test_homogenize(tensor_order, poly_order): mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8) if tensor_order == 0: shape = () elif tensor_order == 1: shape = (mesh.geometry.dim,) elif tensor_order == 2: shape = (mesh.geometry.dim, mesh.geometry.dim) else: pytest.xfail("Unknown tensor order") cellname = mesh.ufl_cell().cellname() el = basix.ufl.element(basix.ElementFamily.P, cellname, poly_order, shape=shape, dtype=mesh.geometry.x.dtype) V = dolfinx.fem.functionspace(mesh, el) def periodic_boundary(x): return np.isclose(x[0], 0.0) def periodic_relation(x): out_x = np.zeros(x.shape) out_x[0] = 1.0 - x[0] out_x[1] = x[1] out_x[2] = x[2] return out_x mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_periodic_constraint_geometrical(V, periodic_boundary, periodic_relation, []) mpc.finalize() # Sanity check that the MPC class has some constraints to impose num_slaves_global = mesh.comm.allreduce(len(mpc.slaves), op=MPI.SUM) assert num_slaves_global > 0 u = dolfinx.fem.Function(V) u.x.petsc_vec.set(1.0) assert np.isclose(u.x.petsc_vec.min()[1], u.x.petsc_vec.max()[1]) assert np.isclose(u.x.petsc_vec.array_r[0], 1.0) mpc.homogenize(u) with u.x.petsc_vec.localForm() as u_: for i in range(V.dofmap.index_map.size_local * V.dofmap.index_map_bs): if i in mpc.slaves: assert np.isclose(u_.array_r[i], 0.0) else: assert np.isclose(u_.array_r[i], 1.0) u.x.petsc_vec.destroy() dolfinx_mpc-0.9.1/python/tests/test_rectangular_assembly.py000066400000000000000000000164761476141270300243500ustar00rootroot00000000000000# Copyright (C) 2022 Nathan Sime # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import basix import dolfinx import dolfinx.fem import dolfinx.mesh import numpy as np import pytest import ufl import dolfinx_mpc import dolfinx_mpc.utils @pytest.mark.parametrize("cell_type", (dolfinx.cpp.mesh.CellType.triangle, dolfinx.cpp.mesh.CellType.quadrilateral)) @pytest.mark.parametrize("ghost_mode", (dolfinx.cpp.mesh.GhostMode.none, dolfinx.cpp.mesh.GhostMode.shared_facet)) def test_mixed_element(cell_type, ghost_mode): N = 4 mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N, cell_type=cell_type, ghost_mode=ghost_mode) # Inlet velocity Dirichlet BC bc_facets = dolfinx.mesh.locate_entities_boundary( mesh, mesh.topology.dim - 1, lambda x: np.isclose(x[0], 0.0, atol=500 * np.finfo(x.dtype).resolution), ) other_facets = dolfinx.mesh.locate_entities_boundary( mesh, mesh.topology.dim - 1, lambda x: np.isclose(x[0], 1.0, atol=500 * np.finfo(x.dtype).resolution), ) arg_sort = np.argsort(other_facets) mt = dolfinx.mesh.meshtags(mesh, mesh.topology.dim - 1, other_facets[arg_sort], np.full_like(other_facets, 1)) # Rotate the mesh to induce more interesting slip BCs th = np.pi / 4.0 rot = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]]) gdim = mesh.geometry.dim mesh.geometry.x[:, :gdim] = (rot @ mesh.geometry.x[:, :gdim].T).T # Create the function space cellname = mesh.ufl_cell().cellname() Ve = basix.ufl.element( basix.ElementFamily.P, cellname, 2, shape=(mesh.geometry.dim,), dtype=dolfinx.default_real_type ) Qe = basix.ufl.element(basix.ElementFamily.P, cellname, 1, dtype=dolfinx.default_real_type) V = dolfinx.fem.functionspace(mesh, Ve) Q = dolfinx.fem.functionspace(mesh, Qe) W = dolfinx.fem.functionspace(mesh, basix.ufl.mixed_element([Ve, Qe])) inlet_velocity = dolfinx.fem.Function(V) inlet_velocity.interpolate( lambda x: np.zeros((mesh.geometry.dim, x[0].shape[0]), dtype=dolfinx.default_scalar_type) ) inlet_velocity.x.scatter_forward() # -- Nested assembly dofs = dolfinx.fem.locate_dofs_topological(V, 1, bc_facets) bc1 = dolfinx.fem.dirichletbc(inlet_velocity, dofs) # Collect Dirichlet boundary conditions bcs = [bc1] mpc_v = dolfinx_mpc.MultiPointConstraint(V) n_approx = dolfinx_mpc.utils.create_normal_approximation(V, mt, 1) mpc_v.create_slip_constraint(V, (mt, 1), n_approx, bcs=bcs) mpc_v.finalize() mpc_q = dolfinx_mpc.MultiPointConstraint(Q) mpc_q.finalize() f = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type((0, 0))) (u, p) = ufl.TrialFunction(V), ufl.TrialFunction(Q) (v, q) = ufl.TestFunction(V), ufl.TestFunction(Q) a00 = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx a01 = -ufl.inner(p, ufl.div(v)) * ufl.dx a10 = -ufl.inner(ufl.div(u), q) * ufl.dx a11 = None L0 = ufl.inner(f, v) * ufl.dx L1 = ufl.inner(dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(0.0)), q) * ufl.dx n = ufl.FacetNormal(mesh) g_tau = ufl.as_vector((0.0, 0.0)) ds = ufl.Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=1) a00 -= ufl.inner(ufl.outer(n, n) * ufl.dot(ufl.grad(u), n), v) * ds a01 -= ufl.inner(ufl.outer(n, n) * ufl.dot(-p * ufl.Identity(u.ufl_shape[0]), n), v) * ds L0 += ufl.inner(g_tau, v) * ds a_nest = dolfinx.fem.form(((a00, a01), (a10, a11))) L_nest = dolfinx.fem.form((L0, L1)) # Assemble MPC nest matrix A_nest = dolfinx_mpc.create_matrix_nest(a_nest, [mpc_v, mpc_q]) dolfinx_mpc.assemble_matrix_nest(A_nest, a_nest, [mpc_v, mpc_q], bcs) A_nest.assemble() # Assemble original nest matrix A_org_nest = dolfinx.fem.petsc.assemble_matrix_nest(a_nest, bcs) A_org_nest.assemble() # MPC nested rhs b_nest = dolfinx_mpc.create_vector_nest(L_nest, [mpc_v, mpc_q]) dolfinx_mpc.assemble_vector_nest(b_nest, L_nest, [mpc_v, mpc_q]) dolfinx.fem.petsc.apply_lifting_nest(b_nest, a_nest, bcs) for b_sub in b_nest.getNestSubVecs(): b_sub.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) bcs0 = dolfinx.fem.bcs_by_block(dolfinx.fem.extract_function_spaces(L_nest), bcs) dolfinx.fem.petsc.set_bc_nest(b_nest, bcs0) # Original dolfinx rhs b_org_nest = dolfinx.fem.petsc.assemble_vector_nest(L_nest) dolfinx.fem.petsc.apply_lifting_nest(b_org_nest, a_nest, bcs) for b_sub in b_org_nest.getNestSubVecs(): b_sub.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) dolfinx.fem.petsc.set_bc_nest(b_org_nest, bcs0) # -- Monolithic assembly dofs = dolfinx.fem.locate_dofs_topological((W.sub(0), V), 1, bc_facets) bc1 = dolfinx.fem.dirichletbc(inlet_velocity, dofs, W.sub(0)) bcs = [bc1] V, _ = W.sub(0).collapse() mpc_vq = dolfinx_mpc.MultiPointConstraint(W) n_approx = dolfinx_mpc.utils.create_normal_approximation(V, mt, 1) mpc_vq.create_slip_constraint(W.sub(0), (mt, 1), n_approx, bcs=bcs) mpc_vq.finalize() f = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type((0, 0))) (u, p) = ufl.TrialFunctions(W) (v, q) = ufl.TestFunctions(W) a = ( ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx - ufl.inner(p, ufl.div(v)) * ufl.dx - ufl.inner(ufl.div(u), q) * ufl.dx ) L = ufl.inner(f, v) * ufl.dx + ufl.inner(dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(0.0)), q) * ufl.dx # No prescribed shear stress n = ufl.FacetNormal(mesh) g_tau = ufl.as_vector((0.0, 0.0)) ds = ufl.Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=1) # Terms due to slip condition # Explained in for instance: https://arxiv.org/pdf/2001.10639.pdf a -= ufl.inner(ufl.outer(n, n) * ufl.dot(ufl.grad(u), n), v) * ds a -= ufl.inner(ufl.outer(n, n) * ufl.dot(-p * ufl.Identity(u.ufl_shape[0]), n), v) * ds L += ufl.inner(g_tau, v) * ds a, L = dolfinx.fem.form(a), dolfinx.fem.form(L) # Assemble LHS matrix and RHS vector A = dolfinx_mpc.assemble_matrix(a, mpc_vq, bcs) A.assemble() A_org = dolfinx.fem.petsc.assemble_matrix(a, bcs) A_org.assemble() b = dolfinx_mpc.assemble_vector(L, mpc_vq) b_org = dolfinx.fem.petsc.assemble_vector(L) # Set Dirichlet boundary condition values in the RHS dolfinx_mpc.apply_lifting(b, [a], [bcs], mpc_vq) b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) dolfinx.fem.petsc.set_bc(b, bcs) dolfinx.fem.petsc.apply_lifting(b_org, [a], [bcs]) b_org.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) dolfinx.fem.petsc.set_bc(b_org, bcs) # -- Verification def nest_matrix_norm(A): assert A.getType() == "nest" nrows, ncols = A.getNestSize() sub_A = [A.getNestSubMatrix(row, col) for row in range(nrows) for col in range(ncols)] return sum(map(lambda A_: A_.norm() ** 2 if A_ else 0.0, sub_A)) ** 0.5 # -- Ensure monolithic and nest matrices are the same assert np.isclose(nest_matrix_norm(A_nest), A.norm()) for b_sub in b_nest.getNestSubVecs(): b_sub.destroy() b_nest.destroy() for b_sub in b_org_nest.getNestSubVecs(): b_sub.destroy() b_org_nest.destroy() dolfinx_mpc-0.9.1/python/tests/test_surface_integral.py000066400000000000000000000175221476141270300234500ustar00rootroot00000000000000# Copyright (C) 2020-2021 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import create_unit_square, locate_entities_boundary, meshtags import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) def test_surface_integrals(get_assemblers): # noqa: F811 assemble_matrix, assemble_vector = get_assemblers N = 4 mesh = create_unit_square(MPI.COMM_WORLD, N, N) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) # Fixed Dirichlet BC on the left wall def left_wall(x): return np.isclose(x[0], 0, atol=100 * np.finfo(x.dtype).resolution) fdim = mesh.topology.dim - 1 left_facets = locate_entities_boundary(mesh, fdim, left_wall) bc_dofs = fem.locate_dofs_topological(V, 1, left_facets) u_bc = fem.Function(V) u_bc.x.array[:] = 0 bc = fem.dirichletbc(u_bc, bc_dofs) bcs = [bc] # Traction on top of domain def top(x): return np.isclose(x[1], 1, atol=100 * np.finfo(x.dtype).resolution) top_facets = locate_entities_boundary(mesh, 1, top) arg_sort = np.argsort(top_facets) mt = meshtags(mesh, fdim, top_facets[arg_sort], np.full(len(top_facets), 3, dtype=np.int32)) ds = ufl.Measure("ds", domain=mesh, subdomain_data=mt, subdomain_id=3) g = fem.Constant(mesh, default_scalar_type((0, -9.81e2))) # Elasticity parameters E = 1.0e2 nu = 0.0 mu = fem.Constant(mesh, default_scalar_type(E / (2.0 * (1.0 + nu)))) lmbda = fem.Constant(mesh, default_scalar_type(E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu)))) # Stress computation def sigma(v): return 2.0 * mu * ufl.sym(ufl.grad(v)) + lmbda * ufl.tr(ufl.sym(ufl.grad(v))) * ufl.Identity(len(v)) # Define variational problem u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(sigma(u), ufl.grad(v)) * ufl.dx rhs = ufl.inner(fem.Constant(mesh, default_scalar_type((0, 0))), v) * ufl.dx + ufl.inner(g, v) * ds bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Setup LU solver solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) # Setup multipointconstraint def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {} for i in range(1, N): s_m_c[l2b([1, i / N])] = {l2b([1, 1]): 0.8} mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c, 1, 1) mpc.finalize() with Timer("~TEST: Assemble matrix old"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) with Timer("~TEST: Assemble vector"): b = assemble_vector(linear_form, mpc) dolfinx_mpc.apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(b, bcs) solver.setOperators(A) uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values # Generate reference matrices and unconstrained solution A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(L_org, bcs) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): # Gather LHS, RHS and solution on one process is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, ) L_org.destroy() b.destroy() A_org.destroy() solver.destroy() list_timings(comm, [TimingType.wall]) @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) def test_surface_integral_dependency(get_assemblers): # noqa: F811 assemble_matrix, assemble_vector = get_assemblers N = 10 mesh = create_unit_square(MPI.COMM_WORLD, N, N) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) def top(x): return np.isclose(x[1], 1) fdim = mesh.topology.dim - 1 top_facets = locate_entities_boundary(mesh, fdim, top) indices = np.array([], dtype=np.intc) values = np.array([], dtype=np.intc) markers = {3: top_facets} for key in markers.keys(): indices = np.append(indices, markers[key]) values = np.append(values, np.full(len(markers[key]), key, dtype=np.intc)) sort = np.argsort(indices) mt = meshtags( mesh, mesh.topology.dim - 1, np.array(indices[sort], dtype=np.intc), np.array(values[sort], dtype=np.intc), ) ds = ufl.Measure("ds", domain=mesh, subdomain_data=mt) g = fem.Constant(mesh, default_scalar_type((2, 1))) h = fem.Constant(mesh, default_scalar_type((3, 2))) # Define variational problem u = ufl.TrialFunction(V) v = ufl.TestFunction(V) a = ufl.inner(u, v) * ds(3) + ufl.inner(ufl.grad(u), ufl.grad(v)) * ds rhs = ufl.inner(g, v) * ds + ufl.inner(h, v) * ds(3) bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Create multipoint constraint and assemble system def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {} for i in range(1, N): s_m_c[l2b([1, i / N])] = {l2b([1, 1]): 0.3} mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c, 1, 1) mpc.finalize() with Timer("~TEST: Assemble matrix"): A = assemble_matrix(bilinear_form, mpc) with Timer("~TEST: Assemble vector"): b = assemble_vector(linear_form, mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # Solve the MPC problem using a global transformation matrix # and numpy solvers to get reference values # Generate reference matrices and unconstrained solution A_org = fem.petsc.assemble_matrix(bilinear_form) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) L_org.destroy() b.destroy() A_org.destroy() A.destroy() list_timings(comm, [TimingType.wall]) dolfinx_mpc-0.9.1/python/tests/test_vector_assembly.py000066400000000000000000000041021476141270300233220ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import pytest import ufl from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import CellType, create_unit_square import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("master_point", [[1, 1], [0, 1]]) @pytest.mark.parametrize("degree", range(1, 4)) @pytest.mark.parametrize("celltype", [CellType.quadrilateral, CellType.triangle]) def test_mpc_assembly(master_point, degree, celltype, get_assemblers): # noqa: F811 _, assemble_vector = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, 3, 5, celltype) V = fem.functionspace(mesh, ("Lagrange", degree)) # Generate reference vector v = ufl.TestFunction(V) x = ufl.SpatialCoordinate(mesh) f = ufl.sin(2 * ufl.pi * x[0]) * ufl.sin(ufl.pi * x[1]) rhs = ufl.inner(f, v) * ufl.dx linear_form = fem.form(rhs) def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = { l2b([1, 0]): {l2b([0, 1]): 0.43, l2b([1, 1]): 0.11}, l2b([0, 0]): {l2b(master_point): 0.69}, } mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c) mpc.finalize() b = assemble_vector(linear_form, mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) # Reduce system with global matrix K after assembly L_org = fem.petsc.assemble_vector(linear_form) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) list_timings(comm, [TimingType.wall]) b.destroy() L_org.destroy() dolfinx_mpc-0.9.1/python/tests/test_vector_poisson.py000066400000000000000000000112021476141270300231740ustar00rootroot00000000000000# Copyright (C) 2020 Jørgen S. Dokken # # This file is part of DOLFINX_MPC # # SPDX-License-Identifier: MIT from __future__ import annotations from mpi4py import MPI from petsc4py import PETSc import dolfinx.fem as fem import numpy as np import numpy.testing as nt import pytest import scipy.sparse.linalg import ufl from dolfinx import default_scalar_type from dolfinx.common import Timer, TimingType, list_timings from dolfinx.mesh import create_unit_square import dolfinx_mpc import dolfinx_mpc.utils from dolfinx_mpc.utils import get_assemblers # noqa: F401 @pytest.mark.parametrize("get_assemblers", ["C++", "numba"], indirect=True) @pytest.mark.parametrize("Nx", [4]) @pytest.mark.parametrize("Ny", [2, 3]) @pytest.mark.parametrize("slave_space", [0, 1]) @pytest.mark.parametrize("master_space", [0, 1]) def test_vector_possion(Nx, Ny, slave_space, master_space, get_assemblers): # noqa: F811 assemble_matrix, assemble_vector = get_assemblers # Create mesh and function space mesh = create_unit_square(MPI.COMM_WORLD, Nx, Ny) V = fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,))) def boundary(x): return np.isclose(x.T, [0, 0, 0], atol=500 * np.finfo(x.dtype).resolution).all(axis=1) # Define boundary conditions (HAS TO BE NON-MASTER NODES) u_bc = fem.Function(V) with u_bc.x.petsc_vec.localForm() as u_local: u_local.set(0.0) u_bc.x.petsc_vec.destroy() bdofsV = fem.locate_dofs_geometrical(V, boundary) bc = fem.dirichletbc(u_bc, bdofsV) bcs = [bc] # Define variational problem u = ufl.TrialFunction(V) v = ufl.TestFunction(V) x = ufl.SpatialCoordinate(mesh) f = ufl.as_vector((-5 * x[1], 7 * x[0])) a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx rhs = ufl.inner(f, v) * ufl.dx bilinear_form = fem.form(a) linear_form = fem.form(rhs) # Setup LU solver solver = PETSc.KSP().create(mesh.comm) solver.setType(PETSc.KSP.Type.PREONLY) pc = solver.getPC() pc.setType(PETSc.PC.Type.LU) pc.setFactorSolverType("mumps") # Create multipoint constraint def l2b(li): return np.array(li, dtype=mesh.geometry.x.dtype).tobytes() s_m_c = {l2b([1, 0]): {l2b([1, 1]): 0.1, l2b([0.5, 1]): 0.3}} mpc = dolfinx_mpc.MultiPointConstraint(V) mpc.create_general_constraint(s_m_c, slave_space, master_space) mpc.finalize() with Timer("~TEST: Assemble matrix"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) with Timer("~TEST: Assemble vector"): b = dolfinx_mpc.assemble_vector(linear_form, mpc) dolfinx_mpc.apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(b, bcs) solver.setOperators(A) uh = fem.Function(mpc.function_space) uh.x.array[:] = 0 solver.solve(b, uh.x.petsc_vec) uh.x.scatter_forward() mpc.backsubstitution(uh) # Generate reference matrices for unconstrained problem A_org = fem.petsc.assemble_matrix(bilinear_form, bcs) A_org.assemble() L_org = fem.petsc.assemble_vector(linear_form) fem.petsc.apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) fem.petsc.set_bc(L_org, bcs) root = 0 comm = mesh.comm with Timer("~TEST: Compare"): dolfinx_mpc.utils.compare_mpc_lhs(A_org, A, mpc, root=root) dolfinx_mpc.utils.compare_mpc_rhs(L_org, b, mpc, root=root) # Gather LHS, RHS and solution on one process is_complex = np.issubdtype(default_scalar_type, np.complexfloating) # type: ignore scipy_dtype = np.complex128 if is_complex else np.float64 A_csr = dolfinx_mpc.utils.gather_PETScMatrix(A_org, root=root) K = dolfinx_mpc.utils.gather_transformation_matrix(mpc, root=root) L_np = dolfinx_mpc.utils.gather_PETScVector(L_org, root=root) u_mpc = dolfinx_mpc.utils.gather_PETScVector(uh.x.petsc_vec, root=root) if MPI.COMM_WORLD.rank == root: KTAK = K.T.astype(scipy_dtype) * A_csr.astype(scipy_dtype) * K.astype(scipy_dtype) reduced_L = K.T.astype(scipy_dtype) @ L_np.astype(scipy_dtype) # Solve linear system d = scipy.sparse.linalg.spsolve(KTAK, reduced_L) # Back substitution to full solution vector uh_numpy = K.astype(scipy_dtype) @ d nt.assert_allclose( uh_numpy.astype(u_mpc.dtype), u_mpc, rtol=500 * np.finfo(default_scalar_type).resolution, ) b.destroy() L_org.destroy() solver.destroy() list_timings(comm, [TimingType.wall]) dolfinx_mpc-0.9.1/sonar-project.properties000066400000000000000000000011121476141270300207270ustar00rootroot00000000000000sonar.projectKey=jorgensd_dolfinx_mpc sonar.organization=jorgensd # This is the name and version displayed in the SonarCloud UI. #sonar.projectName=dolfinx_mpc #sonar.projectVersion=1.0 # Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows. #sonar.sources=. # Set python version sonar.python.version=3.7, 3.8, 3.9 sonar.cfamily.threads=2 # Disable cahce until I have time to figure out how to use it sonar.cfamily.cache.enabled=false # Python coverage report sonar.core.codeCoveragePlugin=cobertura sonar.python.coverage.reportPaths=*coverage*.xml dolfinx_mpc-0.9.1/tox.ini000066400000000000000000000000551476141270300153430ustar00rootroot00000000000000[flake8] max-line-length = 120 ignore = W503