pax_global_header00006660000000000000000000000064150002501660014505gustar00rootroot0000000000000052 comment=4da99d392f649670fcf4f50bd70aa451cd69d4aa azure-vm-utils-0.6.0/000077500000000000000000000000001500025016600144145ustar00rootroot00000000000000azure-vm-utils-0.6.0/.clang-format000066400000000000000000000000301500025016600167600ustar00rootroot00000000000000BasedOnStyle: Microsoft azure-vm-utils-0.6.0/.github/000077500000000000000000000000001500025016600157545ustar00rootroot00000000000000azure-vm-utils-0.6.0/.github/workflows/000077500000000000000000000000001500025016600200115ustar00rootroot00000000000000azure-vm-utils-0.6.0/.github/workflows/codeql.yml000066400000000000000000000103671500025016600220120ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ "main" ] pull_request: schedule: - cron: '44 19 * * 3' jobs: analyze: name: Analyze (${{ matrix.language }}) # Runner size impacts CodeQL analysis time. To learn more, please see: # - https://gh.io/recommended-hardware-resources-for-running-codeql # - https://gh.io/supported-runners-and-hardware-resources # - https://gh.io/using-larger-runners (GitHub.com only) # Consider using larger runners or machines with greater resources for possible analysis time improvements. 
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} permissions: # required for all workflows security-events: write # required to fetch internal or private CodeQL packs packages: read # only required for workflows in private repositories actions: read contents: read strategy: fail-fast: false matrix: include: - language: c-cpp build-mode: autobuild # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' # Use `c-cpp` to analyze code written in C, C++ or both # Use 'java-kotlin' to analyze code written in Java, Kotlin or both # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. 
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality # If the analyze step fails for one of the languages you are analyzing with # "We were unable to automatically build your code", modify the matrix above # to set the build mode to "manual" for that language. Then modify this step # to build your code. # â„šī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - if: matrix.build-mode == 'manual' shell: bash run: | echo 'If you are using a "manual" build mode for one or more of the' \ 'languages you are analyzing, replace this with the commands to build' \ 'your code, for example:' echo ' make bootstrap' echo ' make release' exit 1 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 with: category: "/language:${{matrix.language}}" azure-vm-utils-0.6.0/.github/workflows/debs.yml000066400000000000000000000032321500025016600214510ustar00rootroot00000000000000name: Debian Packaging CI on: [push, pull_request] jobs: build: runs-on: ubuntu-latest strategy: fail-fast: false matrix: container: - ubuntu:20.04 - ubuntu:22.04 - ubuntu:24.04 - debian:12 - debian:sid defaults: run: shell: bash steps: - name: Checkout code uses: actions/checkout@v4 with: ref: ${{ github.event.sha }} fetch-depth: 0 fetch-tags: true - name: Get tags from upstream, if needed if: github.repository != 'Azure/azure-vm-utils' run: | git remote add upstream https://github.com/Azure/azure-vm-utils.git git fetch upstream --tags - name: Build debs run: | ./scripts/build-deb.sh - name: Lintian check run: | lintian --fail-on error,warning out/*.deb - name: Install debs run: | sudo dpkg -i out/*.deb - name: Verify installation run: | set -x dpkg -L azure-vm-utils 
test -f /usr/lib/systemd/network/01-azure-unmanaged-sriov.network test -f /usr/lib/udev/rules.d/10-azure-unmanaged-sriov.rules test -f /usr/lib/udev/rules.d/80-azure-disk.rules test -x /usr/sbin/azure-nvme-id test -x /usr/sbin/azure-vm-utils-selftest test -f /usr/share/initramfs-tools/hooks/azure-disk test -f /usr/share/initramfs-tools/hooks/azure-unmanaged-sriov test -f /usr/share/man/man8/azure-nvme-id.8.gz test -f /usr/share/man/man8/azure-vm-utils-selftest.8.gz azure-nvme-id --version azure-vm-utils-selftest --skip-imds-validation --skip-symlink-validation azure-vm-utils-0.6.0/.github/workflows/main.yml000066400000000000000000000056701500025016600214700ustar00rootroot00000000000000name: Main CI on: [push, pull_request] jobs: build: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-20.04, ubuntu-22.04, ubuntu-24.04] defaults: run: shell: bash steps: - name: Checkout code uses: actions/checkout@v4 with: ref: ${{ github.event.sha }} fetch-depth: 0 fetch-tags: true - name: Get tags from upstream, if needed if: github.repository != 'Azure/azure-vm-utils' run: | git remote add upstream https://github.com/Azure/azure-vm-utils.git git fetch upstream --tags - name: Setup run: | sudo apt update sudo apt install gcc pandoc cmake libjson-c-dev -y - name: Build & install project with cmake with tests disabled run: | cmake -B build -S . 
-DENABLE_TESTS=0 make -C build sudo make -C build install - name: Verify installation run: | set -x test -f /usr/local/lib/systemd/network/01-azure-unmanaged-sriov.network test -f /usr/local/lib/udev/rules.d/10-azure-unmanaged-sriov.rules test -f /usr/local/lib/udev/rules.d/80-azure-disk.rules test -x /usr/local/sbin/azure-nvme-id test -x /usr/local/sbin/azure-vm-utils-selftest test -f /usr/local/share/initramfs-tools/hooks/azure-disk test -f /usr/local/share/initramfs-tools/hooks/azure-unmanaged-sriov test -f /usr/local/share/man/man8/azure-nvme-id.8 test -f /usr/local/share/man/man8/azure-vm-utils-selftest.8 azure-nvme-id --version azure-vm-utils-selftest --skip-imds-validation --skip-symlink-validation - name: Verify manpages can be generated if: matrix.os != 'ubuntu-20.04' run: | make -C build generate-manpages - name: Rebuild with tests enabled and run tests run: | set -x sudo apt install -y libcmocka-dev cppcheck clang-format rm -rf build cmake -B build -S . cd build make # ctest should invoke all tests, but to be sure let's run them individually test -x ./debug_tests ./debug_tests test -x ./identify_disks_tests ./identify_disks_tests test -x ./identify_udev_tests ./identify_udev_tests test -x ./nvme_tests ./nvme_tests ctest --verbose -j - name: Check source formatting with clang-format run: | make -C build check-clang-format || (echo "Run 'make clang-format' to fix formatting issues" && exit 1) - name: Check cppcheck run: | make -C build cppcheck - name: Check python scripts run: | python -m venv venv source venv/bin/activate if [ ${{ matrix.os }} == "ubuntu-20.04" ]; then pip install -r selftest/test-requirements-frozen-py38.txt else pip install -r selftest/test-requirements-frozen.txt fi make -C build python-lint azure-vm-utils-0.6.0/.github/workflows/rpms.yml000066400000000000000000000057301500025016600215220ustar00rootroot00000000000000name: RPM Packaging CI on: [push, pull_request] jobs: build: runs-on: ubuntu-latest strategy: fail-fast: false matrix: 
container: - fedora:41 - fedora:42 - fedora:rawhide - ghcr.io/almalinux/9-base:latest - ghcr.io/almalinux/8-base:latest - mcr.microsoft.com/cbl-mariner/base/core:2.0 - mcr.microsoft.com/azurelinux/base/core:3.0 container: image: ${{ matrix.container }} options: --user root defaults: run: shell: bash steps: - name: Identify OS run: cat /etc/os-release - name: Install dependencies run: | set -x id case "${{ matrix.container }}" in *almalinux/8*) dnf install -y 'dnf-command(config-manager)' epel-release dnf config-manager --set-enabled powertools dnf install -y python39 alternatives --set python3 /usr/bin/python3.9 /usr/bin/env python3 --version ;; *almalinux/9*) dnf install -y 'dnf-command(config-manager)' dnf config-manager --set-enabled crb ;; *azurelinux*|*cbl-mariner*) export HOME=/root # tdnf requires HOME to be set to /root tdnf install -y awk ca-certificates dnf --verbose ;; *fedora*) dnf install -y awk ;; esac dnf install -y git sudo - name: Checkout code uses: actions/checkout@v4 with: ref: ${{ github.sha }} fetch-depth: 0 fetch-tags: true - name: Get tags from upstream, if needed if: github.repository != 'Azure/azure-vm-utils' run: | git remote add upstream https://github.com/Azure/azure-vm-utils.git git fetch upstream --tags - name: Build RPMs run: | set -x git config --global --add safe.directory "$(pwd)" ./scripts/build-rpm.sh - name: Install rpms run: | rpm -Uvh -i out/*.rpm - name: Verify installation run: | set -x rpm -qa azure-vm-utils test -f /usr/lib/dracut/modules.d/97azure-disk/module-setup.sh test -f /usr/lib/dracut/modules.d/97azure-unmanaged-sriov/module-setup.sh test -f /usr/lib/systemd/network/01-azure-unmanaged-sriov.network test -f /usr/lib/udev/rules.d/10-azure-unmanaged-sriov.rules test -f /usr/lib/udev/rules.d/80-azure-disk.rules test -x /usr/sbin/azure-nvme-id || test -x /usr/bin/azure-nvme-id test -x /usr/sbin/azure-vm-utils-selftest || test -x /usr/bin/azure-vm-utils-selftest test -f /usr/share/man/man8/azure-nvme-id.8.gz test -f 
/usr/share/man/man8/azure-vm-utils-selftest.8.gz azure-nvme-id --version azure-vm-utils-selftest --skip-imds-validation --skip-symlink-validation azure-vm-utils-0.6.0/.gitignore000066400000000000000000000013661500025016600164120ustar00rootroot00000000000000# Object files *.o *.ko *.obj *.elf # Linker output *.ilk *.map *.exp # Precompiled Headers *.gch *.pch # Libraries *.lib *.a *.la *.lo # Shared objects (inc. Windows DLLs) *.dll *.so *.so.* *.dylib # Executables *.exe *.out *.app *.i*86 *.x86_64 *.hex # Debug files *.dSYM/ *.su *.idb *.pdb # Kernel Module Compile Results *.mod* *.cmd .tmp_versions/ modules.order Module.symvers Mkfile.old dkms.conf # CMake CMakeLists.txt.user CMakeCache.txt CMakeFiles CMakeScripts CPackConfig.cmake CPackSourceConfig.cmake DartConfiguration.tcl Testing Makefile cmake_install.cmake install_manifest.txt compile_commands.json CTestTestfile.cmake _deps # VS Code .vscode # Compiled artifacts azure-nvme-id obj-* /build/ /doc/azure-nvme-id.8 /src/version.h /out/ azure-vm-utils-0.6.0/CMakeLists.txt000066400000000000000000000042771500025016600171660ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.5) project(azure-vm-utils VERSION 0.1.0 LANGUAGES C) if(NOT DEFINED VERSION) execute_process( COMMAND git describe --tags --always --dirty WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE VERSION OUTPUT_STRIP_TRAILING_WHITESPACE ) endif() set(AZURE_NVME_ID_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/sbin" CACHE PATH "azure-nvme-id installation directory") set(DRACUT_MODULES_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/dracut/modules.d" CACHE PATH "dracut modules installation directory") set(INITRAMFS_HOOKS_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/share/initramfs-tools/hooks" CACHE PATH "initramfs-tools hooks installation directory") set(NETWORKD_CONFIGS_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/systemd/network" CACHE PATH "networkd configs installation directory") set(UDEV_RULES_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/udev/rules.d" CACHE PATH 
"udev rules installation directory") include(cmake/cppcheck.cmake) include(cmake/clangformat.cmake) include(cmake/doc.cmake) include(cmake/initramfs.cmake) include(cmake/networkd.cmake) include(cmake/python.cmake) include(cmake/udev.cmake) include(CTest) enable_testing() find_package(PkgConfig REQUIRED) pkg_check_modules(JSON_C REQUIRED json-c) include_directories( ${CMAKE_SOURCE_DIR}/src ${JSON_C_INCLUDE_DIRS} ) add_compile_options(-Wextra -Wall $<$:-Werror> -std=gnu11 -D_GNU_SOURCE=1) add_executable(azure-nvme-id src/debug.c src/identify_disks.c src/identify_udev.c src/main.c src/nvme.c) target_link_libraries(azure-nvme-id ${JSON_C_LIBRARIES}) configure_file( "${CMAKE_SOURCE_DIR}/src/version.h.in" "${CMAKE_SOURCE_DIR}/src/version.h" @ONLY ) install(TARGETS azure-nvme-id DESTINATION ${AZURE_NVME_ID_INSTALL_DIR}) install(FILES ${CMAKE_SOURCE_DIR}/selftest/selftest.py DESTINATION ${AZURE_NVME_ID_INSTALL_DIR} RENAME azure-vm-utils-selftest PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ) set(CPACK_PROJECT_NAME ${PROJECT_NAME}) set(CPACK_PROJECT_VERSION ${PROJECT_VERSION}) include(CPack) option(ENABLE_TESTS "Enable unit tests" ON) if(ENABLE_TESTS) include(cmake/tests.cmake) endif() azure-vm-utils-0.6.0/CODE_OF_CONDUCT.md000066400000000000000000000006741500025016600172220ustar00rootroot00000000000000# Microsoft Open Source Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Resources: - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns azure-vm-utils-0.6.0/LICENSE000066400000000000000000000021651500025016600154250ustar00rootroot00000000000000 MIT License Copyright (c) Microsoft Corporation. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE azure-vm-utils-0.6.0/README.md000066400000000000000000000105531500025016600156770ustar00rootroot00000000000000# azure-vm-utils A collection of utilities and udev rules to make the most of the Linux experience on Azure. ## Quick Start To build: ``` cmake . make ``` To install: ``` sudo make install ``` # Executables ## azure-nvme-id `azure-nvme-id` is a utility to help identify Azure NVMe devices. To run: ``` sudo azure-nvme-id ``` To run in udev mode: ``` DEVNAME=/dev/nvme0n1 azure-nvme-id --udev ``` # Rules for udev ## 80-azure-disk.rules Provides helpful symlinks in /dev/disk/azure for local, data, and os disks. # Testing ## unit tests There are unit test executables provided to validate functionality at build-time. They are automatically enabled, but can be disabled with -DENABLE_TESTS=0 passed to cmake. To build without tests: ``` mkdir build && cd build cmake .. -DENABLE_TESTS=0 make ``` To build and execute tests: ``` mkdir build && cd build cmake .. 
make ctest --output-on-failure ``` Optionally, ctest can be skipped in favor of running tests individually: ``` mkdir build && cd build cmake .. make for test in *_tests; do ./$test; done ``` ## selfcheck selfcheck is provided to validate the runtime environment of a VM. With azure-vm-utils installed in a VM on Azure, simply copy the selfcheck.py executable to the target and execute with sudo. ``` scp selfcheck/selftest.py $ip: && ssh $ip -- sudo ./selftest.py ``` ## test_images To help automate a spread of tests, test_images provides functional testing for a set of pre-existing images, assuming azure-vm-utils is already installed. It depends on az-cli, ssh, and ssh-keygen to create VMs and ssh into them to run the tests. To run tests against marketplace and community images with azure-vm-utils: ``` SELFTEST_AZURE_SUBSCRIPTION= \ SELFTEST_AZURE_LOCATION=eastus2 \ pytest -v selftest ``` To run tests for custom images and vm sizes, the following variables are supported with comma-separated values: - SELFTEST_ARM64_IMAGES - SELFTEST_ARM64_VM_SIZES - SELFTEST_GEN1_IMAGES - SELFTEST_GEN1_VM_SIZES - SELFTEST_GEN2_IMAGES - SELFTEST_GEN2_VM_SIZES For example: ``` SELFTEST_AZURE_SUBSCRIPTION= \ SELFTEST_AZURE_LOCATION=eastus2 \ SELFTEST_GEN2_IMAGES=/my/image1,/my/image2,... \ SELFTEST_GEN2_VM_SIZES=Standard_D2ds_v5,Standard_D2ds_v6,... \ pytest -xvvvs -k test_gen2 ``` For convenience, the default spread of VM sizes is assumed if SELFTEST_{ARM64|GEN1|GEN2}_VM_SIZES is unset. If unable to allocate VM, test is "skipped". 
Supported environment variables: - SELFTEST_ADMIN_USERNAME: admin name to configure VMs with - SELFTEST_ADMIN_PASSWORD: optional password to enable easy serial-console access - SELFTEST_ARTIFACTS_PATH: directory path to store artifacts under - SELFTEST_AZURE_LOCATION: location to use - SELFTEST_AZURE_SUBSCRIPTION: subscription to use - SELFTEST_HOLD_FAILURES: skip cleanup for failures - SELFTEST_REBOOTS: number of reboots to perform in test (default=50) - SELFTEST_RESOURCE_GROUP: optional RG to use, otherwise one is created # Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. 
azure-vm-utils-0.6.0/SECURITY.md000066400000000000000000000051401500025016600162050ustar00rootroot00000000000000 ## Security Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. ## Reporting Security Issues **Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
* Full paths of source file(s) related to the manifestation of the issue * The location of the affected source code (tag/branch/commit or direct URL) * Any special configuration required to reproduce the issue * Step-by-step instructions to reproduce the issue * Proof-of-concept or exploit code (if possible) * Impact of the issue, including how an attacker might exploit the issue This information will help us triage your report more quickly. If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. ## Preferred Languages We prefer all communications to be in English. ## Policy Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). azure-vm-utils-0.6.0/SUPPORT.md000066400000000000000000000023341500025016600161140ustar00rootroot00000000000000# TODO: The maintainer of this repo has not yet edited this file **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? - **No CSS support:** Fill out this template with information about how to file issues and get help. - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps. - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide. *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* # Support ## How to file issues and get help This project uses GitHub Issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue. For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. 
COULD BE A STACK OVERFLOW TAG OR OTHER CHANNEL. WHERE WILL YOU HELP PEOPLE?**. ## Microsoft Support Policy Support for this **PROJECT or PRODUCT** is limited to the resources listed above. azure-vm-utils-0.6.0/cmake/000077500000000000000000000000001500025016600154745ustar00rootroot00000000000000azure-vm-utils-0.6.0/cmake/clangformat.cmake000066400000000000000000000005321500025016600207730ustar00rootroot00000000000000file(GLOB ALL_SOURCE_FILES src/*.[ch] tests/*.[ch]) add_custom_target(clang-format COMMAND clang-format -i ${ALL_SOURCE_FILES} COMMENT "Running clang-format on source files" ) add_custom_target(check-clang-format COMMAND clang-format --dry-run --Werror ${ALL_SOURCE_FILES} COMMENT "Running clang-format check on source files" ) azure-vm-utils-0.6.0/cmake/cppcheck.cmake000066400000000000000000000004611500025016600202570ustar00rootroot00000000000000file(GLOB ALL_SOURCE_FILES src/*.[ch] tests/*.[ch]) set(CPPCHECK_COMMAND cppcheck --enable=all --suppress=missingIncludeSystem -I${CMAKE_SOURCE_DIR}/src ${ALL_SOURCE_FILES} ) add_custom_target(cppcheck COMMAND ${CPPCHECK_COMMAND} COMMENT "Running cppcheck on source files" ) azure-vm-utils-0.6.0/cmake/doc.cmake000066400000000000000000000026461500025016600172530ustar00rootroot00000000000000function(generate_manpage manpage_name) set(manpage_in "${CMAKE_SOURCE_DIR}/doc/${manpage_name}.8.in") set(manpage_output "${CMAKE_CURRENT_BINARY_DIR}/doc/${manpage_name}.8") configure_file( "${manpage_in}" "${manpage_output}" @ONLY ) set(MANPAGES_OUTPUT ${MANPAGES_OUTPUT} ${manpage_output} PARENT_SCOPE) endfunction() string(TIMESTAMP TODAY "%B %d, %Y") set(PANDOC_GENERATE_COMMAND pandoc --standalone --from markdown --to man --template "${CMAKE_SOURCE_DIR}/doc/pandoc.template" --variable footer="${manpage_name} ${VERSION}" --variable date="${TODAY}" ) set(PANDOC_FIXUPS_COMMAND sed -i 's/f\\[C]/f[CR]/g') add_custom_target( generate-manpages COMMAND ${PANDOC_GENERATE_COMMAND} "${CMAKE_SOURCE_DIR}/doc/azure-nvme-id.md" -o 
${CMAKE_SOURCE_DIR}/doc/azure-nvme-id.8.in COMMAND ${PANDOC_FIXUPS_COMMAND} "${CMAKE_SOURCE_DIR}/doc/azure-nvme-id.8.in" COMMAND ${PANDOC_GENERATE_COMMAND} "${CMAKE_SOURCE_DIR}/doc/azure-vm-utils-selftest.md" -o ${CMAKE_SOURCE_DIR}/doc/azure-vm-utils-selftest.8.in COMMAND ${PANDOC_FIXUPS_COMMAND} "${CMAKE_SOURCE_DIR}/doc/azure-vm-utils-selftest.8.in" ) generate_manpage("azure-nvme-id") generate_manpage("azure-vm-utils-selftest") add_custom_target( doc DEPENDS ${MANPAGES_OUTPUT} ) set(MANPAGES_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/share/man") install(FILES ${MANPAGES_OUTPUT} DESTINATION ${MANPAGES_INSTALL_DIR}/man8) azure-vm-utils-0.6.0/cmake/initramfs.cmake000066400000000000000000000024111500025016600204700ustar00rootroot00000000000000find_program(INITRAMFS_TOOLS update-initramfs) find_program(DRACUT dracut) if(INITRAMFS_TOOLS AND INITRAMFS_HOOKS_INSTALL_DIR) message(STATUS "initramfs-tools found, installing initramfs hooks") install(FILES ${CMAKE_SOURCE_DIR}/initramfs/initramfs-tools/hooks/azure-disk ${CMAKE_SOURCE_DIR}/initramfs/initramfs-tools/hooks/azure-unmanaged-sriov DESTINATION ${INITRAMFS_HOOKS_INSTALL_DIR} PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE) elseif(DRACUT AND DRACUT_MODULES_INSTALL_DIR) message(STATUS "dracut found, installing dracut modules") install(FILES ${CMAKE_SOURCE_DIR}/initramfs/dracut/modules.d/97azure-disk/module-setup.sh DESTINATION ${DRACUT_MODULES_INSTALL_DIR}/97azure-disk PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE) install(FILES ${CMAKE_SOURCE_DIR}/initramfs/dracut/modules.d/97azure-unmanaged-sriov/module-setup.sh DESTINATION ${DRACUT_MODULES_INSTALL_DIR}/97azure-unmanaged-sriov PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE) else() message(STATUS "initramfs-tools and dracut not found, skipping installation of azure-disk hook") endif() 
azure-vm-utils-0.6.0/cmake/networkd.cmake000066400000000000000000000001711500025016600203320ustar00rootroot00000000000000install(FILES ${CMAKE_SOURCE_DIR}/networkd/01-azure-unmanaged-sriov.network DESTINATION ${NETWORKD_CONFIGS_INSTALL_DIR}) azure-vm-utils-0.6.0/cmake/python.cmake000066400000000000000000000012601500025016600200160ustar00rootroot00000000000000file(GLOB PYTHON_SOURCES */*.py) add_custom_target( python-autoformat COMMAND isort ${PYTHON_SOURCES} COMMAND black ${PYTHON_SOURCES} COMMAND autoflake -r --in-place --remove-unused-variables --remove-all-unused-imports --ignore-init-module-imports ${PYTHON_SOURCES} COMMENT "Running autoformatting tools" ) add_custom_target( python-lint COMMAND isort --check-only ${PYTHON_SOURCES} COMMAND black --check --diff ${PYTHON_SOURCES} COMMAND mypy --ignore-missing-imports ${PYTHON_SOURCES} COMMAND flake8 --ignore=W503,E501,E402 ${PYTHON_SOURCES} COMMAND pylint ${PYTHON_SOURCES} COMMENT "Running Python lint checks and formatting checks" ) azure-vm-utils-0.6.0/cmake/tests.cmake000066400000000000000000000033461500025016600176460ustar00rootroot00000000000000find_package(PkgConfig REQUIRED) pkg_check_modules(CMOCKA REQUIRED cmocka) function(add_test_executable test_name) set(multiValueArgs WRAPPED_FUNCTIONS SOURCES CFLAGS) cmake_parse_arguments(TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${test_name} ${TEST_SOURCES} tests/capture.c) add_test(NAME ${test_name} COMMAND ${test_name}) # LTO may be enabled by packaging, but it must be disabled for --wrap to work. 
target_compile_options(${test_name} PRIVATE -g -O0 -fno-lto ${CMOCKA_CFLAGS_OTHER} ${TEST_CFLAGS}) target_link_options(${test_name} PRIVATE -flto=n) target_include_directories(${test_name} PRIVATE ${CMOCKA_INCLUDE_DIRS} src tests) target_link_directories(${test_name} PRIVATE ${CMOCKA_LIBRARY_DIRS}) target_link_libraries(${test_name} PRIVATE ${JSON_C_LIBRARIES} ${CMOCKA_LIBRARIES} dl) if(TEST_WRAPPED_FUNCTIONS) foreach(func ${TEST_WRAPPED_FUNCTIONS}) target_link_options(${test_name} PRIVATE -Wl,--wrap=${func}) endforeach() endif() endfunction() add_test_executable(debug_tests WRAPPED_FUNCTIONS SOURCES src/debug.c tests/debug_tests.c ) add_test_executable(identify_disks_tests WRAPPED_FUNCTIONS nvme_identify_namespace_vs_for_namespace_device SOURCES src/debug.c src/identify_disks.c src/nvme.c tests/identify_disks_tests.c CFLAGS -DUNIT_TESTING_SYS_CLASS_NVME=1 ) add_test_executable(identify_udev_tests WRAPPED_FUNCTIONS nvme_identify_namespace_vs_for_namespace_device SOURCES src/debug.c src/identify_udev.c src/nvme.c tests/identify_udev_tests.c ) add_test_executable(nvme_tests WRAPPED_FUNCTIONS open open64 posix_memalign ioctl __ioctl_time64 close SOURCES src/nvme.c tests/nvme_tests.c ) azure-vm-utils-0.6.0/cmake/udev.cmake000066400000000000000000000011501500025016600174360ustar00rootroot00000000000000# Install Azure unmanaged SR-IOV rules and configuration. install(FILES ${CMAKE_SOURCE_DIR}/udev/10-azure-unmanaged-sriov.rules DESTINATION ${UDEV_RULES_INSTALL_DIR}) # Install udev rules after updating "/usr/sbin/azure-nvme-id" with installed path $AZURE_NVME_ID_INSTALL_DIR. 
file(READ ${CMAKE_SOURCE_DIR}/udev/80-azure-disk.rules CONTENT) string(REPLACE "/usr/sbin/azure-nvme-id" "${AZURE_NVME_ID_INSTALL_DIR}/azure-nvme-id" CONTENT "${CONTENT}") file(WRITE ${CMAKE_BINARY_DIR}/udev/80-azure-disk.rules ${CONTENT}) install(FILES ${CMAKE_BINARY_DIR}/udev/80-azure-disk.rules DESTINATION ${UDEV_RULES_INSTALL_DIR}) azure-vm-utils-0.6.0/doc/000077500000000000000000000000001500025016600151615ustar00rootroot00000000000000azure-vm-utils-0.6.0/doc/azure-nvme-id.8.in000066400000000000000000000035471500025016600203530ustar00rootroot00000000000000.TH "azure-nvme-id" "8" "February\ 27,\ 2025" "azure-nvme-id @VERSION@" "User Manual" .hy .SH NAME .PP azure-nvme-id - identify Azure NVMe devices .SH SYNOPSIS .PP \f[B]azure-nvme-id\f[R] [--debug] [--format {plain|json}] [--help | --version | --udev] .SH DESCRIPTION .PP \f[B]azure-nvme-id\f[R] provides identification metadata in the response to Identify Namespace command for some models of NVMe devices. This is found in vendor-specific (vs) field which contains various identification details with a comma-separated, key=value format. .PP \f[B]azure-nvme-id\f[R] combines this metadata with the make and model of NVMe device namespaces to identify a device by type, name, index, etc. Output options are plain or json. .PP To suppoort udev rules, --udev option will invoke \f[B]azure-nvme-id\f[R] in udev mode. .SH OPTIONS .TP \f[B]\f[CB]--debug\f[B]\f[R] Debug mode with additional logging. .TP \f[B]\f[CB]--format {plain|json}\f[B]\f[R] Output format, default is plain. .TP \f[B]\f[CB]--help\f[B]\f[R] Show usage information and exit. .TP \f[B]\f[CB]--udev\f[B]\f[R] Run in udev mode, printing a set of \f[CR]=\f[R] variables consumed by udev rules. Requires DEVNAME to be set in environment. .TP \f[B]\f[CB]--version\f[B]\f[R] Show version information and exit. 
.SH EXAMPLES .PP Identify NVMe namespaces: .IP .nf \f[CR] $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme1n1: type=local,index=1,name=nvme-110G-1 \f[R] .fi .PP Parse device identifiers for udev consumption: .IP .nf \f[CR] $ sudo env DEVNAME=/dev/nvme1n1 azure-nvme-id --udev AZURE_DISK_VS=type=local,index=1,name=nvme-110G-1 AZURE_DISK_TYPE=local AZURE_DISK_INDEX=1 AZURE_DISK_NAME=nvme-110G-1 \f[R] .fi .PP Check \f[CR]azure-nvme-id\f[R] version: .IP .nf \f[CR] $ azure-nvme-id --version azure-nvme-id 0.1.2 \f[R] .fi .SH SEE ALSO .PP Source and documentation available at: azure-vm-utils-0.6.0/doc/azure-nvme-id.md000066400000000000000000000032741500025016600201740ustar00rootroot00000000000000--- title: azure-nvme-id section: 8 header: User Manual footer: azure-nvme-id __VERSION__ date: __DATE__ --- # NAME azure-nvme-id - identify Azure NVMe devices # SYNOPSIS **azure-nvme-id** [\-\-debug] [\-\-format {plain|json}] [\-\-help | \-\-version | \-\-udev] # DESCRIPTION **azure-nvme-id** provides identification metadata in the response to Identify Namespace command for some models of NVMe devices. This is found in vendor-specific (vs) field which contains various identification details with a comma-separated, key=value format. **azure-nvme-id** combines this metadata with the make and model of NVMe device namespaces to identify a device by type, name, index, etc. Output options are plain or json. To suppoort udev rules, \-\-udev option will invoke **azure-nvme-id** in udev mode. # OPTIONS `--debug` : Debug mode with additional logging. `--format {plain|json}` : Output format, default is plain. `--help` : Show usage information and exit. `--udev` : Run in udev mode, printing a set of `=` variables consumed by udev rules. Requires DEVNAME to be set in environment. `--version` : Show version information and exit. 
# EXAMPLES Identify NVMe namespaces: ```bash $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme1n1: type=local,index=1,name=nvme-110G-1 ``` Parse device identifiers for udev consumption: ```bash $ sudo env DEVNAME=/dev/nvme1n1 azure-nvme-id --udev AZURE_DISK_VS=type=local,index=1,name=nvme-110G-1 AZURE_DISK_TYPE=local AZURE_DISK_INDEX=1 AZURE_DISK_NAME=nvme-110G-1 ``` Check `azure-nvme-id` version: ```bash $ azure-nvme-id --version azure-nvme-id 0.1.2 ``` # SEE ALSO Source and documentation available at: azure-vm-utils-0.6.0/doc/azure-vm-utils-selftest.8.in000066400000000000000000000017171500025016600224200ustar00rootroot00000000000000.TH "azure-vm-utils-selftest" "8" "February\ 27,\ 2025" "azure-vm-utils-selftest @VERSION@" "User Manual" .hy .SH NAME .PP azure-vm-utils-selftest - packaging self-tests .SH SYNOPSIS .PP \f[B]azure-vm-utils-selftest [--skip-imds-validation] [--skip-symlink-validation]\f[R] .SH DESCRIPTION .PP \f[B]azure-vm-utils-selftest\f[R] validates functionailty related to azure-vm-utils package. .PP These self-tests validate azure-nvme-id outputs and /dev/disk/azure symlinks. .SH EXAMPLES .PP Running self tests requires root: .IP .nf \f[CR] $ sudo azure-vm-utils-selftest \f[R] .fi .PP To run inside Azure VM without a reboot after install: .IP .nf \f[CR] azure-vm-utils-selftest --skip-symlink-validation \f[R] .fi .PP To run outside of Azure VM: .IP .nf \f[CR] azure-vm-utils-selftest --skip-imds-validation --skip-symlink-validation \f[R] .fi .PP Hopefully it exits successfully! 
.SH SEE ALSO .PP Source and documentation available at: azure-vm-utils-0.6.0/doc/azure-vm-utils-selftest.md000066400000000000000000000016301500025016600222360ustar00rootroot00000000000000--- title: azure-vm-utils-selftest section: 8 header: User Manual footer: azure-vm-utils-selftest __VERSION__ date: __DATE__ --- # NAME azure-vm-utils-selftest - packaging self-tests # SYNOPSIS **azure-vm-utils-selftest [\-\-skip-imds-validation] [\-\-skip-symlink-validation]** # DESCRIPTION **azure-vm-utils-selftest** validates functionailty related to azure-vm-utils package. These self-tests validate azure\-nvme\-id outputs and /dev/disk/azure symlinks. # EXAMPLES Running self tests requires root: ```bash $ sudo azure-vm-utils-selftest ``` To run inside Azure VM without a reboot after install: ```bash azure-vm-utils-selftest --skip-symlink-validation ``` To run outside of Azure VM: ```bash azure-vm-utils-selftest --skip-imds-validation --skip-symlink-validation ``` Hopefully it exits successfully! # SEE ALSO Source and documentation available at: azure-vm-utils-0.6.0/doc/disk-identification.md000066400000000000000000000251301500025016600214250ustar00rootroot00000000000000# Azure VM Utils - Disk Identification ## Introduction azure-vm-utils is a new package to consolidate tools and udev rules which are critical to the Linux on Azure experience. The initial focus for azure-vm-utils is to provide the tools and configuration to help the customer identify the various NVMe disk devices. ## Background There are number of udev rules critical to the Linux on Azure experience, to assist managing devices including: SCSI, NVMe, MANA, Mellanox. Today these rules are spread out among cloud-init, WALinuxAgent, azure-nvme-utils, and vendor-specific image customization. ### azure-nvme-utils The precursor to azure-vm-utils, renamed to azure-vm-utils to support increased scope without requiring additional packages. 
### WALinuxAgent The WALinuxAgent team is working to decouple their guest agent from the provisioning agent. The provisioning agent is being deprecated and will be removed in a future release. The long-term plan is to migrate the udev rules and configuration found in WALinuxAgent into azure-vm-utils so they can be maintained and updated independently of the WALinuxAgent package. Today, WALinuxAgent includes udev rules to provide several symlinks for SCSI disks, including: - /dev/disk/azure/resource - /dev/disk/azure/root - /dev/disk/azure/scsi0/lun\ - /dev/disk/azure/scsi1/lun\ ## Azure NVMe Controllers Azure currently supports three NVMe disk controllers: - MSFT NVMe Accelerator v1 - Provides remote NVMe disks attached to the VM including OS and data disks. - All disks are currently found on one controller, future implementations may leverage multiple controllers. - Microsoft NVMe Direct Disk v1 - Earlist generation of VMs with directly-attached NVMe local disks, e.g. LSv2/LSv3. - Each local disk has its own controller. - Microsoft NVMe Direct Disk v2 - Second generation of NVMe directly-attached local disks used in v6 and newer VM sizes. - Each local disk has its own controller. ## NVMe Identify Namespace: Vendor-Specific Reserved NVMe' identify namespace command returns a structure with a vendor-specific (VS) field found at offset 384 and is 3712 bytes long. Azure utilizes this space to include some metadata about the device, including the disk's: - type: specifies the type of NVMe device (os, data, local) - index: indicates the specific device's index within a set, if applicable (local) - lun: customer-configured "lun" for data disks - name: customer-configured name for data disks, or designated name for local disks The metadata is formatted as comma-separated key-value pairs truncated by a null-byte. 
Example VS metadata includes: - type=local,index=1,name=nvme-300G-1\0 - type=os\0 - type=data,lun=4,name=data-db\0 If VS starts with a null-terminator, the protocol is unsupported. This metadata is currently supported by Microsoft NVMe Direct Disk v2 and will be extended in the future. ## Disk Controller Identification For NVMe Disks without VS-based Identification For NVMe devices which do not (yet) support identification via metadata in Identify Namespace, some identifiers are derived from a combination of NVMe controller identifier and namespace identifier. The following are assumed to be local disks: - Microsoft NVMe Direct Disk v1 - Microsoft NVMe Direct Disk v2 The following is assumed to be OS disk: - MSFT NVMe Accelerator v1, namespace id = 1 The following are assumed to be data disks: - MSFT NVMe Accelerator v1, namespace id = 2+. The "lun" for a data disk can be calculated by subtracting 2 from the namespace id. This calculation may be different for Windows VMs. ## Disk Device Identification on Linux The rules found in WALinuxAgent are being extended with azure-vm-utils to add identification support for NVMe devices. New symlinks that will be provided for all instances with NVMe disks include: - /dev/disk/azure/data/by-lun/\ - /dev/disk/azure/local/by-serial/\ - /dev/disk/azure/os For v6 and newer VM sizes with local NVMe disks supporting namespace identifiers, additional links will be available: - /dev/disk/azure/local/by-index/\ - /dev/disk/azure/local/by-name/\ For future VM sizes with remote NVMe disks supporting namespace identifiers, additional links will be available: - /dev/disk/azure/data/by-name/\ ### SCSI Compatibility For compatibility reasons, azure-vm-utils will ensure SCSI disks support the following links: - /dev/disk/azure/os - /dev/disk/azure/resource Note that some VM sizes come with both NVMe "local" disks in addition to a SCSI "temp" resource disk. 
These temp resource disks are considered to be separate from "local" disks which are dedicated to local NVMe disks to avoid confusion. ### Disk Identification Examples for Various VM Sizes & Configurations #### Standard_D2s_v4 + 4 Data Disks ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/root /dev/disk/azure/root-part1 /dev/disk/azure/root-part14 /dev/disk/azure/root-part15 /dev/disk/azure/scsi1/lun0 /dev/disk/azure/scsi1/lun1 /dev/disk/azure/scsi1/lun2 /dev/disk/azure/scsi1/lun3 $ sudo azure-nvme-id ``` #### Standard_D2ds_v4 configured with 4 data disks ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/resource /dev/disk/azure/resource-part1 /dev/disk/azure/root /dev/disk/azure/root-part1 /dev/disk/azure/root-part14 /dev/disk/azure/root-part15 /dev/disk/azure/scsi1/lun0 /dev/disk/azure/scsi1/lun1 /dev/disk/azure/scsi1/lun2 /dev/disk/azure/scsi1/lun3 $ sudo azure-nvme-id ``` #### Standard_E2bds_v5 configured with SCSI disk controller and 4 data disks ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/resource /dev/disk/azure/resource-part1 /dev/disk/azure/root /dev/disk/azure/root-part1 /dev/disk/azure/root-part14 /dev/disk/azure/root-part15 /dev/disk/azure/scsi1/lun0 /dev/disk/azure/scsi1/lun1 /dev/disk/azure/scsi1/lun2 /dev/disk/azure/scsi1/lun3 $ sudo azure-nvme-id ``` #### 
Standard_E2bds_v5 configured with NVMe disk controller and 4 data disks ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/resource /dev/disk/azure/resource-part1 $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme0n2: /dev/nvme0n3: /dev/nvme0n4: /dev/nvme0n5: ``` #### Standard_E2bs_v5 configured with NVMe disk controller and 4 data disks ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme0n2: /dev/nvme0n3: /dev/nvme0n4: /dev/nvme0n5: ``` #### Standard_L8s_v3 configured with 4 data disks (1 local NVMe disk + 1 temp SCSI disk) ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/local/by-serial/70307ea5392400000001 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/resource /dev/disk/azure/resource-part1 /dev/disk/azure/root /dev/disk/azure/root-part1 /dev/disk/azure/root-part14 /dev/disk/azure/root-part15 /dev/disk/azure/scsi1/lun0 /dev/disk/azure/scsi1/lun1 /dev/disk/azure/scsi1/lun2 /dev/disk/azure/scsi1/lun3 $ sudo azure-nvme-id /dev/nvme0n1: ``` #### Standard_L8s_v3 configured with 4 data disks (4 local NVMe disks + 1 temp SCSI disk) ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/local/by-serial/f0451a1ba53a00000001 /dev/disk/azure/local/by-serial/f0451a1ba53a00000002 
/dev/disk/azure/local/by-serial/f0451a1ba53a00000003 /dev/disk/azure/local/by-serial/f0451a1ba53a00000004 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 /dev/disk/azure/resource /dev/disk/azure/resource-part1 /dev/disk/azure/root /dev/disk/azure/root-part1 /dev/disk/azure/root-part14 /dev/disk/azure/root-part15 /dev/disk/azure/scsi1/lun0 /dev/disk/azure/scsi1/lun1 /dev/disk/azure/scsi1/lun2 /dev/disk/azure/scsi1/lun3 $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme1n1: /dev/nvme2n1: /dev/nvme3n1: ``` #### Standard_D2alds_v6 configured with 2 data disks (1 local NVMe disk) ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/local/by-index/1 /dev/disk/azure/local/by-name/nvme-440G-1 /dev/disk/azure/local/by-serial/ebeb91bd841bceb90001 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme0n2: /dev/nvme0n3: /dev/nvme1n1: type=local,index=1,name=nvme-110G-1 ``` #### Standard_D16alds_v6 configured with 4 data disks (4 local NVMe disks) ```bash $ find /dev/disk/azure -type l | sort /dev/disk/azure/data/by-lun/0 /dev/disk/azure/data/by-lun/1 /dev/disk/azure/data/by-lun/2 /dev/disk/azure/data/by-lun/3 /dev/disk/azure/local/by-index/1 /dev/disk/azure/local/by-index/2 /dev/disk/azure/local/by-index/3 /dev/disk/azure/local/by-index/4 /dev/disk/azure/local/by-name/nvme-440G-1 /dev/disk/azure/local/by-name/nvme-440G-2 /dev/disk/azure/local/by-name/nvme-440G-3 /dev/disk/azure/local/by-name/nvme-440G-4 /dev/disk/azure/local/by-serial/351ea1d05ea261d50001 /dev/disk/azure/local/by-serial/351ea1d05ea261d50002 /dev/disk/azure/local/by-serial/351ea1d05ea261d50003 /dev/disk/azure/local/by-serial/351ea1d05ea261d50004 /dev/disk/azure/os /dev/disk/azure/os-part1 /dev/disk/azure/os-part14 /dev/disk/azure/os-part15 $ sudo azure-nvme-id /dev/nvme0n1: /dev/nvme0n2: /dev/nvme0n3: /dev/nvme0n4: 
/dev/nvme0n5: /dev/nvme1n1: type=local,index=2,name=nvme-440G-2 /dev/nvme2n1: type=local,index=3,name=nvme-440G-3 /dev/nvme3n1: type=local,index=1,name=nvme-440G-1 /dev/nvme4n1: type=local,index=4,name=nvme-440G-4 ``` azure-vm-utils-0.6.0/doc/pandoc.template000066400000000000000000000007151500025016600201650ustar00rootroot00000000000000$if(has-tables)$ .\"t $endif$ $if(pandoc-version)$ $endif$ $if(adjusting)$ .ad $adjusting$ $endif$ .TH "$title/nowrap$" "$section/nowrap$" "$date/nowrap$" "$title/nowrap$ @VERSION@" "$header/nowrap$" $if(hyphenate)$ .hy $else$ .nh $endif$ $for(header-includes)$ $header-includes$ $endfor$ $for(include-before)$ $include-before$ $endfor$ $body$ $for(include-after)$ $include-after$ $endfor$ $if(author)$ .SH AUTHORS $for(author)$$author$$sep$; $endfor$. $endif$ azure-vm-utils-0.6.0/initramfs/000077500000000000000000000000001500025016600164105ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/dracut/000077500000000000000000000000001500025016600176725ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/dracut/modules.d/000077500000000000000000000000001500025016600215645ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/dracut/modules.d/97azure-disk/000077500000000000000000000000001500025016600240225ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/dracut/modules.d/97azure-disk/module-setup.sh000066400000000000000000000003311500025016600267760ustar00rootroot00000000000000#!/bin/bash # called by dracut check() { return 0 } # called by dracut depends() { return 0 } # called by dracut install() { inst_multiple azure-nvme-id cut readlink inst_rules 80-azure-disk.rules } azure-vm-utils-0.6.0/initramfs/dracut/modules.d/97azure-unmanaged-sriov/000077500000000000000000000000001500025016600261675ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/dracut/modules.d/97azure-unmanaged-sriov/module-setup.sh000066400000000000000000000002731500025016600311500ustar00rootroot00000000000000#!/usr/bin/bash # called by 
dracut check() { return 0 } # called by dracut depends() { return 0 } # called by dracut install() { inst_rules 10-azure-unmanaged-sriov.rules } azure-vm-utils-0.6.0/initramfs/initramfs-tools/000077500000000000000000000000001500025016600215425ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/initramfs-tools/hooks/000077500000000000000000000000001500025016600226655ustar00rootroot00000000000000azure-vm-utils-0.6.0/initramfs/initramfs-tools/hooks/azure-disk000077500000000000000000000010051500025016600246650ustar00rootroot00000000000000#!/bin/sh PREREQ="udev" prereqs() { echo "${PREREQ}" } case $1 in prereqs) prereqs exit 0 ;; esac . /usr/share/initramfs-tools/hook-functions DESTINATION="${DESTDIR}/lib/udev/rules.d/" RULES_FILE="/usr/lib/udev/rules.d/80-azure-disk.rules" if [ -e "${RULES_FILE}" ]; then mkdir -p "${DESTINATION}" cp -p "${RULES_FILE}" "${DESTINATION}" fi copy_exec "/usr/bin/cut" "${DESTDIR}/bin/" copy_exec "/usr/bin/readlink" "${DESTDIR}/bin/" copy_exec "/usr/sbin/azure-nvme-id" "${DESTDIR}/sbin/" azure-vm-utils-0.6.0/initramfs/initramfs-tools/hooks/azure-unmanaged-sriov000077500000000000000000000005751500025016600270450ustar00rootroot00000000000000#!/bin/sh PREREQ="udev" prereqs() { echo "${PREREQ}" } case $1 in prereqs) prereqs exit 0 ;; esac . /usr/share/initramfs-tools/hook-functions DESTINATION="${DESTDIR}/lib/udev/rules.d/" RULES_FILE="/usr/lib/udev/rules.d/10-azure-unmanaged-sriov.rules" if [ -e "${RULES_FILE}" ]; then mkdir -p "${DESTINATION}" cp -p "${RULES_FILE}" "${DESTINATION}" fi azure-vm-utils-0.6.0/networkd/000077500000000000000000000000001500025016600162515ustar00rootroot00000000000000azure-vm-utils-0.6.0/networkd/01-azure-unmanaged-sriov.network000066400000000000000000000005361500025016600243310ustar00rootroot00000000000000# Azure VMs with accelerated networking may have MANA, mlx4, or mlx5 SR-IOV # devices which are transparently bonded to a synthetic hv_netvsc device. 
# 10-azure-unmanaged-sriov.rules will mark these devices with # AZURE_UNMANAGED_SRIOV=1 so this can configure the devices as unmanaged. [Match] Property=AZURE_UNMANAGED_SRIOV=1 [Link] Unmanaged=yes azure-vm-utils-0.6.0/packaging/000077500000000000000000000000001500025016600163405ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/almalinux000077700000000000000000000000001500025016600215122fedora/ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/azurelinux000077700000000000000000000000001500025016600221232mariner/ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/debian/000077500000000000000000000000001500025016600175625ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/debian/azure-vm-utils-selftest.install000066400000000000000000000001201500025016600256760ustar00rootroot00000000000000/usr/sbin/azure-vm-utils-selftest /usr/share/man/man8/azure-vm-utils-selftest.8 azure-vm-utils-0.6.0/packaging/debian/azure-vm-utils.install000066400000000000000000000004701500025016600240570ustar00rootroot00000000000000/usr/lib/systemd/network/01-azure-unmanaged-sriov.network /usr/lib/udev/rules.d/10-azure-unmanaged-sriov.rules /usr/lib/udev/rules.d/80-azure-disk.rules /usr/sbin/azure-nvme-id /usr/share/initramfs-tools/hooks/azure-disk /usr/share/initramfs-tools/hooks/azure-unmanaged-sriov /usr/share/man/man8/azure-nvme-id.8 azure-vm-utils-0.6.0/packaging/debian/control000066400000000000000000000015351500025016600211710ustar00rootroot00000000000000Source: azure-vm-utils Section: admin Priority: optional Maintainer: Chris Patterson Build-Depends: cmake, debhelper-compat (= 12), pkg-config, libcmocka-dev, libjson-c-dev, python3 Standards-Version: 4.5.0 Homepage: https://github.com/Azure/azure-vm-utils Rules-Requires-Root: no Package: azure-vm-utils Architecture: any Multi-Arch: foreign Depends: ${misc:Depends}, ${shlibs:Depends} Description: Core utilities and configuration for Linux VMs on Azure This package provides a home for core utilities, udev 
rules and other configuration to support Linux VMs on Azure. Package: azure-vm-utils-selftest Architecture: all Multi-Arch: foreign Depends: ${misc:Depends}, python3, azure-vm-utils (= ${binary:Version}) Description: Self-test script for Azure VM Utils Test script for maintainer sanity checks in various environments. azure-vm-utils-0.6.0/packaging/debian/copyright000066400000000000000000000024721500025016600215220ustar00rootroot00000000000000Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: azure-vm-utils Upstream-Contact: https://github.com/Azure/azure-vm-utils/issues Source: https://github.com/Azure/azure-vm-utils Files: * Copyright: 2024 Microsoft Corporation License: Expat License: Expat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: . The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. . THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE azure-vm-utils-0.6.0/packaging/debian/rules000077500000000000000000000004161500025016600206430ustar00rootroot00000000000000#!/usr/bin/make -f export DH_VERBOSE = 1 export DEB_BUILD_MAINT_OPTIONS = hardening=+all export DEB_CFLAGS_MAINT_APPEND = -Wall -Werror -Wextra export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed %: dh $@ override_dh_auto_test: CTEST_OUTPUT_ON_FAILURE=1 dh_auto_test azure-vm-utils-0.6.0/packaging/debian/source/000077500000000000000000000000001500025016600210625ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/debian/source/format000066400000000000000000000000151500025016600222710ustar00rootroot000000000000003.0 (native) azure-vm-utils-0.6.0/packaging/debian/source/lintian-overrides000066400000000000000000000001141500025016600244370ustar00rootroot00000000000000source: no-nmu-in-changelog source: source-nmu-has-incorrect-version-number azure-vm-utils-0.6.0/packaging/fedora/000077500000000000000000000000001500025016600176005ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/fedora/azure-vm-utils.spec000066400000000000000000000032401500025016600233570ustar00rootroot00000000000000Name: azure-vm-utils Version: %{__git_version} Release: %{__git_release}%{?dist} Summary: Utilities and udev rules for Linux on Azure License: MIT URL: https://github.com/Azure/%{name} Source0: %{name}_dev.tgz BuildRequires: cmake BuildRequires: gcc BuildRequires: json-c-devel BuildRequires: libcmocka-devel %description A collection of utilities and udev rules to make the most of the Linux experience on Azure. 
%package selftest Summary: Self-test script for Azure VM Utils Requires: %{name} = %{version}-%{release} %if 0%{?rhel} == 8 || 0%{?centos} == 8 || 0%{?almalinux} == 8 Requires: python39 %else Requires: python3 >= 3.9 %endif %description selftest This package contains the self-test script for the Azure VM Utils package. %prep %autosetup %build %cmake -DVERSION="%{version}-%{release}" -DAZURE_NVME_ID_INSTALL_DIR="%{_bindir}" %cmake_build %install %cmake_install %check %ctest %if 0%{?rhel} == 8 && 0%{?centos} == 8 && 0%{?almalinux} == 8 %undefine __brp_mangle_shebangs %endif %files %{_exec_prefix}/lib/dracut/modules.d/97azure-disk/module-setup.sh %{_exec_prefix}/lib/dracut/modules.d/97azure-unmanaged-sriov/module-setup.sh %{_exec_prefix}/lib/systemd/network/01-azure-unmanaged-sriov.network %{_exec_prefix}/lib/udev/rules.d/10-azure-unmanaged-sriov.rules %{_exec_prefix}/lib/udev/rules.d/80-azure-disk.rules %{_bindir}/azure-nvme-id %{_mandir}/man8/azure-nvme-id.8.gz %files selftest %{_bindir}/azure-vm-utils-selftest %{_mandir}/man8/azure-vm-utils-selftest.8.gz %changelog * Wed Feb 05 2025 Test - %{__git_version}-%{__git_release} - Test test test azure-vm-utils-0.6.0/packaging/mariner/000077500000000000000000000000001500025016600177755ustar00rootroot00000000000000azure-vm-utils-0.6.0/packaging/mariner/azure-vm-utils.spec000066400000000000000000000025711500025016600235620ustar00rootroot00000000000000Name: azure-vm-utils Version: %{__git_version} Release: %{__git_release}%{?dist} Summary: Utilities and udev rules for Linux on Azure License: MIT URL: https://github.com/Azure/%{name} Source0: azure-vm-utils_dev.tgz BuildRequires: binutils BuildRequires: cmake BuildRequires: gcc BuildRequires: glibc-devel BuildRequires: json-c-devel BuildRequires: kernel-headers BuildRequires: libcmocka-devel BuildRequires: make %description A collection of utilities and udev rules to make the most of the Linux experience on Azure. 
%package selftest Summary: Self-test script for Azure VM Utils Requires: %{name} = %{version}-%{release} Requires: python3 %description selftest This package contains the self-test script for the Azure VM Utils package. %prep %autosetup %build %cmake -DVERSION="%{version}-%{release}" %cmake_build %install %cmake_install %check %ctest %files %{_libdir}/dracut/modules.d/97azure-disk/module-setup.sh %{_libdir}/dracut/modules.d/97azure-unmanaged-sriov/module-setup.sh %{_libdir}/systemd/network/01-azure-unmanaged-sriov.network %{_libdir}/udev/rules.d/10-azure-unmanaged-sriov.rules %{_libdir}/udev/rules.d/80-azure-disk.rules %{_sbindir}/azure-nvme-id %{_mandir}/man8/azure-nvme-id.8.gz %files selftest %{_sbindir}/azure-vm-utils-selftest %{_mandir}/man8/azure-vm-utils-selftest.8.gz azure-vm-utils-0.6.0/packaging/rhel000077700000000000000000000000001500025016600204522fedora/ustar00rootroot00000000000000azure-vm-utils-0.6.0/scripts/000077500000000000000000000000001500025016600161035ustar00rootroot00000000000000azure-vm-utils-0.6.0/scripts/build-deb.sh000077500000000000000000000026011500025016600202700ustar00rootroot00000000000000#!/bin/bash set -eux -o pipefail if [[ $UID -ne 0 ]]; then sudo_cmd="sudo" else sudo_cmd="" fi # Ensure dependencies are installed and up-to-date. $sudo_cmd apt update $sudo_cmd apt install -y \ build-essential \ clang-format \ cmake \ cppcheck \ devscripts \ debhelper \ gcc \ initramfs-tools \ libcmocka-dev \ libjson-c-dev \ pandoc \ pkg-config \ rsync project_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" git_version="$(git describe --tags --always --dirty)" git_ref="$(echo "${git_version}" | sed 's/.*-g//' | sed 's/-dirty/DIRTY/')" deb_version="$(echo "${git_version}" | sed 's/^v//' | sed 's/-.*//')+git${git_ref}" output_dir="${project_dir}/out" echo "project root: $project_dir" echo "project version: $git_version" cd "$project_dir" mkdir -p debian rsync -a packaging/debian/. debian/. 
rm -f debian/changelog if [[ -z ${DEBEMAIL:-} ]]; then git_user="$(git config user.name || echo Azure VM Utils CI)" git_email="$(git config user.email || echo azure/azure-vm-utils@github.com)" export DEBEMAIL="${git_user} <${git_email}>" fi dch --create -v "${deb_version}" --package "azure-vm-utils" "development build: ${git_version}" debuild --no-tgz-check mkdir -p "${output_dir}" rm -f "${output_dir}"/*.deb mv ../azure-vm-utils*"${deb_version}"* "${output_dir}"/ azure-vm-utils-0.6.0/scripts/build-rpm.sh000077500000000000000000000026451500025016600203440ustar00rootroot00000000000000#!/bin/bash # shellcheck disable=SC1091 set -eu -o pipefail if [[ $UID -ne 0 ]]; then sudo_cmd="sudo" else sudo_cmd="" fi source /etc/os-release distro="$ID" project_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" version="$(git describe --tags | cut -f 1 -d "-" | sed 's/^v//')" release="dev_$(git describe --tags --dirty --always | sed 's/^v//g' | sed 's/-/_/g')" output_dir="${project_dir}/out" echo "project root: ${project_dir}" echo "build version: ${version}" echo "build release: ${release}" set -x # Create rpmbuild directory layout. build_dir="$(mktemp -d)" mkdir -p "${build_dir}"/{BUILD,RPMS,SOURCES,SPECS,SRPMS} # Create source tarball. output_source="${build_dir}/SOURCES/azure-vm-utils_dev.tgz" cd "$project_dir" git archive --verbose --format=tar.gz --prefix="azure-vm-utils-${version}/" HEAD --output "${output_source}" # Create spec file from template. cd "${project_dir}/packaging/${distro}" # Install dependencies. build_requirements=$(grep ^BuildRequires azure-vm-utils.spec | awk '{{print $2}}' | tr '\n' ' ') $sudo_cmd dnf install -y ${build_requirements} rpm-build dracut # Build RPM. rpmbuild -ba --define "__git_version ${version}" --define "__git_release ${release}" --define "_topdir ${build_dir}" azure-vm-utils.spec # Copy RPM to output directory. 
mkdir -p "${output_dir}" rm -f "${output_dir}"/*.rpm cp -v "${build_dir}"/RPMS/*/azure-vm-utils*"${version}-${release}"*.rpm "${output_dir}" azure-vm-utils-0.6.0/selftest/000077500000000000000000000000001500025016600162455ustar00rootroot00000000000000azure-vm-utils-0.6.0/selftest/__init__.py000066400000000000000000000000001500025016600203440ustar00rootroot00000000000000azure-vm-utils-0.6.0/selftest/conftest.py000066400000000000000000000053311500025016600204460ustar00rootroot00000000000000# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root for license information. # -------------------------------------------------------------------------- """pytest configuration for selftest.""" import logging import os import sys from datetime import datetime from pathlib import Path import pytest logger = logging.getLogger(__name__) # pylint: disable=unused-argument def pytest_sessionstart(session): """Initialize session attributes that are marked on finish.""" session.aborted = False session.failed = False def pytest_sessionfinish(session, exitstatus): """Mark test session as aborted or failed.""" if exitstatus == 2: # Interrupted (e.g., Ctrl+C) session.aborted = True elif exitstatus != 0: session.failed = True def pytest_configure(config): """Configure pytest logging and artifacts directory. Create artifacts directory based on the current timestamp if not configured. This is used to store logs and other artifacts generated during the test run. If running under pytest-xdist, use the worker ID to create separate logs for each worker. 
""" artifacts_path = os.getenv("SELFTEST_ARTIFACTS_PATH") if "PYTEST_XDIST_WORKER" not in os.environ: if not artifacts_path: timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f") artifacts_path = f"/tmp/selftest-{timestamp}" os.environ["SELFTEST_ARTIFACTS_PATH"] = artifacts_path print(f"artifacts={artifacts_path}", file=sys.stderr) artifacts_path = os.getenv("SELFTEST_ARTIFACTS_PATH") assert artifacts_path, "SELFTEST_ARTIFACTS_PATH must be set" log_path = Path(artifacts_path) log_path.mkdir(parents=True, exist_ok=True) worker_id = os.environ.get("PYTEST_XDIST_WORKER") if worker_id is not None: logging.basicConfig( format="%(asctime)s [%(levelname)s] %(message)s", filename=log_path / f"{worker_id}.log", level=logging.DEBUG, ) else: logging.basicConfig( format="%(asctime)s [%(levelname)s] %(message)s", filename=log_path / "main.log", level=logging.DEBUG, ) logger.debug("test") @pytest.hookimpl(tryfirst=True) def pytest_collection_modifyitems(config, items): """Group tests by vm_size for pytest-xdist to avoid quota issues.""" for item in items: vm_size = ( item.callspec.params.get("vm_size", None) if hasattr(item, "callspec") else None ) if vm_size: item.add_marker(pytest.mark.xdist_group(vm_size)) azure-vm-utils-0.6.0/selftest/selftest.py000077500000000000000000001423231500025016600204600ustar00rootroot00000000000000#!/usr/bin/env python3 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root for license information. 
# -------------------------------------------------------------------------- """Azure VM utilities self-tests script.""" import argparse import glob import json import logging import os import re import subprocess import time import urllib.error import urllib.request from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Literal, Optional logger = logging.getLogger("selftest") # pylint: disable=broad-exception-caught # pylint: disable=line-too-long # pylint: disable=too-many-lines # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-locals SYS_CLASS_NET = "/sys/class/net" @dataclass(eq=True, repr=True) class SkuConfig: """VM sku-specific configuration related to disks.""" vm_size: str vm_size_type: Literal["arm64", "x64"] = "x64" nvme_controller_toggle_supported: bool = ( False # whether the sku supports NVMe controller toggle (Eb[d]s_v5) ) nvme_only: bool = False # NVMe-only skus (v6+) nvme_id_enabled_local: bool = False # whether the sku supports NVMe ID locally nvme_id_enabled_remote: bool = False # whether the sku supports NVMe ID remotely nvme_local_disk_count: int = 0 nvme_local_disk_size_gib: int = 0 temp_disk_size_gib: int = 0 # SCSI temp/resource disk size in GiB @dataclass(eq=True, repr=True) class V6SkuConfig(SkuConfig): """V6 VM sku-specific configuration related to disks.""" nvme_only: bool = True nvme_id_enabled_local: bool = True nvme_id_enabled_remote: bool = False def gb_to_gib(size_gb: int) -> int: """Roughly convert GB to GiB as sizes are documented in both ways.""" return int(size_gb * (1000**3) / (1024**3)) SKU_CONFIGS = { "Standard_B2ts_v2": SkuConfig(vm_size="Standard_B2ts_v2"), # "Standard_D2s_v3": SkuConfig(vm_size="Standard_D2s_v3", temp_disk_size_gib=16), "Standard_D2s_v4": SkuConfig(vm_size="Standard_D2s_v4"), "Standard_D2ds_v4": SkuConfig(vm_size="Standard_D2ds_v4", temp_disk_size_gib=75), "Standard_D2s_v5": 
SkuConfig(vm_size="Standard_D2s_v5"), "Standard_D2ds_v5": SkuConfig(vm_size="Standard_D2ds_v5", temp_disk_size_gib=75), "Standard_D2ads_v5": SkuConfig(vm_size="Standard_D2ads_v5", temp_disk_size_gib=75), "Standard_D16ads_v5": SkuConfig( vm_size="Standard_D16ads_v5", temp_disk_size_gib=600 ), "Standard_L8s_v2": SkuConfig( vm_size="Standard_L8s_v2", temp_disk_size_gib=80, nvme_local_disk_count=1, nvme_local_disk_size_gib=gb_to_gib(1920), ), "Standard_L8s_v3": SkuConfig( vm_size="Standard_L8s_v3", temp_disk_size_gib=80, nvme_local_disk_count=1, nvme_local_disk_size_gib=gb_to_gib(1920), ), "Standard_L80s_v3": SkuConfig( vm_size="Standard_L80s_v3", nvme_controller_toggle_supported=True, temp_disk_size_gib=800, nvme_local_disk_count=10, nvme_local_disk_size_gib=gb_to_gib(1920), ), "Standard_E2bs_v5": SkuConfig( vm_size="Standard_E2bs_v5", nvme_controller_toggle_supported=True ), "Standard_E2bds_v5": SkuConfig( vm_size="Standard_E2bds_v5", nvme_controller_toggle_supported=True, temp_disk_size_gib=75, ), "Standard_D2s_v6": V6SkuConfig(vm_size="Standard_D2s_v6"), "Standard_D2ds_v6": V6SkuConfig( vm_size="Standard_D2ds_v6", nvme_local_disk_count=1, nvme_local_disk_size_gib=110, ), "Standard_D16ds_v6": V6SkuConfig( vm_size="Standard_D16ds_v6", nvme_local_disk_count=2, nvme_local_disk_size_gib=440, ), "Standard_D32ds_v6": V6SkuConfig( vm_size="Standard_D32ds_v6", nvme_local_disk_count=4, nvme_local_disk_size_gib=440, ), "Standard_D2as_v6": V6SkuConfig(vm_size="Standard_D2as_v6"), "Standard_D2ads_v6": V6SkuConfig( vm_size="Standard_D2ads_v6", nvme_local_disk_count=1, nvme_local_disk_size_gib=110, ), "Standard_D16ads_v6": V6SkuConfig( vm_size="Standard_D16ads_v6", nvme_local_disk_count=2, nvme_local_disk_size_gib=440, ), "Standard_D32ads_v6": V6SkuConfig( vm_size="Standard_D32ads_v6", nvme_local_disk_count=4, nvme_local_disk_size_gib=440, ), "Standard_D2pls_v5": SkuConfig( vm_size="Standard_D2pls_v5", vm_size_type="arm64", ), "Standard_D2plds_v5": SkuConfig( 
vm_size="Standard_D2plds_v5", vm_size_type="arm64", temp_disk_size_gib=75, ), "Standard_D8pls_v5": SkuConfig( vm_size="Standard_D8pls_v5", vm_size_type="arm64", ), "Standard_D8plds_v5": SkuConfig( vm_size="Standard_D8plds_v5", vm_size_type="arm64", temp_disk_size_gib=300, ), "Standard_D2pls_v6": SkuConfig( vm_size="Standard_D2pls_v6", vm_size_type="arm64", ), "Standard_D2plds_v6": SkuConfig( vm_size="Standard_D2plds_v6", vm_size_type="arm64", nvme_local_disk_count=1, nvme_local_disk_size_gib=110, ), "Standard_D16pls_v6": SkuConfig( vm_size="Standard_D16pls_v6", vm_size_type="arm64", ), "Standard_D16plds_v6": SkuConfig( vm_size="Standard_D16plds_v6", vm_size_type="arm64", nvme_local_disk_count=2, nvme_local_disk_size_gib=440, ), } def device_sort(devices: List[str]) -> List[str]: """Natural sort for devices.""" def natural_sort_key(s: str): # Natural sort by turning a string into a list of string and number chunks. # e.g. "nvme0n10" -> ["nvme", 0, "n", 10] return [ int(text) if text.isdigit() else text for text in re.split("([0-9]+)", s) ] return sorted(devices, key=natural_sort_key) def get_disk_size_gb(disk_path: str) -> int: """Get the size of the disk in GB.""" try: proc = subprocess.run( ["lsblk", "-b", "-n", "-o", "SIZE", "-d", disk_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, ) logger.debug("lsblk output: %r", proc) size_bytes = int(proc.stdout.strip()) size_gib = size_bytes // (1000**3) return size_gib except subprocess.CalledProcessError as error: logger.error("error while fetching disk size: %r", error) raise except FileNotFoundError: logger.error("lsblk command not found") raise def get_disk_size_gib(disk_path: str) -> int: """Get the size of the disk in GiB.""" try: proc = subprocess.run( ["lsblk", "-b", "-n", "-o", "SIZE", "-d", disk_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, ) logger.debug("lsblk output: %r", proc) size_bytes = int(proc.stdout.strip()) size_gib = size_bytes // 
(1024**3) return size_gib except subprocess.CalledProcessError as error: logger.error("error while fetching disk size: %r", error) raise except FileNotFoundError: logger.error("lsblk command not found") raise def get_imds_metadata() -> Dict: """Fetch IMDS metadata using urllib.""" url = "http://169.254.169.254/metadata/instance?api-version=2021-02-01" headers = {"Metadata": "true"} req = urllib.request.Request(url, headers=headers) last_error = None deadline = time.time() + 300 while time.time() < deadline: try: with urllib.request.urlopen(req, timeout=60) as response: if response.status != 200: raise urllib.error.HTTPError( url, response.status, "Failed to fetch metadata", response.headers, None, ) metadata = json.load(response) logger.debug("fetched IMDS metadata: %r", metadata) return metadata except urllib.error.URLError as error: last_error = error logger.error("error fetching IMDS metadata: %r", error) time.sleep(1) raise RuntimeError(f"failed to fetch IMDS metadata: {last_error}") def get_local_nvme_disks() -> List[str]: """Get all local NVMe disks.""" local_disk_controllers = get_nvme_controllers_with_model( "Microsoft NVMe Direct Disk" ) local_disk_controllers_v2 = get_nvme_controllers_with_model( "Microsoft NVMe Direct Disk v2" ) return device_sort( [ namespace for controller in local_disk_controllers + local_disk_controllers_v2 for namespace in get_nvme_namespace_devices(controller) ] ) def get_remote_nvme_disks() -> List[str]: """Get all remote NVMe disks.""" remote_disk_controllers = get_nvme_controllers_with_model( "MSFT NVMe Accelerator v1.0" ) assert ( len(remote_disk_controllers) <= 1 ), f"unexpected number of remote controllers {remote_disk_controllers}" return device_sort( [ namespace for controller in remote_disk_controllers for namespace in get_nvme_namespace_devices(controller) ] ) def get_nvme_controllers_with_model(model: str) -> List[str]: """Get a list of all NVMe controllers with the specified model.""" nvme_controllers = [] nvme_path = 
"/sys/class/nvme" for controller in glob.glob(os.path.join(nvme_path, "nvme*")): logger.debug("checking controller: %s", controller) model_path = os.path.join(controller, "model") try: with open(model_path, "r", encoding="utf-8") as file: controller_model = file.read().strip() logger.debug("controller: %s model: %s", controller, controller_model) if controller_model == model: controller_name = controller.split("/")[-1] nvme_controllers.append(controller_name) except FileNotFoundError: logger.debug("model file not found: %s", model_path) continue return device_sort(nvme_controllers) def get_nvme_namespace_devices_with_model(model: str) -> List[str]: """Get all NVMe namespace devices for a given NVMe controller model.""" controllers = get_nvme_controllers_with_model(model) logger.debug("controllers found for model=%s: %r", model, controllers) return device_sort( [ namespace for controller in controllers for namespace in get_nvme_namespace_devices(controller) ] ) def get_nvme_namespace_devices(controller: str) -> List[str]: """Get all NVMe namespace devices for a given NVMe controller.""" namespace_devices = [] controller_name = controller.split("/")[-1] nvme_path = f"/sys/class/nvme/{controller_name}" logger.debug("checking namespaces under %s", nvme_path) for namespace in glob.glob(os.path.join(nvme_path, "nvme*")): logger.debug("checking namespace device: %s", namespace) if os.path.isdir(namespace): device_name = namespace.split("/")[-1] namespace_devices.append(device_name) return device_sort(namespace_devices) def get_root_block_device() -> str: """Get the root block device using findmnt.""" try: proc = subprocess.run( ["findmnt", "-n", "-o", "SOURCE", "/"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, ) logger.debug("findmnt output: %r", proc) return proc.stdout.strip() except subprocess.CalledProcessError as error: logger.error("error while fetching root block device: %r", error) raise except FileNotFoundError: logger.error("findmnt 
command not found") raise def get_scsi_resource_disk() -> Optional[str]: """Get the SCSI resource disk device.""" paths = [ # cloud-init udev rules "/dev/disk/cloud/azure_resource", # gen2 "/dev/disk/by-path/acpi-VMBUS:00-vmbus-f8b3781a1e824818a1c363d806ec15bb-lun-1", # gen1 "/dev/disk/by-path/acpi-VMBUS:01-vmbus-000000000001*-lun-0", ] for path in paths: if "*" in path: matched_paths = glob.glob(path) for matched_path in matched_paths: resolved_path = os.path.realpath(matched_path) if os.path.exists(resolved_path): return resolved_path.split("/")[-1] else: if os.path.exists(path): resolved_path = os.path.realpath(path) if os.path.exists(resolved_path): return resolved_path.split("/")[-1] logger.info("no SCSI resource disk found") return None DEV_DISK_AZURE_RESOURCE = "/dev/disk/azure/resource" @dataclass(eq=True, repr=True) class DiskInfo: """Information about different types of disks present.""" root_device: str dev_disk_azure_links: List[str] = field(default_factory=list) dev_disk_azure_resource_disk: Optional[str] = None # resolved path dev_disk_azure_resource_disk_size_gib: int = 0 nvme_local_disk_size_gib: int = 0 nvme_local_disks_v1: List[str] = field(default_factory=list) nvme_local_disks_v2: List[str] = field(default_factory=list) nvme_local_disks: List[str] = field(default_factory=list) nvme_remote_data_disks: List[str] = field(default_factory=list) nvme_remote_disks: List[str] = field(default_factory=list) nvme_remote_os_disk: Optional[str] = None root_device_is_nvme: bool = False scsi_resource_disk: Optional[str] = None scsi_resource_disk_size_gib: int = 0 @classmethod def gather(cls) -> "DiskInfo": """Gather disk information and return an instance of DiskInfo.""" dev_disk_azure_links = device_sort( [ link for link in glob.glob( os.path.join("/dev/disk/azure", "**"), recursive=True ) if os.path.islink(link) ] ) dev_disk_azure_resource_disk = None dev_disk_azure_resource_disk_size_gib = 0 if os.path.exists(DEV_DISK_AZURE_RESOURCE): 
dev_disk_azure_resource_disk = os.path.realpath(DEV_DISK_AZURE_RESOURCE) dev_disk_azure_resource_disk_size_gib = get_disk_size_gib( dev_disk_azure_resource_disk ) nvme_local_disks_v1 = get_nvme_namespace_devices_with_model( "Microsoft NVMe Direct Disk" ) nvme_local_disks_v2 = get_nvme_namespace_devices_with_model( "Microsoft NVMe Direct Disk v2" ) nvme_local_disks = device_sort(nvme_local_disks_v1 + nvme_local_disks_v2) nvme_local_disk_size_gib = 0 if nvme_local_disks: nvme_local_disk_size_gib = min( get_disk_size_gib(f"/dev/{disk}") for disk in nvme_local_disks ) local_disk_max_size = max( get_disk_size_gib(f"/dev/{disk}") for disk in nvme_local_disks ) assert ( nvme_local_disk_size_gib == local_disk_max_size ), f"local disk size mismatch: {nvme_local_disk_size_gib} != {local_disk_max_size} for {nvme_local_disks}" nvme_remote_disks = get_remote_nvme_disks() if nvme_remote_disks: nvme_remote_os_disk = nvme_remote_disks.pop(0) nvme_remote_data_disks = nvme_remote_disks else: nvme_remote_os_disk = None nvme_remote_data_disks = [] root_device = get_root_block_device() root_device_is_nvme = root_device.startswith("/dev/nvme") root_device = root_device.split("/")[-1] scsi_resource_disk = get_scsi_resource_disk() scsi_resource_disk_size_gib = ( get_disk_size_gib(f"/dev/{scsi_resource_disk}") if scsi_resource_disk else 0 ) disk_info = cls( dev_disk_azure_links=dev_disk_azure_links, dev_disk_azure_resource_disk=dev_disk_azure_resource_disk, dev_disk_azure_resource_disk_size_gib=dev_disk_azure_resource_disk_size_gib, nvme_local_disk_size_gib=nvme_local_disk_size_gib, nvme_local_disks_v1=nvme_local_disks_v1, nvme_local_disks_v2=nvme_local_disks_v2, nvme_local_disks=nvme_local_disks, nvme_remote_os_disk=nvme_remote_os_disk, nvme_remote_data_disks=nvme_remote_data_disks, nvme_remote_disks=nvme_remote_disks, root_device=root_device, root_device_is_nvme=root_device_is_nvme, scsi_resource_disk=scsi_resource_disk, scsi_resource_disk_size_gib=scsi_resource_disk_size_gib, ) 
logger.info("disks info: %r", disk_info) return disk_info @dataclass class AzureNvmeIdDevice: """Azure NVMe ID device.""" device: str model: Optional[str] nvme_id: str type: Optional[str] index: Optional[int] lun: Optional[int] name: Optional[str] extra: Dict[str, str] @dataclass(repr=True) class AzureNvmeIdInfo: """Azure NVMe ID.""" azure_nvme_id_stdout: str azure_nvme_id_stderr: str azure_nvme_id_returncode: int azure_nvme_id_disks: Dict[str, AzureNvmeIdDevice] azure_nvme_id_json_stdout: str azure_nvme_id_json_stderr: str azure_nvme_id_json_returncode: int azure_nvme_id_json_disks: Dict[str, AzureNvmeIdDevice] azure_nvme_id_help_stdout: str azure_nvme_id_help_stderr: str azure_nvme_id_help_returncode: int azure_nvme_id_version_stdout: str azure_nvme_id_version_stderr: str azure_nvme_id_version_returncode: int azure_nvme_id_version: str azure_nvme_id_zzz_stdout: str azure_nvme_id_zzz_stderr: str azure_nvme_id_zzz_returncode: int def _validate_azure_nvme_disks( self, azure_nvme_id_disks: Dict[str, AzureNvmeIdDevice], disk_info: DiskInfo ) -> None: disk_cfg: Optional[AzureNvmeIdDevice] = None for device_name, disk_cfg in azure_nvme_id_disks.items(): assert f"/dev/{device_name}" == disk_cfg.device assert disk_cfg.device.startswith( "/dev/nvme" ), f"unexpected device: {disk_cfg}" for device_name in disk_info.nvme_local_disks_v2: assert ( device_name in azure_nvme_id_disks ), f"missing azure-nvme-id for {device_name}" disk_cfg = azure_nvme_id_disks.get(device_name) assert disk_cfg, f"failed to find azure-nvme-id for {device_name}" assert disk_cfg.type == "local", f"unexpected local disk type {disk_cfg}" assert disk_cfg.name, f"unexpected local disk name {disk_cfg}" assert disk_cfg.index, f"unexpected local disk index {disk_cfg}" assert disk_cfg.lun is None, f"unexpected local disk lun {disk_cfg}" assert disk_cfg.nvme_id, f"unexpected local disk id {disk_cfg}" assert not disk_cfg.extra, f"unexpected local disk extra {disk_cfg}" for device_name in 
disk_info.nvme_local_disks_v1: assert ( device_name in azure_nvme_id_disks ), f"missing azure-nvme-id for {device_name}" disk_cfg = azure_nvme_id_disks.get(device_name) assert disk_cfg, f"failed to find azure-nvme-id for {device_name}" assert disk_cfg.type == "local", f"unexpected disk type {disk_cfg}" assert not disk_cfg.name, f"unexpected disk name {disk_cfg}" assert not disk_cfg.index, f"unexpected disk index {disk_cfg}" assert disk_cfg.lun is None, f"unexpected local disk lun {disk_cfg}" assert disk_cfg.nvme_id, f"unexpected disk id {disk_cfg}" assert not disk_cfg.extra, f"unexpected disk extra {disk_cfg}" for device_name in disk_info.nvme_remote_disks: assert ( device_name in azure_nvme_id_disks ), f"missing azure-nvme-id for {device_name}" disk_cfg = azure_nvme_id_disks.get(device_name) assert disk_cfg, f"failed to find azure-nvme-id for {device_name}" assert disk_cfg.type in ( "os", "data", ), f"unexpected remote disk type {disk_cfg}" if disk_cfg.type == "data": assert ( disk_cfg.lun is not None and disk_cfg.lun >= 0 ), f"unexpected remote disk index {disk_cfg}" else: assert disk_cfg.lun is None, f"unexpected remote disk index {disk_cfg}" assert not disk_cfg.name, f"unexpected remote disk name {disk_cfg}" assert disk_cfg.nvme_id, f"unexpected remote disk id {disk_cfg}" assert not disk_cfg.extra, f"unexpected remote disk extra {disk_cfg}" logger.info("validate_azure_nvme_disks OK: %r", self.azure_nvme_id_disks) def validate_azure_nvme_id(self, disk_info: DiskInfo) -> None: """Validate azure-nvme-id outputs.""" assert self.azure_nvme_id_returncode == 0, "azure-nvme-id failed" if not os.path.exists("/sys/class/nvme"): assert ( self.azure_nvme_id_stderr == "no NVMe devices in /sys/class/nvme: No such file or directory\n" ), f"unexpected azure-nvme-id stderr without /sys/class/nvme: {self.azure_nvme_id_stderr}" else: assert ( self.azure_nvme_id_stderr == "" ), f"unexpected azure-nvme-id stderr: {self.azure_nvme_id_stderr}" 
self._validate_azure_nvme_disks(self.azure_nvme_id_disks, disk_info) logger.info("validate_azure_nvmve_id OK: %r", self.azure_nvme_id_stdout) def validate_azure_nvme_id_help(self) -> None: """Validate azure-nvme-id --help outputs.""" assert self.azure_nvme_id_help_returncode == 0, "azure-nvme-id --help failed" assert ( self.azure_nvme_id_help_stderr == "" ), f"unexpected azure-nvme-id --help stderr: {self.azure_nvme_id_help_stderr!r}" assert ( self.azure_nvme_id_help_stdout and self.azure_nvme_id_help_stdout.startswith("Usage: azure-nvme-id ") ), "unexpected azure-nvme-id --help stdout: {self.azure_nvme_id_help_stdout!r}" logger.info( "validate_azure_nvme_id_help OK: %r", self.azure_nvme_id_help_stdout ) def validate_azure_nvme_id_json(self, disk_info: DiskInfo) -> None: """Validate azure-nvme-id --format json outputs.""" assert self.azure_nvme_id_json_returncode == 0, "azure-nvme-id failed" if not os.path.exists("/sys/class/nvme"): assert ( self.azure_nvme_id_json_stderr == "no NVMe devices in /sys/class/nvme: No such file or directory\n" ), f"unexpected azure-nvme-id stderr without /sys/class/nvme: {self.azure_nvme_id_json_stderr}" else: assert ( self.azure_nvme_id_json_stderr == "" ), f"unexpected azure-nvme-id stderr: {self.azure_nvme_id_json_stderr}" self._validate_azure_nvme_disks(self.azure_nvme_id_disks, disk_info) assert all( disk.model in ( "MSFT NVMe Accelerator v1.0", "Microsoft NVMe Direct Disk", "Microsoft NVMe Direct Disk v2", ) for disk in self.azure_nvme_id_json_disks.values() ), "missing model in azure-nvme-id --format json" logger.info( "validate_azure_nvmve_id_json OK: %r", self.azure_nvme_id_json_stdout ) def validate_azure_nvme_id_version(self) -> None: """Validate azure-nvme-id --version outputs.""" assert ( self.azure_nvme_id_version_returncode == 0 ), "azure-nvme-id --version failed" assert ( self.azure_nvme_id_version_stderr == "" ), f"unexpected azure-nvme-id stderr: {self.azure_nvme_id_stderr}" assert self.azure_nvme_id_version_stdout, 
"missing azure-nvme-id version stdout" assert re.match( r"azure-nvme-id [0v]\.*", self.azure_nvme_id_version_stdout.strip() ), f"unexpected azure-nvme-id version stdout: {self.azure_nvme_id_version_stdout}" assert re.match( r"[0v]\.*", self.azure_nvme_id_version ), f"unexpected azure-nvme-id version: {self.azure_nvme_id_version}" logger.info("validate_azure_nvme_id_version OK: %s", self.azure_nvme_id_version) def validate_azure_nvme_id_zzz_invalid_arg(self) -> None: """Validate azure-nvme-id handles invalid arguments.""" assert ( self.azure_nvme_id_zzz_returncode == 1 ), f"azure-nvme-id zzz rc={self.azure_nvme_id_zzz_returncode}" assert ( self.azure_nvme_id_zzz_stderr == "invalid argument: zzz\n" ), f"unexpected azure-nvme-id zzz stderr: {self.azure_nvme_id_zzz_stderr!r}" assert ( self.azure_nvme_id_zzz_stdout and self.azure_nvme_id_zzz_stdout.startswith("Usage: azure-nvme-id ") ), (f"unexpected azure-nvme-id zzz stdout: {self.azure_nvme_id_zzz_stdout!r}") logger.info( "validate_azure_nvme_id_invalid_arg OK: %r", self.azure_nvme_id_zzz_stdout ) def validate(self, disk_info: DiskInfo) -> None: """Validate Azure NVMe ID output.""" self.validate_azure_nvme_id_help() self.validate_azure_nvme_id_version() self.validate_azure_nvme_id_zzz_invalid_arg() self.validate_azure_nvme_id(disk_info) self.validate_azure_nvme_id_json(disk_info) @classmethod def gather(cls) -> "AzureNvmeIdInfo": """Gather Azure NVMe ID information.""" proc = subprocess.run(["azure-nvme-id"], capture_output=True, check=False) azure_nvme_id_stdout = proc.stdout.decode("utf-8") azure_nvme_id_stderr = proc.stderr.decode("utf-8") azure_nvme_id_returncode = proc.returncode azure_nvme_id_disks = cls.parse_azure_nvme_id_output(azure_nvme_id_stdout) proc = subprocess.run( ["azure-nvme-id", "--format", "json"], capture_output=True, check=False ) azure_nvme_id_json_stdout = proc.stdout.decode("utf-8") azure_nvme_id_json_stderr = proc.stderr.decode("utf-8") azure_nvme_id_json_returncode = proc.returncode 
azure_nvme_id_json_disks = cls.parse_azure_nvme_id_json_output( azure_nvme_id_json_stdout ) proc = subprocess.run( ["azure-nvme-id", "--help"], capture_output=True, check=False ) azure_nvme_id_help_stdout = proc.stdout.decode("utf-8") azure_nvme_id_help_stderr = proc.stderr.decode("utf-8") azure_nvme_id_help_returncode = proc.returncode proc = subprocess.run( ["azure-nvme-id", "--version"], capture_output=True, check=False ) azure_nvme_id_version_stdout = proc.stdout.decode("utf-8") azure_nvme_id_version_stderr = proc.stderr.decode("utf-8") azure_nvme_id_version_returncode = proc.returncode azure_nvme_id_version = cls.parse_azure_nvme_id_version( azure_nvme_id_version_stdout ) proc = subprocess.run( ["azure-nvme-id", "zzz"], capture_output=True, check=False ) azure_nvme_id_zzz_stdout = proc.stdout.decode("utf-8") azure_nvme_id_zzz_stderr = proc.stderr.decode("utf-8") azure_nvme_id_zzz_returncode = proc.returncode azure_nvme_id_info = cls( azure_nvme_id_stdout=azure_nvme_id_stdout, azure_nvme_id_stderr=azure_nvme_id_stderr, azure_nvme_id_returncode=azure_nvme_id_returncode, azure_nvme_id_help_stdout=azure_nvme_id_help_stdout, azure_nvme_id_help_stderr=azure_nvme_id_help_stderr, azure_nvme_id_help_returncode=azure_nvme_id_help_returncode, azure_nvme_id_disks=azure_nvme_id_disks, azure_nvme_id_json_stdout=azure_nvme_id_json_stdout, azure_nvme_id_json_stderr=azure_nvme_id_json_stderr, azure_nvme_id_json_returncode=azure_nvme_id_json_returncode, azure_nvme_id_json_disks=azure_nvme_id_json_disks, azure_nvme_id_version_stdout=azure_nvme_id_version_stdout, azure_nvme_id_version_stderr=azure_nvme_id_version_stderr, azure_nvme_id_version_returncode=azure_nvme_id_version_returncode, azure_nvme_id_version=azure_nvme_id_version, azure_nvme_id_zzz_returncode=azure_nvme_id_zzz_returncode, azure_nvme_id_zzz_stdout=azure_nvme_id_zzz_stdout, azure_nvme_id_zzz_stderr=azure_nvme_id_zzz_stderr, ) logger.info("azure-nvme-id info: %r", azure_nvme_id_info) return azure_nvme_id_info 
@staticmethod def parse_azure_nvme_id_json_output(output: str) -> Dict[str, AzureNvmeIdDevice]: """Parse azure-nvme-id --format json output. Example output: [ { "path": "/dev/nvme0n33", "model": "MSFT NVMe Accelerator v1.0", "properties": { "type": "data", "lun": 31 }, "vs": "" }, { "path": "/dev/nvme1n1", "model": "Microsoft NVMe Direct Disk v2", "properties": { "type": "local", "index": 1, "name": "nvme-440G-1" }, "vs": "type=local,index=1,name=nvme-440G-1" } ] """ devices = {} for device in json.loads(output): device_path = device["path"] model = device["model"] properties = device["properties"] device_type = properties.pop("type", None) device_index = ( int(properties.pop("index")) if "index" in properties else None ) device_lun = int(properties.pop("lun")) if "lun" in properties else None device_name = properties.pop("name", None) azure_nvme_id_device = AzureNvmeIdDevice( device=device_path, model=model, nvme_id=",".join([f"{k}={v}" for k, v in properties.items()]), type=device_type, index=device_index, lun=device_lun, name=device_name, extra=properties, ) key = device_path.split("/")[-1] devices[key] = azure_nvme_id_device return devices @staticmethod def parse_azure_nvme_id_output(output: str) -> Dict[str, AzureNvmeIdDevice]: """Parse azure-nvme-id output. 
Example output: /dev/nvme0n1: type=os /dev/nvme0n2: type=data,lun=0 /dev/nvme0n3: type=data,lun=1 /dev/nvme1n1: type=local,index=1,name=nvme-440G-1 /dev/nvme2n1: type=local,index=2,name=nvme-440G-2 /dev/nvme3n1: """ devices = {} for line in output.splitlines(): parts = line.strip().split(":", 1) if parts[-1] == "": parts.pop() device = parts[0].strip() if len(parts) == 2: nvme_id = parts[1].strip() properties = dict(kv.split("=", 1) for kv in nvme_id.split(",")) elif len(parts) == 1: nvme_id = "" properties = {} else: raise ValueError(f"unexpected azure-nvme-id output: {line}") device_type = properties.pop("type", None) device_index = ( int(properties.pop("index")) if "index" in properties else None ) device_lun = int(properties.pop("lun")) if "lun" in properties else None device_name = properties.pop("name", None) azure_nvme_id_device = AzureNvmeIdDevice( device=device, model=None, nvme_id=nvme_id, type=device_type, index=device_index, lun=device_lun, name=device_name, extra=properties, ) key = device.split("/")[-1] devices[key] = azure_nvme_id_device return devices @staticmethod def parse_azure_nvme_id_version(azure_nvme_id_version_output: str) -> str: """Parse azure-nvme-id version output and return version info.""" parts = azure_nvme_id_version_output.strip().split(" ") assert ( len(parts) == 2 ), f"unexpected azure-nvme-id version output: {azure_nvme_id_version_output}" return parts[1] @dataclass class NetworkInterface: """Network interface.""" name: str driver: str mac: str ipv4_addrs: List[str] udev_properties: Dict[str, str] @dataclass(eq=True, repr=True) class NetworkInfo: """Network information.""" interfaces: Dict[str, NetworkInterface] = field(default_factory=dict) @classmethod def enumerate_interfaces(cls) -> Dict[str, NetworkInterface]: """Retrieve all Ethernet interfaces on the system.""" interfaces: Dict[str, NetworkInterface] = {} interface_names = [ interface for interface in os.listdir(SYS_CLASS_NET) if os.path.exists(os.path.join(SYS_CLASS_NET, 
interface, "device")) ] for interface_name in interface_names: sys_path = Path(SYS_CLASS_NET, interface_name) udev_properties = cls.query_udev_properties(interface_name) driver_path = Path(sys_path, "device", "driver") if not driver_path.is_symlink(): logger.debug( "ignoring interface %s without driver symlink", interface_name ) continue link = os.readlink(driver_path) driver = os.path.basename(link) mac = (sys_path / "address").read_text().strip() ipv4_addrs = cls.get_ipv4_addresses(interface_name) interfaces[interface_name] = NetworkInterface( name=interface_name, driver=driver, mac=mac, ipv4_addrs=ipv4_addrs, udev_properties=udev_properties, ) return interfaces @staticmethod def get_ipv4_addresses(interface_name: str) -> List[str]: """Get the IPv4 addresses of a given network interface using `ip addr`.""" try: result = subprocess.run( ["ip", "-4", "addr", "show", interface_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, ) ipv4_addresses = re.findall(r"inet (\d+\.\d+\.\d+\.\d+)", result.stdout) return ipv4_addresses except subprocess.CalledProcessError as error: logger.error("failed to get IPv4 address for %s: %r", interface_name, error) raise @staticmethod def query_udev_properties(interface_name: str) -> Dict[str, str]: """Query all udev properties for a given interface using udevadm.""" try: result = subprocess.run( [ "udevadm", "info", "--query=property", f"--path={SYS_CLASS_NET}/{interface_name}", ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True, ) properties: Dict[str, str] = {} for line in result.stdout.splitlines(): if "=" in line: key, value = line.split("=", 1) properties[key] = value return properties except subprocess.CalledProcessError as error: logger.error( "Failed to query udev properties for %s: %r", interface_name, error ) return {} def _validate_interface(self, interface: NetworkInterface) -> None: """Ensure the required properties are set for hv_netvsc, mlx4, mlx5, and mana devices.""" if 
interface.driver in ["mlx4_core", "mlx5_core", "mana"]: assert ( interface.udev_properties.get("NM_UNMANAGED") == "1" and interface.udev_properties.get("AZURE_UNMANAGED_SRIOV") == "1" and interface.udev_properties.get("ID_NET_MANAGED_BY") == "unmanaged" ), f"missing required properties for network interface: {interface}" elif interface.driver == "hv_netvsc": assert ( "AZURE_UNMANAGED_SRIOV" not in interface.udev_properties ), f"unexpected AZURE_UNMANAGED_SRIOV property: {interface}" assert ( interface.udev_properties.get("ID_NET_MANAGED_BY") != "unmanaged" ), f"hv_netvsc interface should be managed: {interface}" mana_has_synthetic_netvsc = interface.driver == "mana" and any( i.driver == "hv_netvsc" and i.mac == interface.mac for i in self.interfaces.values() ) if interface.driver == "hv_netvsc" or ( interface.driver == "mana" and not mana_has_synthetic_netvsc ): assert interface.ipv4_addrs, f"missing IPv4 addresses for {interface}" else: assert ( not interface.ipv4_addrs ), f"unexpected IPv4 addresses for {interface}" logger.info("validate_interface %s OK: %r", interface.name, interface) def validate(self) -> None: """Validate network configuration.""" for _, interface in self.interfaces.items(): self._validate_interface(interface) @classmethod def gather(cls) -> "NetworkInfo": """Gather networking information.""" return NetworkInfo(interfaces=cls.enumerate_interfaces()) class AzureVmUtilsValidator: """Validate Azure VM utilities.""" def __init__( self, *, skip_imds_validation: bool = False, skip_network_validation: bool = False, skip_symlink_validation: bool = False, ) -> None: self.azure_nvme_id_info = AzureNvmeIdInfo.gather() self.disk_info = DiskInfo.gather() self.net_info = NetworkInfo.gather() self.skip_imds_validation = skip_imds_validation self.skip_network_validation = skip_network_validation self.skip_symlink_validation = skip_symlink_validation try: self.imds_metadata = get_imds_metadata() except Exception as error: logger.error("failed to fetch IMDS 
metadata: %r", error) if not self.skip_imds_validation: raise self.imds_metadata = {} self.vm_size = self.imds_metadata.get("compute", {}).get("vmSize") self.sku_config = SKU_CONFIGS.get(self.vm_size) logger.info("sku config: %r", self.sku_config) def validate_dev_disk_azure_links_data(self) -> None: """Validate /dev/disk/azure/data links. All data disks should have by-lun if azure-vm-utils is installed. Future variants of remote disks will include by-name. """ imds_data_disks = ( self.imds_metadata.get("compute", {}) .get("storageProfile", {}) .get("dataDisks", []) ) expected_data_disks = len(imds_data_disks) data_disks = [ link for link in self.disk_info.dev_disk_azure_links if link.startswith("/dev/disk/azure/data/by-lun") ] if self.disk_info.nvme_remote_disks: assert len(data_disks) == len( self.disk_info.nvme_remote_data_disks ), f"unexpected number of data disks: {data_disks} configured={self.disk_info.nvme_remote_data_disks}" assert ( len(data_disks) == expected_data_disks ), f"unexpected number of data disks: {data_disks} IMDS configured={imds_data_disks} (note that IMDS may not be accurate)" # Verify disk sizes match up with IMDS configuration. for imds_disk in imds_data_disks: lun = imds_disk.get("lun") # Disk size is actually reported in GiB not GB. expected_size_gib = int(imds_disk.get("diskSizeGB")) disk_path = f"/dev/disk/azure/data/by-lun/{lun}" actual_size_gib = get_disk_size_gib(disk_path) assert ( actual_size_gib == expected_size_gib ), f"disk size mismatch for {disk_path}: expected {expected_size_gib} GiB, found {actual_size_gib} GiB" logger.info("validate_dev_disk_azure_links_data OK: %r", data_disks) def validate_dev_disk_azure_links_local(self) -> None: """Validate /dev/disk/azure/local links. All local disks should have by-serial if azure-vm-utils is installed. If NVMe id is supported, by-index and by-name will be available as well. 
""" local_disks = sorted( [ link for link in self.disk_info.dev_disk_azure_links if link.startswith("/dev/disk/azure/local") ] ) for key in ["index", "name", "serial"]: local_disks_by_key = sorted( [ link for link in self.disk_info.dev_disk_azure_links if link.startswith(f"/dev/disk/azure/local/by-{key}") ] ) if key == "serial": expected_count = len(self.disk_info.nvme_local_disks) else: expected_count = len(self.disk_info.nvme_local_disks_v2) assert ( len(local_disks_by_key) == expected_count ), f"unexpected number of local disks by-{key}: {local_disks_by_key} (expected {expected_count})" assert ( not self.sku_config or not self.sku_config.nvme_id_enabled_local or len(local_disks_by_key) == self.sku_config.nvme_local_disk_count ), f"unexpected number of local disks by sku for by-{key}: {local_disks_by_key} (expected {expected_count})" if key == "name": for disk in local_disks_by_key: name = disk.split("/")[-1] assert name.startswith( "nvme-" ), f"unexpected local disk name: {name}" match = re.match(r"nvme-(\d+)G-(\d+)", name) assert ( match ), f"local disk name does not conform to expected pattern: {name}" size, index = match.groups() assert ( size.isdigit() and index.isdigit() ), f"invalid size or index in local disk name: {name}" # Cross-check by-index links with by-name links. 
by_index_path = f"/dev/disk/azure/local/by-index/{index}" assert os.path.realpath(by_index_path) == os.path.realpath( disk ), f"mismatch between by-index and by-name links: {by_index_path} != {disk}" logger.info("validate_dev_disk_azure_links_local OK: %r", local_disks) def validate_dev_disk_azure_links_os(self) -> None: """Validate /dev/disk/azure/os link.""" os_disk = "/dev/disk/azure/os" assert os_disk in self.disk_info.dev_disk_azure_links, f"missing {os_disk}" logger.info("validate_dev_disk_azure_links_os OK: %r", os_disk) def validate_dev_disk_azure_links_resource(self) -> None: """Validate /dev/disk/azure/resource link.""" resource_disk = "/dev/disk/azure/resource" expected = (self.sku_config and self.sku_config.temp_disk_size_gib) or bool( self.disk_info.scsi_resource_disk ) if expected: assert ( resource_disk in self.disk_info.dev_disk_azure_links ), f"missing {resource_disk}" else: assert ( resource_disk not in self.disk_info.dev_disk_azure_links ), f"unexpected {resource_disk}" logger.info("validate_dev_disk_azure_links_resource OK: %r", resource_disk) def validate_networking(self) -> None: """Validate networking configuration.""" self.net_info.validate() logger.info("validate_networking OK: %r", self.net_info) def validate_nvme_local_disks(self) -> None: """Validate NVMe local disks.""" logger.info("validate_nvme_local_disks OK: %r", self.disk_info.nvme_local_disks) def validate_scsi_resource_disk(self) -> None: """Validate SCSI resource disk symlink and size.""" assert ( self.disk_info.scsi_resource_disk_size_gib == self.disk_info.dev_disk_azure_resource_disk_size_gib ), f"resource disk size mismatch: {self.disk_info}" if self.disk_info.scsi_resource_disk: assert ( f"/dev/{self.disk_info.scsi_resource_disk}" == self.disk_info.dev_disk_azure_resource_disk ), f"unexpected resource disk path: {self.disk_info}" else: assert ( self.disk_info.scsi_resource_disk is None and self.disk_info.dev_disk_azure_resource_disk is None ), f"unexpected resource disk 
path: {self.disk_info}" logger.info( "validate_scsi_resource_disk OK: /dev/disk/azure/resource => %s", self.disk_info.dev_disk_azure_resource_disk, ) def validate_sku_config(self) -> None: """Validate SKU config.""" if not self.sku_config: logger.warning( "validate_sku_config SKIPPED: no sku configuration for VM size %r", self.vm_size, ) return assert ( self.sku_config.vm_size == self.vm_size ), f"vm size mismatch: {self.sku_config.vm_size} != {self.vm_size}" assert ( len(self.disk_info.nvme_local_disks) == self.sku_config.nvme_local_disk_count ), f"local disk count mismatch: {len(self.disk_info.nvme_local_disks)} != {self.sku_config.nvme_local_disk_count}" assert ( self.disk_info.nvme_local_disk_size_gib == self.sku_config.nvme_local_disk_size_gib ), f"local disk size mismatch: {self.disk_info.nvme_local_disk_size_gib} != {self.sku_config.nvme_local_disk_size_gib}" assert ( self.disk_info.scsi_resource_disk_size_gib == self.sku_config.temp_disk_size_gib ), f"temp disk size mismatch: {self.disk_info.scsi_resource_disk_size_gib} != {self.sku_config.temp_disk_size_gib}" assert ( self.disk_info.dev_disk_azure_resource_disk_size_gib == self.sku_config.temp_disk_size_gib ), f"temp disk size mismatch: {self.disk_info.dev_disk_azure_resource_disk_size_gib} != {self.sku_config.temp_disk_size_gib}" logger.info("validate_sku_config OK: %r", self.sku_config) def validate(self) -> None: """Run validations.""" self.azure_nvme_id_info.validate(self.disk_info) if self.skip_symlink_validation: logger.info("validate_dev_disk_azure_links_data SKIPPED") logger.info("validate_dev_disk_azure_links_local SKIPPED") logger.info("validate_dev_disk_azure_links_os SKIPPED") logger.info("validate_dev_disk_azure_links_resource SKIPPED") logger.info("validate_scsi_resource_disk SKIPPED") else: self.validate_dev_disk_azure_links_data() self.validate_dev_disk_azure_links_local() self.validate_dev_disk_azure_links_os() self.validate_dev_disk_azure_links_resource() 
self.validate_scsi_resource_disk() if self.skip_network_validation: logger.info("validate_networking SKIPPED") else: self.validate_networking() self.validate_sku_config() logger.info("success!") def main() -> None: """Main entry point.""" parser = argparse.ArgumentParser( description="Azure VM utilities self-tests script." ) parser.add_argument( "--debug", action="store_true", help="Enable debug logging", ) parser.add_argument( "--skip-imds-validation", action="store_true", help="Skip imds validation (allow for running tests outside Azure VM)", ) parser.add_argument( "--skip-network-validation", action="store_true", help="Skip network validation (allow for running test without reboot after install)", ) parser.add_argument( "--skip-symlink-validation", action="store_true", help="Skip symlink validation (allow for running test without reboot after install)", ) args = parser.parse_args() if args.debug: logging.basicConfig(format="[%(asctime)s] %(message)s", level=logging.DEBUG) else: logging.basicConfig(format="[%(asctime)s] %(message)s", level=logging.INFO) validator = AzureVmUtilsValidator( skip_imds_validation=args.skip_imds_validation, skip_network_validation=args.skip_network_validation, skip_symlink_validation=args.skip_symlink_validation, ) validator.validate() if __name__ == "__main__": main() azure-vm-utils-0.6.0/selftest/test-requirements-frozen-py38.txt000066400000000000000000000010221500025016600246030ustar00rootroot00000000000000astroid==3.2.4 autoflake==2.3.1 black==24.8.0 certifi==2024.12.14 charset-normalizer==3.4.1 click==8.1.8 dill==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 flake8==7.1.1 idna==3.10 iniconfig==2.0.0 isort==5.13.2 mccabe==0.7.0 mypy==1.14.1 mypy-extensions==1.0.0 packaging==24.2 pathspec==0.12.1 platformdirs==4.3.6 pluggy==1.5.0 pycodestyle==2.12.1 pyflakes==3.2.0 pylint==3.2.7 pytest==8.3.4 pytest-xdist==3.6.1 requests==2.32.3 tomli==2.2.1 tomlkit==0.13.2 types-requests==2.32.0.20241016 typing-extensions==4.12.2 urllib3==2.2.3 
azure-vm-utils-0.6.0/selftest/test-requirements-frozen.txt000066400000000000000000000010211500025016600240010ustar00rootroot00000000000000astroid==3.3.8 autoflake==2.3.1 black==25.1.0 certifi==2024.12.14 charset-normalizer==3.4.1 click==8.1.8 dill==0.3.9 exceptiongroup==1.2.2 execnet==2.1.1 flake8==7.1.1 idna==3.10 iniconfig==2.0.0 isort==6.0.0 mccabe==0.7.0 mypy==1.14.1 mypy-extensions==1.0.0 packaging==24.2 pathspec==0.12.1 platformdirs==4.3.6 pluggy==1.5.0 pycodestyle==2.12.1 pyflakes==3.2.0 pylint==3.3.4 pytest==8.3.4 pytest-xdist==3.6.1 requests==2.32.3 tomli==2.2.1 tomlkit==0.13.2 types-requests==2.32.0.20241016 typing_extensions==4.12.2 urllib3==2.3.0 azure-vm-utils-0.6.0/selftest/test-requirements.txt000066400000000000000000000001741500025016600225100ustar00rootroot00000000000000autoflake black flake8 isort mypy pycodestyle pyflakes pylint pytest pytest-xdist requests types-requests typing_extensions azure-vm-utils-0.6.0/selftest/test_images.py000077500000000000000000000614621500025016600211370ustar00rootroot00000000000000#!/usr/bin/env python3 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE in the project root for license information. # -------------------------------------------------------------------------- """Azure VM utilities self-tests script.""" import codecs import getpass import logging import os import shlex import subprocess import time import traceback import uuid from datetime import datetime, timedelta, timezone from pathlib import Path from typing import List import pytest from . 
import selftest logger = logging.getLogger(__name__) # pylint: disable=unknown-option-value # pylint: disable=line-too-long # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments # pylint: disable=too-many-locals # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument # pylint: disable=attribute-defined-outside-init # pylint: disable=redefined-outer-name ARM64_IMAGES = [ image for image in os.getenv("SELFTEST_ARM64_IMAGES", "").split(",") if image ] if not ARM64_IMAGES: ARM64_IMAGES = [ "debian:debian-13-daily:13-arm64:latest", "debian:debian-sid-daily:sid-arm64:latest", "/CommunityGalleries/Fedora-5e266ba4-2250-406d-adad-5d73860d958f/Images/Fedora-Cloud-Rawhide-Arm64/versions/latest", ] ARM64_VM_SIZES = [ vm_size for vm_size in os.getenv("SELFTEST_ARM64_VM_SIZES", "").split(",") if vm_size ] if not ARM64_VM_SIZES: ARM64_VM_SIZES = [ sku.vm_size for sku in selftest.SKU_CONFIGS.values() if sku.vm_size_type == "arm64" ] GEN1_IMAGES = [ image for image in os.getenv("SELFTEST_GEN1_IMAGES", "").split(",") if image ] if not GEN1_IMAGES: GEN1_IMAGES = [ "debian:debian-13-daily:13:latest", "debian:debian-sid-daily:sid:latest", ] GEN1_VM_SIZES = [ vm_size for vm_size in os.getenv("SELFTEST_GEN1_VM_SIZES", "").split(",") if vm_size ] if not GEN1_VM_SIZES: GEN1_VM_SIZES = [ sku.vm_size for sku in selftest.SKU_CONFIGS.values() if sku.vm_size_type == "x64" and not sku.vm_size.endswith("v6") ] GEN2_IMAGES = [ image for image in os.getenv("SELFTEST_GEN2_IMAGES", "").split(",") if image ] if not GEN2_IMAGES: GEN2_IMAGES = [ "debian:debian-13-daily:13-gen2:latest", "debian:debian-sid-daily:sid-gen2:latest", "/CommunityGalleries/Fedora-5e266ba4-2250-406d-adad-5d73860d958f/Images/Fedora-Cloud-Rawhide-x64/versions/latest", ] GEN2_VM_SIZES = [ vm_size for vm_size in os.getenv("SELFTEST_GEN2_VM_SIZES", "").split(",") if vm_size ] if not GEN2_VM_SIZES: GEN2_VM_SIZES = [ sku.vm_size for sku in selftest.SKU_CONFIGS.values() if 
sku.vm_size_type == "x64" ] def safe_parse_bytes_literal(s: str) -> str: """Parse a bytes literal string (e.g. b'foo') safely to a string (foo).""" s = s.strip() if not s.startswith("b'") or not s.endswith("'"): return s # Strip the b' and trailing ' inner = s[2:-1] # Decode escape sequences (e.g., \r, \n, \x00) raw_bytes: bytes = codecs.escape_decode(inner.encode())[0] # type: ignore return raw_bytes.decode("utf-8", errors="ignore").rstrip("\x00") def _subprocess_run( cmd: List[str], *, artifacts_path: Path, artifact_name: str, check: bool = False, decode: bool = False, ) -> subprocess.CompletedProcess: """Run a subprocess command and capture outputs as utf-8.""" artifact_path = artifacts_path / artifact_name deadline = datetime.now(timezone.utc) + timedelta(minutes=30) printable_cmd = shlex.join(cmd) while datetime.now(timezone.utc) < deadline: logger.debug("executing command: %s", printable_cmd) proc = subprocess.run( cmd, check=False, capture_output=True, text=True, encoding="utf-8", errors="replace", ) logger.debug( "executed command=%s rc=%d artifact=%s", printable_cmd, proc.returncode, artifact_path.as_posix(), ) stdout = proc.stdout stderr = proc.stderr if decode: # Decode potential byte repr strings from get-boot-log. 
stdout = safe_parse_bytes_literal(stdout) stderr = safe_parse_bytes_literal(stderr) artifact_path.write_text( f"cmd: {printable_cmd}\nrc: {proc.returncode}\nstdout:\n{stdout!s}\nstderr:\n{stderr!s}", encoding="utf-8", ) if proc.returncode != 0 and proc.stderr.startswith("ssh:"): logger.debug("SSH failed, retrying...") time.sleep(5) continue if check and proc.returncode != 0: raise RuntimeError( f"Command '{printable_cmd}' failed with return code {proc.returncode}: {artifact_path.as_posix()}" ) return proc raise RuntimeError( f"Command '{printable_cmd}' timed out after 30 minutes: {artifacts_path.as_posix()}" ) @pytest.fixture def artifacts_path(tmp_path, vm_size, image, temp_vm_name): """Generate a temporary SSH key pair for the test.""" artifacts_path = os.getenv("SELFTEST_ARTIFACTS_PATH") if artifacts_path: tmp_path = Path(artifacts_path) path = tmp_path / vm_size / image.replace("/", "_").replace(":", "_") / temp_vm_name path.mkdir(exist_ok=True, parents=True) yield path @pytest.fixture def ssh_key_path(artifacts_path): """Generate a temporary SSH key pair for the test.""" path = artifacts_path / "id_rsa" _subprocess_run( ["ssh-keygen", "-t", "rsa", "-b", "2048", "-f", str(path), "-N", ""], artifact_name="ssh-keygen", artifacts_path=artifacts_path, check=True, ) yield path @pytest.fixture def azure_subscription(): """Get the Azure subscription ID.""" subscription = os.getenv("SELFTEST_AZURE_SUBSCRIPTION") or "" assert subscription, "SELFTEST_AZURE_SUBSCRIPTION environment variable is required" yield subscription @pytest.fixture def azure_location(): """Get the Azure location.""" location = os.getenv("SELFTEST_AZURE_LOCATION") assert location, "SELFTEST_AZURE_LOCATION environment variable is required" yield location @pytest.fixture def admin_username(): """Get the admin username.""" username = os.getenv("SELFTEST_ADMIN_USERNAME", "azureuser") yield username @pytest.fixture def admin_password(): """Get the admin password.""" yield 
os.getenv("SELFTEST_ADMIN_PASSWORD") @pytest.fixture def hold_failures(): """Get the hold failures flag.""" yield bool(os.getenv("SELFTEST_HOLD_FAILURES")) @pytest.fixture def random_suffix(): """Generate a random suffix for the resource group.""" yield str(uuid.uuid4().hex[:8]) @pytest.fixture def reboots(): """Get the number of reboots.""" yield int(os.getenv("SELFTEST_REBOOTS", "50")) @pytest.fixture def temp_vm_name(random_suffix): """Create a temporary resource group for the test.""" user = getpass.getuser() yield f"temp-vm-{user}-selftest-{random_suffix}" def is_cleanup_permitted(request, hold_failures: bool = False) -> bool: """Check to see if resources should be deleted.""" aborted = getattr(request.session, "aborted", False) failed = getattr(request.session, "failed", False) logger.debug( "test session aborted=%s failed=%s hold_failures=%s", aborted, failed, hold_failures, ) if hold_failures and failed: return False return True @pytest.fixture def temp_resource_group( azure_subscription, azure_location, delete_after_tag, hold_failures, random_suffix, request, artifacts_path, ): """Create a temporary resource group for the test.""" create_rg = True env_rg = os.getenv("SELFTEST_RESOURCE_GROUP") if env_rg: resource_group_name = env_rg proc = _subprocess_run( [ "az", "group", "show", "--subscription", azure_subscription, "--name", resource_group_name, ], artifact_name="az_group_show", artifacts_path=artifacts_path, ) if proc.returncode == 0: create_rg = False else: user = getpass.getuser() resource_group_name = f"temp-rg-{user}-selftest-{random_suffix}" if create_rg: _subprocess_run( [ "az", "group", "create", "--subscription", azure_subscription, "--location", azure_location, "--name", resource_group_name, "--tags", f"deleteAfter={delete_after_tag}", ], artifact_name="az_group_create", artifacts_path=artifacts_path, check=True, ) yield resource_group_name if not env_rg and is_cleanup_permitted(request, hold_failures): _subprocess_run( [ "az", "group", 
"delete", "--name", resource_group_name, "--yes", "--no-wait", ], artifact_name="az_group_delete", artifacts_path=artifacts_path, check=True, ) @pytest.fixture def public_ip( temp_resource_group, azure_subscription, azure_location, hold_failures, random_suffix, request, artifacts_path, ): """Create a temporary public IP address for the test.""" while True: ip_name = f"temp-ip-{random_suffix}" proc = _subprocess_run( [ "az", "network", "public-ip", "create", "--subscription", azure_subscription, "--resource-group", temp_resource_group, "--location", azure_location, "--name", ip_name, "--sku", "Standard", "--allocation-method", "Static", "--query", "publicIp.ipAddress", "--output", "tsv", ], artifact_name="az_public_ip_create", artifacts_path=artifacts_path, check=True, ) public_ip = proc.stdout.strip() delete_cmd = [ "az", "network", "public-ip", "delete", "--subscription", azure_subscription, "--resource-group", temp_resource_group, "--name", ip_name, ] # Temporary workaround for VPN-unroutable networks. if any( public_ip.startswith(prefix) for prefix in ("20.242", "68.154.", "128.24.") ): _subprocess_run( delete_cmd, check=True, artifacts_path=artifacts_path, artifact_name="az_public_ip_delete", ) continue yield public_ip, ip_name if is_cleanup_permitted(request, hold_failures): for _ in range(30): # Some retries to delete the public IP may be required. 
proc = _subprocess_run( delete_cmd, check=False, artifacts_path=artifacts_path, artifact_name="az_public_ip_delete", ) if proc.returncode == 0: break time.sleep(2) return @pytest.fixture def delete_after_tag(): """Get the deleteAfter tag value.""" yield (datetime.now(timezone.utc) + timedelta(hours=8)).isoformat() class TestVMs: """Test VMs with different images and sizes.""" @pytest.fixture(autouse=True) def setup( self, admin_username, admin_password, azure_location, azure_subscription, artifacts_path, delete_after_tag, hold_failures, public_ip, reboots, request, ssh_key_path, temp_resource_group, temp_vm_name, image, vm_size, ): """Initialize the test.""" self.admin_username = admin_username self.admin_password = admin_password self.artifacts_path = artifacts_path self.azure_location = azure_location self.azure_subscription = azure_subscription self.boot = 0 self.delete_after_tag = delete_after_tag self.hold_failures = hold_failures self.image = image self.nsg = os.getenv("SELFTEST_NSG") self.public_ip, self.public_ip_name = public_ip self.reboots = reboots self.request = request self.selftest_script_path = ( Path(os.path.abspath(__file__)).parent / "selftest.py" ) self.ssh_key_path = ssh_key_path self.target_script_path = f"/home/{self.admin_username}/selftest.py" self.temp_resource_group = temp_resource_group self.vm_name = temp_vm_name self.vm_size = vm_size self.ssh_command = [ "ssh", "-i", self.ssh_key_path.as_posix(), "-o", "StrictHostKeyChecking=no", f"{self.admin_username}@{self.public_ip}", "--", "sudo", ] self.vm_cfg = ( f"name={self.vm_name}\n" f"rg={self.temp_resource_group}\n" f"ip={self.public_ip}\n" f"image={self.image}\n" f"size={self.vm_size}\n" f"ssh_key_path={self.ssh_key_path.as_posix()}\n" f'ssh_cmd="{shlex.join(self.ssh_command)}"\n' f'console_cmd="az serial-console connect -n {self.vm_name} -g {self.temp_resource_group}"\n' f"artifacts_path={self.artifacts_path.as_posix()}\n" ) try: yield finally: if is_cleanup_permitted(request, 
hold_failures): logger.info("CLEANING:\n%s", self.vm_cfg) self._cleanup() else: logger.error( "TEST FAILED, HOLDING RESOURCES FOR DEBUGGING:\n%s", self.vm_cfg, ) def _cleanup(self): """Cleanup resources.""" proc = _subprocess_run( [ "az", "vm", "delete", "--subscription", self.azure_subscription, "--resource-group", self.temp_resource_group, "--name", self.vm_name, "--yes", ], artifacts_path=self.artifacts_path, artifact_name="az_vm_delete", ) if proc.returncode != 0: logger.error("failed to delete VM %s: %s", self.vm_name, proc.stderr) def _create_vm(self) -> None: """Create a VM.""" cmd = [ "az", "vm", "create", "--subscription", self.azure_subscription, "--resource-group", self.temp_resource_group, "--location", self.azure_location, "--name", self.vm_name, "--image", self.image, "--size", self.vm_size, "--public-ip-address", self.public_ip_name, "--admin-username", self.admin_username, "--ssh-key-value", f"{self.ssh_key_path}.pub", "--data-disk-sizes-gb", "100", "200", "300", "400", "--accept-term", "--data-disk-delete-option", "Delete", "--nic-delete-option", "Delete", "--os-disk-delete-option", "Delete", "--tags", f"deleteAfter={self.delete_after_tag}", ] if self.admin_password: cmd += ["--admin-password", self.admin_password] if self.nsg: cmd += ["--nsg", self.nsg] proc = _subprocess_run( cmd, artifacts_path=self.artifacts_path, artifact_name="az_vm_create", ) if proc.returncode != 0: # Skip the test if the VM creation failed and indicate the reason. 
if "QuotaExceeded" in proc.stderr: pytest.skip( f"Unable to create VM due to QuotaExceeded for {self.vm_size}: {proc.stderr}" ) if "SkuNotAvailable" in proc.stderr: pytest.skip( f"Unable to create VM due to SkuNotAvailable for {self.vm_size}: {proc.stderr}" ) pytest.skip( f"Unable to create VM: stdout={proc.stdout} stderr={proc.stderr}" ) logger.info("CREATED VM:\n%s", self.vm_cfg) (self.artifacts_path / "vm.cfg").write_text(self.vm_cfg) # Enable boot diagnostics proc = _subprocess_run( [ "az", "vm", "boot-diagnostics", "enable", "--subscription", self.azure_subscription, "--resource-group", self.temp_resource_group, "--name", self.vm_name, ], artifacts_path=self.artifacts_path, artifact_name="az_vm_boot_diagnostics_enable", ) def _execute_ssh_command( self, command: List[str], *, artifact_name: str, decode: bool = False ) -> subprocess.CompletedProcess: """Execute a command on the VM with sudo.""" proc = _subprocess_run( self.ssh_command + command, artifacts_path=self.artifacts_path, artifact_name=artifact_name, decode=decode, ) return proc def _fetch_boot_diagnostics(self) -> None: """Fetch boot diagnostics logs.""" for _ in range(300): proc = _subprocess_run( [ "az", "vm", "boot-diagnostics", "get-boot-log", "--subscription", self.azure_subscription, "--resource-group", self.temp_resource_group, "--name", self.vm_name, "--output", "tsv", ], artifacts_path=self.artifacts_path, artifact_name=f"boot{self.boot}-console", decode=True, ) if proc.returncode == 0 and "BlobNotFound" not in proc.stderr: return time.sleep(1) def _fetch_logs(self) -> None: """Fetch logs.""" proc = self._execute_ssh_command( ["journalctl", "-o", "short-monotonic", "-b"], artifact_name=f"boot{self.boot}-journal", ) if proc.returncode != 0: raise RuntimeError( f"failed to fetch journalctl logs on boot {self.boot} @ {self.artifacts_path.as_posix()}: {proc.stderr}" ) self._scp_from_vm("/var/log/cloud-init.log", f"boot{self.boot}-cloud-init.log") self._scp_from_vm( 
"/var/log/cloud-init-output.log", f"boot{self.boot}-cloud-init-output.log" ) self._execute_ssh_command( ["rm", "-f", "/var/log/cloud-init.log", "/var/log/cloud-init-output.log"], artifact_name=f"boot{self.boot}-rm-logs", ) def _run_selftest_script(self) -> None: """Run selftest script on the VM.""" proc = self._execute_ssh_command( [self.target_script_path, "--debug"], artifact_name=f"boot{self.boot}-selftest", ) if proc.returncode != 0: raise RuntimeError( f"self-test failed on boot {self.boot} @ {self.artifacts_path.as_posix()}: {proc.stderr}" ) def _scp_from_vm(self, src: str, artifact_name: str) -> Path: """Copy file from the VM using ssh with sudo.""" dst = self.artifacts_path / artifact_name proc = self._execute_ssh_command( ["cat", src], artifact_name=f"{artifact_name}.cmd" ) if proc.returncode != 0: raise RuntimeError( f"failed to copy {src} -> {dst.as_posix()}: {proc.stderr}" ) dst.write_text(proc.stdout, encoding="utf-8") return dst def _scp_to_vm(self, src: Path, dst: str) -> None: """Copy file to VM.""" src_path = src.as_posix() dst = f"{self.admin_username}@{self.public_ip}:{dst}" proc = _subprocess_run( [ "scp", "-i", self.ssh_key_path.as_posix(), src_path, dst, ], artifacts_path=self.artifacts_path, artifact_name=f"scp-{src.name}", ) if proc.returncode != 0: raise RuntimeError(f"failed to scp {src_path} -> {dst}: {proc.stderr}") def _reboot_vm(self) -> None: """Reboot the VM.""" self._execute_ssh_command(["reboot"], artifact_name="reboot") time.sleep(10) self._wait_for_vm() def _verify_image(self) -> None: """Verify the image has scrubbed conflicting configurations.""" for config in [ "/etc/NetworkManager/conf.d/99-azure-unmanaged-devices.conf", "/etc/udev/rules.d/68-azure-sriov-nm-unmanaged.rules", ]: proc = self._execute_ssh_command( ["test", "-f", config], artifact_name=f"verify-image-{os.path.basename(config)}", ) if proc.returncode == 0: raise RuntimeError(f"config {config} found in {self.image}") def _wait_for_vm(self) -> None: """Wait for the 
VM to be ready.""" while True: logger.debug("waiting for VM to be ready on boot %d...", self.boot) self._execute_ssh_command( ["cloud-init", "status", "--wait"], artifact_name="cloud-init-status" ) proc = self._execute_ssh_command( ["systemctl", "is-system-running", "--wait"], artifact_name="systemctl-status", ) status = proc.stdout.strip() if not status: status = "failed to ssh" logger.debug("VM status: %s", status) if status in ("running", "degraded"): return logger.debug("debug: %s bash", shlex.join(self.ssh_command)) logger.debug( " az serial-console connect --name %s --resource-group %s", self.vm_name, self.temp_resource_group, ) time.sleep(2) def run_test(self) -> None: """Create VM and run self-tests.""" try: logger.debug("artifacts path: %s", self.artifacts_path.as_posix()) logger.debug("public IP: %s", self.public_ip) self._create_vm() self._wait_for_vm() self._scp_to_vm( self.selftest_script_path, self.target_script_path, ) self._verify_image() reboots = int(os.getenv("SELFTEST_REBOOTS", "50")) for self.boot in range(0, reboots): self._fetch_boot_diagnostics() self._fetch_logs() self._run_selftest_script() self._reboot_vm() except Exception as error: self.request.session.failed = True logger.error( "test failed: %r\nartifacts: %s\ntraceback: %s", error, self.artifacts_path.as_posix(), traceback.format_exc(), ) raise RuntimeError( f"test failed: {error} @ {self.artifacts_path.as_posix()}" ) from error @pytest.mark.parametrize( "image", ARM64_IMAGES, ) @pytest.mark.parametrize( "vm_size", ARM64_VM_SIZES, ) def test_arm64(self, image, vm_size): """Test arm64 images.""" self.run_test() @pytest.mark.parametrize( "image", GEN1_IMAGES, ) @pytest.mark.parametrize( "vm_size", GEN1_VM_SIZES, ) def test_gen1_x64(self, image, vm_size): """Test gen1 x64 images.""" self.run_test() @pytest.mark.parametrize( "image", GEN2_IMAGES, ) @pytest.mark.parametrize( "vm_size", GEN2_VM_SIZES, ) def test_gen2_x64(self, image, vm_size): """Test gen2 x64 images.""" self.run_test() 
azure-vm-utils-0.6.0/src/000077500000000000000000000000001500025016600152035ustar00rootroot00000000000000azure-vm-utils-0.6.0/src/debug.c000066400000000000000000000007131500025016600164360ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. */ #include "debug.h" bool debug = false; extern char **environ; /** * Dump environment variables. */ void debug_environment_variables() { int i = 0; DEBUG_PRINTF("Environment Variables:\n"); while (environ[i]) { DEBUG_PRINTF("%s\n", environ[i]); i++; } } azure-vm-utils-0.6.0/src/debug.h000066400000000000000000000022461500025016600164460ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. */ #ifndef __DEBUG_H__ #define __DEBUG_H__ #include #include extern bool debug; #define DEBUG_PRINTF(fmt, ...) \ do \ { \ if (debug == true) \ { \ fprintf(stderr, "DEBUG: " fmt, ##__VA_ARGS__); \ } \ } while (0) void debug_environment_variables(); #endif // __DEBUG_H__ azure-vm-utils-0.6.0/src/identify_disks.c000066400000000000000000000216361500025016600203670ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. */ #include #include #include #include #include #include #include #include "debug.h" #include "identify_disks.h" #include "nvme.h" /** * Trim trailing whitespace from a string in-place. */ void trim_trailing_whitespace(char *str) { size_t len = strlen(str); while (len > 0 && isspace((unsigned char)str[len - 1])) { str[--len] = '\0'; // Replace trailing whitespace with null terminator } } /** * Callback for scandir() to filter for NVMe namespaces. 
*/ int is_nvme_namespace(const struct dirent *entry) { unsigned int ctrl, nsid; char p; // Check for NVME controller name format: nvmen return sscanf(entry->d_name, "nvme%un%u%c", &ctrl, &nsid, &p) == 2; } /** * Identify namespace without using vendor-specific command set. * Used as a fallback when vendor-specific command set is not supported. */ char *identify_namespace_without_vs(const char *controller_sys_path, const char *namespace_path) { char model_name[MAX_PATH] = {0}; char model_path[MAX_PATH]; snprintf(model_path, sizeof(model_path), "%s/model", controller_sys_path); FILE *file = fopen(model_path, "r"); if (file == NULL) { DEBUG_PRINTF("failed to open %s: %m\n", model_path); return strdup(""); } char *result = fgets(model_name, sizeof(model_name), file); if (result == NULL) { DEBUG_PRINTF("failed to read model name from %s: %m\n", model_path); fclose(file); return strdup(""); } fclose(file); trim_trailing_whitespace(model_name); DEBUG_PRINTF("read model name=\"%s\"\n", model_name); if (strcmp(model_name, "MSFT NVMe Accelerator v1.0") == 0) { // Microsoft Azure NVMe Accelerator v1.0 supports remote OS and data disks. // nsid=1 is the OS disk and nsid=2+ for data disks. // A data disk's lun id == nsid - 2. int nsid = get_nsid_from_namespace_device_path(namespace_path); if (nsid == 1) { return strdup("type=os"); } else { char *output = NULL; if (asprintf(&output, "type=data,lun=%d", nsid - 2) > 0) { return output; } } } else if (strcmp(model_name, "Microsoft NVMe Direct Disk") == 0 || strcmp(model_name, "Microsoft NVMe Direct Disk v2") == 0) { return strdup("type=local"); } return strdup(""); } /** * Parse a vs string and turn it into a JSON dictionary. 
* Example: "type=local,index=2,name=nvme-600G-2" */ json_object *parse_vs_string(const char *vs) { json_object *vs_obj = json_object_new_object(); char *vs_copy = strdup(vs); char *token; char *rest = vs_copy; while ((token = strtok_r(rest, ",", &rest))) { char *key = strtok_r(token, "=", &token); char *value = strtok_r(NULL, "=", &token); if (key != NULL && value != NULL) { int int_value = atoi(value); if ((strcmp(key, "lun") == 0) || (strcmp(key, "index") == 0)) { if (int_value == 0 && strcmp(value, "0") != 0) { fprintf(stderr, "failed to parse vs=%s key=%s value=%s as int\n", vs, key, value); json_object_object_add(vs_obj, key, json_object_new_string(value)); } else { json_object_object_add(vs_obj, key, json_object_new_int(int_value)); } } else { json_object_object_add(vs_obj, key, json_object_new_string(value)); } } } free(vs_copy); return vs_obj; } /** * Enumerate namespaces for a controller. */ void enumerate_namespaces_for_controller(struct nvme_controller *ctrl, struct context *ctx, json_object *namespaces_array) { struct dirent **namelist; int n = scandir(ctrl->sys_path, &namelist, is_nvme_namespace, versionsort); if (n < 0) { fprintf(stderr, "failed scandir for %s: %m\n", ctrl->sys_path); return; } DEBUG_PRINTF("found %d namespace(s) for controller=%s:\n", n, ctrl->name); for (int i = 0; i < n; i++) { char namespace_path[MAX_PATH]; snprintf(namespace_path, sizeof(namespace_path), "/dev/%s", namelist[i]->d_name); char *vs = nvme_identify_namespace_vs_for_namespace_device(namespace_path); char *id = NULL; json_object *namespace_obj = json_object_new_object(); json_object_object_add(namespace_obj, "path", json_object_new_string(namespace_path)); json_object_object_add(namespace_obj, "model", json_object_new_string(ctrl->model)); json_object_array_add(namespaces_array, namespace_obj); if (vs != NULL) { if (vs[0] == 0) { id = identify_namespace_without_vs(ctrl->sys_path, namespace_path); } else { id = strdup(vs); } if (ctx->output_format == PLAIN) { 
printf("%s: %s\n", namespace_path, id); } } if (id != NULL) { json_object_object_add(namespace_obj, "properties", parse_vs_string(id)); free(id); } else { json_object_object_add(namespace_obj, "properties", json_object_new_object()); } if (vs != NULL) { json_object_object_add(namespace_obj, "vs", json_object_new_string(vs)); free(vs); } else { json_object_object_add(namespace_obj, "vs", NULL); } free(namelist[i]); } free(namelist); } /** * Check if NVMe vendor in sysfs matches Microsoft's vendor ID. */ int is_microsoft_nvme_device(const char *device_name) { char vendor_id_path[MAX_PATH]; snprintf(vendor_id_path, sizeof(vendor_id_path), "%s/%s/device/vendor", SYS_CLASS_NVME_PATH, device_name); FILE *file = fopen(vendor_id_path, "r"); if (file == NULL) { return 0; } unsigned int vendor_id; int result = fscanf(file, "%x", &vendor_id); fclose(file); if (result != 1) { return 0; } return vendor_id == MICROSOFT_NVME_VENDOR_ID; } /** * Callback for scandir() to filter for Microsoft Azure NVMe controllers. */ int is_azure_nvme_controller(const struct dirent *entry) { unsigned int ctrl; char p; // Check for NVME controller name format: nvme if (sscanf(entry->d_name, "nvme%u%c", &ctrl, &p) != 1) { return false; } return is_microsoft_nvme_device(entry->d_name); } /** * Initialize nvme_controller structure. Read model name from sysfs, * trimming off whitespaces. 
*/ bool initialize_nvme_controller(struct nvme_controller *ctrl, const char *name) { char model_path[sizeof(ctrl->sys_path) + sizeof("/model")]; snprintf(ctrl->name, sizeof(ctrl->name), "%s", name); snprintf(ctrl->dev_path, sizeof(ctrl->dev_path), "/dev/%s", ctrl->name); snprintf(ctrl->sys_path, sizeof(ctrl->sys_path), "%s/%s", SYS_CLASS_NVME_PATH, ctrl->name); snprintf(model_path, sizeof(model_path), "%s/model", ctrl->sys_path); FILE *file = fopen(model_path, "r"); if (file == NULL) { DEBUG_PRINTF("failed to open %s: %m\n", model_path); return false; } char *result = fgets(ctrl->model, sizeof(ctrl->model), file); if (result == NULL) { DEBUG_PRINTF("failed to read model name from %s: %m\n", model_path); fclose(file); return false; } fclose(file); trim_trailing_whitespace(ctrl->model); return true; } /** * Enumerate Microsoft Azure NVMe controllers and identify disks. */ int identify_disks(struct context *ctx) { struct dirent **namelist = NULL; json_object *namespaces_array = json_object_new_array(); int n = scandir(SYS_CLASS_NVME_PATH, &namelist, is_azure_nvme_controller, versionsort); if (n < 0) { fprintf(stderr, "no NVMe devices in %s: %m\n", SYS_CLASS_NVME_PATH); n = 0; } DEBUG_PRINTF("found %d controllers\n", n); for (int i = 0; i < n; i++) { struct nvme_controller ctrl; initialize_nvme_controller(&ctrl, namelist[i]->d_name); enumerate_namespaces_for_controller(&ctrl, ctx, namespaces_array); free(namelist[i]); } free(namelist); if (ctx->output_format == JSON) { int flags = JSON_C_TO_STRING_NOSLASHESCAPE | JSON_C_TO_STRING_PRETTY | JSON_C_TO_STRING_SPACED; const char *json_output = json_object_to_json_string_ext(namespaces_array, flags); printf("%s\n", json_output); json_object_put(namespaces_array); } return 0; } azure-vm-utils-0.6.0/src/identify_disks.h000066400000000000000000000023071500025016600203660ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. 
/**
 * Emit one udev environment assignment of the form AZURE_DISK_<KEY>=<value>,
 * upper-casing the key as it is written.
 *
 * @param key   Property name (written upper-cased after the AZURE_DISK_ prefix).
 * @param value Property value (written verbatim).
 */
void print_udev_key_value(const char *key, const char *value)
{
    fputs("AZURE_DISK_", stdout);
    for (const char *p = key; *p != '\0'; p++)
    {
        // Cast avoids undefined behavior for negative char values.
        putchar(toupper((unsigned char)*p));
    }
    printf("=%s\n", value);
}
* * Example parsing for vs: * type=local,index=2,name=nvme-600G-2 * * Environment variables printed include: * AZURE_DISK_TYPE=local * AZURE_DISK_INDEX=2 * AZURE_DISK_NAME=nvme-600G-2 */ int print_udev_key_values_for_vs(char *vs) { char *vs_copy = strdup(vs); char *outer_saveptr = NULL; char *inner_saveptr = NULL; char *pair = strtok_r(vs_copy, ",", &outer_saveptr); while (pair != NULL) { char *key = strtok_r(pair, "=", &inner_saveptr); char *value = strtok_r(NULL, "=", &inner_saveptr); if (key == NULL || value == NULL) { fprintf(stderr, "failed to parse key-value pair: %s\n", pair); free(vs_copy); return 1; } print_udev_key_value(key, value); pair = strtok_r(NULL, ",", &outer_saveptr); } free(vs_copy); return 0; } /** * Execute udev mode, printing out environment variables for import. * * The udev rules will trigger for Azure NVMe controllers, so no additional * checks are needed. The device path will be in DEVNAME environment variable. * * Environment variables printed include: * - AZURE_DISK_VS: * - AZURE_DISK_TYPE: * - AZURE_DISK_INDEX: * - AZURE_DISK_NAME: * * @return 0 on success, non-zero on error. */ int identify_udev_device(void) { const char *dev_name = getenv("DEVNAME"); if (dev_name == NULL) { fprintf(stderr, "environment variable 'DEVNAME' not set\n"); return 1; } char *vs = nvme_identify_namespace_vs_for_namespace_device(dev_name); if (vs == NULL) { fprintf(stderr, "failed to query namespace vendor-specific data: %s\n", dev_name); return 1; } printf("AZURE_DISK_VS=%s\n", vs); print_udev_key_values_for_vs(vs); free(vs); return 0; } azure-vm-utils-0.6.0/src/identify_udev.h000066400000000000000000000006101500025016600202070ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. 
/**
 * Print command-line usage for the given program name.
 *
 * @param program argv[0]-style program name to embed in the usage string.
 */
void print_help(const char *program)
{
    // The usage summary must list every option documented below; it
    // previously omitted -f/--format even though main() parses it.
    printf("Usage: %s [-d|--debug] [-f|--format {plain|json}] [-u|--udev|-h|--help|-v|--version]\n", program);
    printf("  -d, --debug                Enable debug mode\n");
    printf("  -f, --format {plain|json}  Output format (default=plain)\n");
    printf("  -h, --help                 Display this help message\n");
    printf("  -u, --udev                 Enable udev mode\n");
    printf("  -v, --version              Display the version\n");
}
/**
 * Extract the namespace id from a namespace device path.
 *
 * Examples:
 *   /dev/nvme0n5   -> 5
 *   /dev/nvme2n12  -> 12
 *   /dev/nvme100n1 -> 1
 *
 * @param namespace_path Device path of the form /dev/nvme<ctrl>n<nsid>.
 *
 * @return Namespace ID, or -1 when the path does not match the pattern.
 */
int get_nsid_from_namespace_device_path(const char *namespace_path)
{
    unsigned int controller_index = 0;
    unsigned int namespace_id = 0;

    int matched = sscanf(namespace_path, "/dev/nvme%un%u", &controller_index, &namespace_id);
    return (matched == 2) ? (int)namespace_id : -1;
}
/**
 * Execute the NVMe Identify Namespace admin command (opcode 0x06).
 *
 * @param device_path Path to the NVMe device, either controller or namespace.
 * @param nsid        Namespace ID to identify.
 *
 * @return Pointer to a heap-allocated nvme_id_ns structure (caller frees
 *         with free()), or NULL on failure.
 */
struct nvme_id_ns *nvme_identify_namespace(const char *device_path, int nsid)
{
    int fd = open(device_path, O_RDONLY);
    if (fd < 0)
    {
        fprintf(stderr, "failed to open %s: %m\n", device_path);
        return NULL;
    }

    long pagesize = sysconf(_SC_PAGESIZE);
    if (pagesize < 0)
    {
        fprintf(stderr, "failed sysconf: %m\n");
        close(fd);
        return NULL;
    }

    // Allocate the identify data buffer page-aligned; the kernel fills it
    // via the admin command's addr/data_len fields below.
    struct nvme_id_ns *ns = NULL;
    if (posix_memalign((void **)&ns, pagesize, sizeof(struct nvme_id_ns)) != 0)
    {
        fprintf(stderr, "failed posix_memalign for %s: %m\n", device_path);
        close(fd);
        return NULL;
    }

    struct nvme_admin_cmd cmd = {
        .opcode = 0x06, // Identify namespace command
        .nsid = nsid,
        .addr = (unsigned long)ns,
        .data_len = sizeof(*ns),
    };

    // On ioctl failure both the buffer and the fd must be released; the
    // success path keeps the buffer (ownership transfers to the caller).
    if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
    {
        fprintf(stderr, "failed NVME_IOCTL_ADMIN_CMD ioctl for %s: %m\n", device_path);
        free(ns);
        close(fd);
        return NULL;
    }

    close(fd);
    return ns;
}
/**
 * Query vendor-specific data for a namespace given only its device path.
 *
 * Convenience wrapper around nvme_identify_namespace_vs() that derives the
 * namespace id from the path itself (e.g. /dev/nvme0n2 -> nsid 2).
 *
 * @param namespace_path Path to the NVMe namespace device.
 *
 * @return Vendor-specific data string (caller frees) or NULL on failure.
 */
char *nvme_identify_namespace_vs_for_namespace_device(const char *namespace_path)
{
    const int nsid = get_nsid_from_namespace_device_path(namespace_path);

    if (nsid >= 0)
    {
        return nvme_identify_namespace_vs(namespace_path, nsid);
    }

    fprintf(stderr, "failed to parse namespace id: %s\n", namespace_path);
    return NULL;
}
/**
 * Redirect stdout and stderr into non-blocking pipes so tests can inspect
 * everything the code under test prints.
 *
 * Saves the original stdout/stderr fds (restored by capture_teardown), then
 * points STDOUT_FILENO/STDERR_FILENO at the write ends of fresh pipes.
 * Any setup failure aborts the test process via exit(EXIT_FAILURE).
 *
 * @param state cmocka state pointer (unused).
 */
void capture_setup(void **state)
{
    (void)state; // Unused parameter

    // Start each test with empty capture buffers.
    memset(capture_stdout_buffer, 0, BUFFER_SIZE);
    memset(capture_stderr_buffer, 0, BUFFER_SIZE);

    if (pipe(stdout_pipe) == -1 || pipe(stderr_pipe) == -1)
    {
        perror("pipe failed");
        exit(EXIT_FAILURE);
    }

    // Preserve the real stdout/stderr so teardown can restore them.
    stdout_fd = dup(STDOUT_FILENO);
    stderr_fd = dup(STDERR_FILENO);
    if (stdout_fd == -1 || stderr_fd == -1)
    {
        perror("dup failed");
        exit(EXIT_FAILURE);
    }

    // Route the standard streams into the pipes' write ends.
    if (dup2(stdout_pipe[1], STDOUT_FILENO) == -1 || dup2(stderr_pipe[1], STDERR_FILENO) == -1)
    {
        perror("dup2 failed");
        exit(EXIT_FAILURE);
    }

    // The duplicated descriptors now own the write ends; close the
    // originals via the un-mocked close() to avoid test interference.
    capture_close(stdout_pipe[1]);
    capture_close(stderr_pipe[1]);

    // Set pipes to non-blocking mode so capture_stdout/capture_stderr can
    // drain whatever is available without ever blocking the test.
    fcntl(stdout_pipe[0], F_SETFL, O_NONBLOCK);
    fcntl(stderr_pipe[0], F_SETFL, O_NONBLOCK);
}
/**
 * Drain any pending captured stdout into the capture buffer and return it.
 *
 * Appends newly-available pipe data after whatever was read previously
 * (offset starts at the current strlen of the buffer), so repeated calls
 * accumulate output across a test.  Content is truncated at BUFFER_SIZE-1.
 *
 * @return NUL-terminated captured stdout (static buffer; do not free).
 */
const char *capture_stdout()
{
    if (stdout_pipe[0] != -1)
    {
        // Push buffered stdio data into the pipe before reading it back.
        fflush(stdout);

        // Read data from stdout pipe
        ssize_t bytes_read;
        size_t offset = strlen(capture_stdout_buffer);
        while ((bytes_read = read(stdout_pipe[0], capture_stdout_buffer + offset, BUFFER_SIZE - 1 - offset)) > 0)
        {
            offset += bytes_read;
            if (offset >= BUFFER_SIZE - 1)
            {
                // Buffer full; drop the rest rather than overflow.
                break;
            }
        }
        capture_stdout_buffer[offset] = '\0';
    }

    return capture_stdout_buffer;
}
*/ #ifndef __CAPTURE_H__ #define __CAPTURE_H__ void capture_setup(void **state); void capture_teardown(void **state); const char *capture_stdout(); const char *capture_stderr(); #endif // __CAPTURE_H__ azure-vm-utils-0.6.0/tests/debug_tests.c000066400000000000000000000040341500025016600202330ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. */ #include #include #include #include #include #include #include // clange-format off #include // clange-format on #include "capture.h" #include "debug.h" extern char **environ; static const char *environ_test[] = { "ENV1=VALUE1", "ENV2=VALUE2", "ENV3=VALUE3", NULL, }; static char **original_environ = NULL; static int setup(void **state) { (void)state; // Unused parameter original_environ = environ; environ = (char **)environ_test; capture_setup(state); return 0; } static int teardown(void **state) { (void)state; // Unused parameter environ = original_environ; debug = false; capture_teardown(state); return 0; } static void test_debug_environment_variables_with_debug_enabled(void **state) { (void)state; // Unused parameter debug = true; const char *expected_output = "DEBUG: Environment Variables:\n" "DEBUG: ENV1=VALUE1\n" "DEBUG: ENV2=VALUE2\n" "DEBUG: ENV3=VALUE3\n"; debug_environment_variables(); assert_string_equal(capture_stderr(), expected_output); assert_string_equal(capture_stdout(), ""); } static void test_debug_environment_variables_with_debug_disabled(void **state) { (void)state; // Unused parameter debug = false; debug_environment_variables(); assert_string_equal(capture_stderr(), ""); assert_string_equal(capture_stdout(), ""); } int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test_setup_teardown(test_debug_environment_variables_with_debug_enabled, setup, teardown), cmocka_unit_test_setup_teardown(test_debug_environment_variables_with_debug_disabled, setup, teardown), 
}; return cmocka_run_group_tests(tests, NULL, NULL); } azure-vm-utils-0.6.0/tests/identify_disks_tests.c000066400000000000000000000575231500025016600221700ustar00rootroot00000000000000#include #include #include #include #include #include #include #include #include #include // clang-format off #include // clang-format on #include "capture.h" #include "identify_disks.h" #define MICROSOFT_NVME_DIRECT_DISK_V1 "Microsoft NVMe Direct Disk \n" #define MICROSOFT_NVME_DIRECT_DISK_V2 "Microsoft NVMe Direct Disk v2 \n" #define MSFT_NVME_ACCELERATOR_MODEL_V1 "MSFT NVMe Accelerator v1.0 \n" char *fake_sys_class_nvme_path = "/sys/class/nvme"; char *__wrap_nvme_identify_namespace_vs_for_namespace_device(const char *namespace_path) { check_expected_ptr(namespace_path); return mock_ptr_type(char *); } static void remove_temp_dir(const char *path) { struct dirent *entry; DIR *dir = opendir(path); if (dir == NULL) { return; } while ((entry = readdir(dir)) != NULL) { char full_path[MAX_PATH]; if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) { continue; } snprintf(full_path, sizeof(full_path), "%s/%s", path, entry->d_name); if (entry->d_type == DT_DIR) { remove_temp_dir(full_path); } else { remove(full_path); } } closedir(dir); rmdir(path); } static void create_intermediate_dirs(const char *path) { char temp_path[MAX_PATH]; snprintf(temp_path, sizeof(temp_path), "%s", path); char *p = temp_path; while (*p) { if (*p == '/') { *p = '\0'; mkdir(temp_path, 0755); *p = '/'; } p++; } } static void create_dir(const char *base_path, const char *sub_path) { char full_path[MAX_PATH]; snprintf(full_path, sizeof(full_path), "%s/%s", base_path, sub_path); create_intermediate_dirs(full_path); mkdir(full_path, 0755); } static void create_file(const char *base_path, const char *sub_path, const char *content) { char full_path[MAX_PATH]; snprintf(full_path, sizeof(full_path), "%s/%s", base_path, sub_path); create_intermediate_dirs(full_path); FILE *file = fopen(full_path, "w"); 
if (file == NULL) { perror("fopen"); return; } fputs(content, file); fclose(file); } static int setup(void **state) { capture_setup(state); char template[] = "/tmp/nvme_test_XXXXXX"; char *temp_path = mkdtemp(template); if (temp_path == NULL) { perror("mkdtemp"); return -1; } *state = strdup(temp_path); fake_sys_class_nvme_path = *state; return 0; } static int teardown(void **state) { capture_teardown(state); if (*state != NULL) { remove_temp_dir(*state); free(*state); *state = NULL; } fake_sys_class_nvme_path = SYS_CLASS_NVME_PATH; return 0; } /** * Setup nvme0: microsoft disk controler, no namespaces. */ static void _setup_nvme0_microsoft_no_name_namespaces(void) { create_file(fake_sys_class_nvme_path, "nvme0/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme0/model", "Unknown model"); // nothing to expect as no namespaces are iterated through. } /** * Setup nvme1: microsoft disk, one namespace. */ static void _setup_nvme1_microsoft_one_namespace(void) { create_file(fake_sys_class_nvme_path, "nvme1/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme1/model", "Unknown model"); create_dir(fake_sys_class_nvme_path, "nvme1/nvme1n1"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme1n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme1n1value1,key2=nvme1n1value2")); } /** * Setup nvme2: microsoft disk, two namespaces. 
*/ static void _setup_nvme2_microsoft_two_namespaces(void) { create_file(fake_sys_class_nvme_path, "nvme2/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme2/model", "Unknown model"); create_dir(fake_sys_class_nvme_path, "nvme2/nvme2n1"); create_dir(fake_sys_class_nvme_path, "nvme2/nvme2n2"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme2n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme2n1value1,key2=nvme2n1value2")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme2n2"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme2n2value1,key2=nvme2n2value2")); } // No nvme3. /** * Setup nvme4: non-microsoft disk. */ static void _setup_nvme4_non_microsoft(void) { create_file(fake_sys_class_nvme_path, "nvme4/device/vendor", "0x0000"); create_file(fake_sys_class_nvme_path, "nvme4/model", "Unknown model"); create_dir(fake_sys_class_nvme_path, "nvme4/nvme4n1"); create_dir(fake_sys_class_nvme_path, "nvme4/nvme4n2"); } /** * Setup nvme5: microsoft disk, empty vs for nsid=2, error on nsid=3. 
*/ static void _setup_nvme5_microsoft_mixed_namespaces(void) { create_file(fake_sys_class_nvme_path, "nvme5/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme5/model", "Unknown model"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n1"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n2"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n3"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n4"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n32"); create_dir(fake_sys_class_nvme_path, "nvme5/nvme5n315"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme5n1value1,key2=nvme5n1value2")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n2"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n3"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, NULL); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n4"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme5n4value1,key2=nvme5n4value2")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n32"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme5n32value1")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme5n315"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme5n315value1")); } /** * Setup nvme6: remote accelerator v1 with vs. 
*/ static void _setup_nvme6_remote_accelerator_v1_with_vs(void) { create_file(fake_sys_class_nvme_path, "nvme6/device/vendor", "0x1414"); create_dir(fake_sys_class_nvme_path, "nvme6/nvme6n1"); create_file(fake_sys_class_nvme_path, "nvme6/model", MSFT_NVME_ACCELERATOR_MODEL_V1); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme6n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("key1=nvme6n1value1,key2=nvme6n1value2")); } /** * Setup nvme7: remote accelerator v1 without vs. */ static void _setup_nvme7_remote_accelerator_v1_without_vs(void) { create_file(fake_sys_class_nvme_path, "nvme7/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme7/model", MSFT_NVME_ACCELERATOR_MODEL_V1); create_dir(fake_sys_class_nvme_path, "nvme7/nvme7n1"); create_dir(fake_sys_class_nvme_path, "nvme7/nvme7n2"); create_dir(fake_sys_class_nvme_path, "nvme7/nvme7n3"); create_dir(fake_sys_class_nvme_path, "nvme7/nvme7n4"); create_dir(fake_sys_class_nvme_path, "nvme7/nvme7n9"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme7n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme7n2"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme7n3"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme7n4"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme7n9"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); } /** * Setup nvme8: 
direct disk v1 which doesn't have vs support. */ static void _setup_nvme8_direct_disk_v1_without_vs(void) { create_file(fake_sys_class_nvme_path, "nvme8/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme8/model", MICROSOFT_NVME_DIRECT_DISK_V1); create_dir(fake_sys_class_nvme_path, "nvme8/nvme8n1"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme8n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); } /** * Setup nvme9: direct disk v2 which has vs support. */ static void _setup_nvme9_direct_disk_v2(void) { create_file(fake_sys_class_nvme_path, "nvme9/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme9/model", MICROSOFT_NVME_DIRECT_DISK_V2); create_dir(fake_sys_class_nvme_path, "nvme9/nvme9n1"); create_dir(fake_sys_class_nvme_path, "nvme9/nvme9n2"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme9n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("type=local,index=0,name=nvme-500G-0")); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme9n2"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("type=local,index=1,name=nvme-500G-1")); } /** * Setup nvme10: direct disk v2 which is missing vs support (unexpected case). */ static void _setup_nvme10_direct_disk_v2_missing_vs(void) { create_file(fake_sys_class_nvme_path, "nvme10/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme10/model", MICROSOFT_NVME_DIRECT_DISK_V2); create_dir(fake_sys_class_nvme_path, "nvme10/nvme10n1"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme10n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("")); } /** * Setup nvme11: disk with non-integer lun and index (unexpected case). 
*/ static void _setup_nvme11_non_integer_lun_and_index(void) { create_file(fake_sys_class_nvme_path, "nvme11/device/vendor", "0x1414"); create_file(fake_sys_class_nvme_path, "nvme11/model", "Unknown model"); create_dir(fake_sys_class_nvme_path, "nvme11/nvme11n1"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme11n1"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, strdup("type=local,index=foo,lun=bar")); } static void test_trim_trailing_whitespace(void **state) { (void)state; // Unused parameter struct { char input[MAX_PATH]; char expected[MAX_PATH]; } test_cases[] = {{"NoTrailingWhitespace", "NoTrailingWhitespace"}, {"TrailingSpaces ", "TrailingSpaces"}, {"TrailingTabs\t\t\t", "TrailingTabs"}, {"TrailingNewline\n", "TrailingNewline"}, {"TrailingMixed \t\n", "TrailingMixed"}, {"", ""}, {"\0", "\0"}}; for (size_t i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) { trim_trailing_whitespace(test_cases[i].input); assert_string_equal(test_cases[i].input, test_cases[i].expected); } } static void test_identify_disks_no_sys_class_nvme_present(void **state) { (void)state; // Unused parameter char expected_string[1024]; snprintf(expected_string, sizeof(expected_string), "no NVMe devices in %s: No such file or directory\n", fake_sys_class_nvme_path); remove_temp_dir(fake_sys_class_nvme_path); struct context ctx = {.output_format = PLAIN}; int result = identify_disks(&ctx); assert_int_equal(result, 0); assert_string_equal(capture_stderr(), expected_string); assert_string_equal(capture_stdout(), ""); } static void test_identify_disks_sys_class_nvme_empty(void **state) { (void)state; // Unused parameter struct context ctx = {.output_format = PLAIN}; int result = identify_disks(&ctx); assert_int_equal(result, 0); assert_string_equal(capture_stderr(), ""); assert_string_equal(capture_stdout(), ""); } static void test_identify_disks(void **state) { struct { const char *name; void (*setup_func)(void); 
const char *expected_stderr; const char *expected_stdout; } test_cases[] = { {"no namespaces", NULL, "", ""}, {"nvme0", _setup_nvme0_microsoft_no_name_namespaces, "", ""}, {"nvme1", _setup_nvme1_microsoft_one_namespace, "", "/dev/nvme1n1: key1=nvme1n1value1,key2=nvme1n1value2\n"}, {"nvme2", _setup_nvme2_microsoft_two_namespaces, "", "/dev/nvme2n1: key1=nvme2n1value1,key2=nvme2n1value2\n" "/dev/nvme2n2: key1=nvme2n2value1,key2=nvme2n2value2\n"}, {"nvme4", _setup_nvme4_non_microsoft, "", ""}, {"nvme5", _setup_nvme5_microsoft_mixed_namespaces, "", "/dev/nvme5n1: key1=nvme5n1value1,key2=nvme5n1value2\n" "/dev/nvme5n2: \n" "/dev/nvme5n4: key1=nvme5n4value1,key2=nvme5n4value2\n" "/dev/nvme5n32: key1=nvme5n32value1\n" "/dev/nvme5n315: key1=nvme5n315value1\n"}, {"nvme6", _setup_nvme6_remote_accelerator_v1_with_vs, "", "/dev/nvme6n1: key1=nvme6n1value1,key2=nvme6n1value2\n"}, {"nvme7", _setup_nvme7_remote_accelerator_v1_without_vs, "", "/dev/nvme7n1: type=os\n" "/dev/nvme7n2: type=data,lun=0\n" "/dev/nvme7n3: type=data,lun=1\n" "/dev/nvme7n4: type=data,lun=2\n" "/dev/nvme7n9: type=data,lun=7\n"}, {"nvme8", _setup_nvme8_direct_disk_v1_without_vs, "", "/dev/nvme8n1: type=local\n"}, {"nvme9", _setup_nvme9_direct_disk_v2, "", "/dev/nvme9n1: type=local,index=0,name=nvme-500G-0\n" "/dev/nvme9n2: type=local,index=1,name=nvme-500G-1\n"}, {"nvme10", _setup_nvme10_direct_disk_v2_missing_vs, "", "/dev/nvme10n1: type=local\n"}, {"nvme11", _setup_nvme11_non_integer_lun_and_index, "failed to parse vs=type=local,index=foo,lun=bar key=index value=foo as int\n" "failed to parse vs=type=local,index=foo,lun=bar key=lun value=bar as int\n", "/dev/nvme11n1: type=local,index=foo,lun=bar\n"}, }; for (size_t i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) { printf("Running test case: %s\n", test_cases[i].name); teardown(state); setup(state); if (test_cases[i].setup_func) test_cases[i].setup_func(); struct context ctx = {.output_format = PLAIN}; int result = identify_disks(&ctx); 
assert_int_equal(result, 0); assert_string_equal(capture_stderr(), test_cases[i].expected_stderr); assert_string_equal(capture_stdout(), test_cases[i].expected_stdout); } } static void test_identify_disks_combined(void **state) { (void)state; // Unused parameter _setup_nvme0_microsoft_no_name_namespaces(); _setup_nvme1_microsoft_one_namespace(); _setup_nvme2_microsoft_two_namespaces(); _setup_nvme4_non_microsoft(); _setup_nvme5_microsoft_mixed_namespaces(); _setup_nvme6_remote_accelerator_v1_with_vs(); _setup_nvme7_remote_accelerator_v1_without_vs(); _setup_nvme8_direct_disk_v1_without_vs(); _setup_nvme9_direct_disk_v2(); _setup_nvme10_direct_disk_v2_missing_vs(); _setup_nvme11_non_integer_lun_and_index(); struct context ctx = {.output_format = PLAIN}; int result = identify_disks(&ctx); assert_int_equal(result, 0); assert_string_equal(capture_stderr(), "failed to parse vs=type=local,index=foo,lun=bar key=index value=foo as int\n" "failed to parse vs=type=local,index=foo,lun=bar key=lun value=bar as int\n"); assert_string_equal(capture_stdout(), "/dev/nvme1n1: key1=nvme1n1value1,key2=nvme1n1value2\n" "/dev/nvme2n1: key1=nvme2n1value1,key2=nvme2n1value2\n" "/dev/nvme2n2: key1=nvme2n2value1,key2=nvme2n2value2\n" "/dev/nvme5n1: key1=nvme5n1value1,key2=nvme5n1value2\n" "/dev/nvme5n2: \n" "/dev/nvme5n4: key1=nvme5n4value1,key2=nvme5n4value2\n" "/dev/nvme5n32: key1=nvme5n32value1\n" "/dev/nvme5n315: key1=nvme5n315value1\n" "/dev/nvme6n1: key1=nvme6n1value1,key2=nvme6n1value2\n" "/dev/nvme7n1: type=os\n" "/dev/nvme7n2: type=data,lun=0\n" "/dev/nvme7n3: type=data,lun=1\n" "/dev/nvme7n4: type=data,lun=2\n" "/dev/nvme7n9: type=data,lun=7\n" "/dev/nvme8n1: type=local\n" "/dev/nvme9n1: type=local,index=0,name=nvme-500G-0\n" "/dev/nvme9n2: type=local,index=1,name=nvme-500G-1\n" "/dev/nvme10n1: type=local\n" "/dev/nvme11n1: type=local,index=foo,lun=bar\n"); } // clang-format off static const char *expected_combined_json_string = "[{\"path\":\"/dev/nvme1n1\",\"model\":\"Unknown 
model\",\"properties\":{\"key1\":\"nvme1n1value1\",\"key2\":\"nvme1n1value2\"},\"vs\":\"key1=nvme1n1value1,key2=nvme1n1value2\"}," "{\"path\":\"/dev/nvme2n1\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme2n1value1\",\"key2\":\"nvme2n1value2\"},\"vs\":\"key1=nvme2n1value1,key2=nvme2n1value2\"}," "{\"path\":\"/dev/nvme2n2\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme2n2value1\",\"key2\":\"nvme2n2value2\"},\"vs\":\"key1=nvme2n2value1,key2=nvme2n2value2\"}," "{\"path\":\"/dev/nvme5n1\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme5n1value1\",\"key2\":\"nvme5n1value2\"},\"vs\":\"key1=nvme5n1value1,key2=nvme5n1value2\"}," "{\"path\":\"/dev/nvme5n2\",\"model\":\"Unknown model\",\"properties\":{},\"vs\":\"\"}," "{\"path\":\"/dev/nvme5n3\",\"model\":\"Unknown model\",\"properties\":{},\"vs\":null}," "{\"path\":\"/dev/nvme5n4\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme5n4value1\",\"key2\":\"nvme5n4value2\"},\"vs\":\"key1=nvme5n4value1,key2=nvme5n4value2\"}," "{\"path\":\"/dev/nvme5n32\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme5n32value1\"},\"vs\":\"key1=nvme5n32value1\"}," "{\"path\":\"/dev/nvme5n315\",\"model\":\"Unknown model\",\"properties\":{\"key1\":\"nvme5n315value1\"},\"vs\":\"key1=nvme5n315value1\"}," "{\"path\":\"/dev/nvme6n1\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"key1\":\"nvme6n1value1\",\"key2\":\"nvme6n1value2\"},\"vs\":\"key1=nvme6n1value1,key2=nvme6n1value2\"}," "{\"path\":\"/dev/nvme7n1\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"type\":\"os\"},\"vs\":\"\"}," "{\"path\":\"/dev/nvme7n2\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"type\":\"data\",\"lun\":0},\"vs\":\"\"}," "{\"path\":\"/dev/nvme7n3\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"type\":\"data\",\"lun\":1},\"vs\":\"\"}," "{\"path\":\"/dev/nvme7n4\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"type\":\"data\",\"lun\":2},\"vs\":\"\"}," 
"{\"path\":\"/dev/nvme7n9\",\"model\":\"MSFT NVMe Accelerator v1.0\",\"properties\":{\"type\":\"data\",\"lun\":7},\"vs\":\"\"}," "{\"path\":\"/dev/nvme8n1\",\"model\":\"Microsoft NVMe Direct Disk\",\"properties\":{\"type\":\"local\"},\"vs\":\"\"}," "{\"path\":\"/dev/nvme9n1\",\"model\":\"Microsoft NVMe Direct Disk v2\",\"properties\":{\"type\":\"local\",\"index\":0,\"name\":\"nvme-500G-0\"},\"vs\":\"type=local,index=0,name=nvme-500G-0\"}," "{\"path\":\"/dev/nvme9n2\",\"model\":\"Microsoft NVMe Direct Disk v2\",\"properties\":{\"type\":\"local\",\"index\":1,\"name\":\"nvme-500G-1\"},\"vs\":\"type=local,index=1,name=nvme-500G-1\"}," "{\"path\":\"/dev/nvme10n1\",\"model\":\"Microsoft NVMe Direct Disk v2\",\"properties\":{\"type\":\"local\"},\"vs\":\"\"}," "{\"path\":\"/dev/nvme11n1\",\"model\":\"Unknown model\",\"properties\":{\"type\":\"local\",\"index\":\"foo\",\"lun\":\"bar\"},\"vs\":\"type=local,index=foo,lun=bar\"}]\n"; // clang-format on bool compare_json_strings(const char *json_str1, const char *json_str2) { // Parse the JSON strings into json_object structures json_object *json_obj1 = json_tokener_parse(json_str1); json_object *json_obj2 = json_tokener_parse(json_str2); // Check if both JSON objects are valid if (json_obj1 == NULL || json_obj2 == NULL) { if (json_obj1) json_object_put(json_obj1); if (json_obj2) json_object_put(json_obj2); return false; } // Compare the JSON objects bool are_equal = json_object_equal(json_obj1, json_obj2); // Free the JSON objects json_object_put(json_obj1); json_object_put(json_obj2); return are_equal; } static void test_identify_disks_combined_json(void **state) { (void)state; // Unused parameter _setup_nvme0_microsoft_no_name_namespaces(); _setup_nvme1_microsoft_one_namespace(); _setup_nvme2_microsoft_two_namespaces(); _setup_nvme4_non_microsoft(); _setup_nvme5_microsoft_mixed_namespaces(); _setup_nvme6_remote_accelerator_v1_with_vs(); _setup_nvme7_remote_accelerator_v1_without_vs(); 
_setup_nvme8_direct_disk_v1_without_vs(); _setup_nvme9_direct_disk_v2(); _setup_nvme10_direct_disk_v2_missing_vs(); _setup_nvme11_non_integer_lun_and_index(); struct context ctx = {.output_format = JSON}; int result = identify_disks(&ctx); assert_int_equal(result, 0); assert_string_equal(capture_stderr(), "failed to parse vs=type=local,index=foo,lun=bar key=index value=foo as int\n" "failed to parse vs=type=local,index=foo,lun=bar key=lun value=bar as int\n"); const char *json_output = capture_stdout(); assert_true(compare_json_strings(json_output, expected_combined_json_string)); // Pretty print in all cases will have a newline after the leading bracket. assert_true(strncmp(json_output, "[\n", 2) == 0); } int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(test_trim_trailing_whitespace), cmocka_unit_test_setup_teardown(test_identify_disks_no_sys_class_nvme_present, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_disks_sys_class_nvme_empty, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_disks, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_disks_combined, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_disks_combined_json, setup, teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); } azure-vm-utils-0.6.0/tests/identify_udev_tests.c000066400000000000000000000105521500025016600220050ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. 
*/ #include #include #include #include #include #include #include // clang-format off #include // clang-format on #include "capture.h" #include "identify_udev.h" #include "nvme.h" char *__wrap_nvme_identify_namespace_vs_for_namespace_device(const char *namespace_path) { check_expected_ptr(namespace_path); return mock_ptr_type(char *); } static int setup(void **state) { capture_setup(state); unsetenv("DEVNAME"); return 0; } static int teardown(void **state) { capture_teardown(state); unsetenv("DEVNAME"); return 0; } static void test_print_udev_key_value(void **state) { (void)state; // Unused parameter print_udev_key_value("type", "local"); assert_string_equal(capture_stdout(), "AZURE_DISK_TYPE=local\n"); assert_string_equal(capture_stderr(), ""); } static void test_print_udev_key_values_for_vs_success(void **state) { (void)state; // Unused parameter char vs[] = "type=local,index=2,name=nvme-600G-2"; int result = print_udev_key_values_for_vs(vs); assert_int_equal(result, 0); assert_string_equal(capture_stdout(), "AZURE_DISK_TYPE=local\n" "AZURE_DISK_INDEX=2\n" "AZURE_DISK_NAME=nvme-600G-2\n"); assert_string_equal(capture_stderr(), ""); } static void test_print_udev_key_values_for_vs_failure(void **state) { (void)state; // Unused parameter char vs[] = "type=local,index=2,name"; int result = print_udev_key_values_for_vs(vs); assert_int_equal(result, 1); assert_string_equal(capture_stderr(), "failed to parse key-value pair: name\n"); assert_string_equal(capture_stdout(), "AZURE_DISK_TYPE=local\n" "AZURE_DISK_INDEX=2\n"); } static void test_identify_udev_device_success(void **state) { (void)state; // Unused parameter setenv("DEVNAME", "/dev/nvme0n5", 1); const char *vs = strdup("type=local,index=2,name=nvme-600G-2"); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme0n5"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, vs); int result = identify_udev_device(); assert_int_equal(result, 0); 
assert_string_equal(capture_stdout(), "AZURE_DISK_VS=type=local,index=2,name=nvme-600G-2\n" "AZURE_DISK_TYPE=local\n" "AZURE_DISK_INDEX=2\n" "AZURE_DISK_NAME=nvme-600G-2\n"); assert_string_equal(capture_stderr(), ""); } static void test_identify_udev_device_no_devname(void **state) { (void)state; // Unused parameter unsetenv("DEVNAME"); int result = identify_udev_device(); assert_int_equal(result, 1); assert_string_equal(capture_stderr(), "environment variable 'DEVNAME' not set\n"); assert_string_equal(capture_stdout(), ""); } static void test_identify_udev_device_vs_failure(void **state) { (void)state; // Unused parameter setenv("DEVNAME", "/dev/nvme0n5", 1); expect_string(__wrap_nvme_identify_namespace_vs_for_namespace_device, namespace_path, "/dev/nvme0n5"); will_return(__wrap_nvme_identify_namespace_vs_for_namespace_device, NULL); int result = identify_udev_device(); assert_int_equal(result, 1); assert_string_equal(capture_stderr(), "failed to query namespace vendor-specific data: /dev/nvme0n5\n"); assert_string_equal(capture_stdout(), ""); } int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test_setup_teardown(test_print_udev_key_value, setup, teardown), cmocka_unit_test_setup_teardown(test_print_udev_key_values_for_vs_success, setup, teardown), cmocka_unit_test_setup_teardown(test_print_udev_key_values_for_vs_failure, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_udev_device_success, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_udev_device_no_devname, setup, teardown), cmocka_unit_test_setup_teardown(test_identify_udev_device_vs_failure, setup, teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); } azure-vm-utils-0.6.0/tests/nvme_tests.c000066400000000000000000000315341500025016600201170ustar00rootroot00000000000000/** * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE in the project root for license * information. 
*/ #include #include #include #include #include #include #include #include #include #include // clange-format off #include // clange-format on #include "capture.h" #include "nvme.h" #define OPEN_FD 14 #define OPEN_PATH "/dev/nvme199n19" #define NSID 19 #include bool debug = false; struct nvme_id_ns empty_ns = { 0, }; int __wrap_open(const char *pathname, int flags, ...) { check_expected(pathname); check_expected(flags); int ret = mock_type(int); if (ret < 0) { errno = mock_type(int); } return ret; } /** * Redirect open64 to open for platforms that use it, e.g. armhf. */ int __wrap_open64(const char *pathname, int flags, ...) { return __wrap_open(pathname, flags); } int __wrap_posix_memalign(void **memptr, size_t alignment, size_t size) { check_expected(memptr); check_expected(alignment); check_expected(size); int ret = mock_type(int); if (ret < 0) { errno = mock_type(int); } else if (ret == 0) { *memptr = malloc(size); } return ret; } int __wrap_ioctl(int fd, unsigned long request, ...) { check_expected(fd); check_expected(request); va_list args; va_start(args, request); void *arg = va_arg(args, void *); va_end(args); struct nvme_admin_cmd *cmd = (struct nvme_admin_cmd *)arg; assert_non_null(cmd); assert_int_equal(cmd->opcode, NVME_ADMIN_IDENTFY_NAMESPACE_OPCODE); unsigned int nsid = mock_type(unsigned int); assert_int_equal(cmd->nsid, nsid); assert_int_equal(cmd->data_len, sizeof(struct nvme_id_ns)); assert_non_null(cmd->addr); struct nvme_id_ns *ns = mock_type(struct nvme_id_ns *); if (ns != NULL) { memcpy((void *)(uintptr_t)cmd->addr, ns, sizeof(*ns)); ns = (struct nvme_id_ns *)(uintptr_t)cmd->addr; } int ret = mock_type(int); if (ret < 0) { errno = mock_type(int); } return ret; } /** * Redirect __ioctl_time64 to ioctl for platforms that use it, e.g. armhf. */ int __wrap___ioctl_time64(int fd, unsigned long request, ...) 
{ va_list args; va_start(args, request); void *arg = va_arg(args, void *); va_end(args); return __wrap_ioctl(fd, request, arg); } int __wrap_close(int fd) { check_expected(fd); return mock_type(int); } static void test_nvme_identify_namespace_success(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, OPEN_FD); expect_any(__wrap_posix_memalign, memptr); expect_value(__wrap_posix_memalign, alignment, sysconf(_SC_PAGESIZE)); expect_value(__wrap_posix_memalign, size, sizeof(struct nvme_id_ns)); will_return(__wrap_posix_memalign, 0); expect_value(__wrap_ioctl, fd, OPEN_FD); expect_value(__wrap_ioctl, request, NVME_IOCTL_ADMIN_CMD); will_return(__wrap_ioctl, NSID); will_return(__wrap_ioctl, &empty_ns); will_return(__wrap_ioctl, 0); expect_value(__wrap_close, fd, OPEN_FD); will_return(__wrap_close, 0); struct nvme_id_ns *result = nvme_identify_namespace(OPEN_PATH, NSID); assert_non_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), ""); } static void test_nvme_identify_namespace_failure_to_open(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, -1); will_return(__wrap_open, EOWNERDEAD); struct nvme_id_ns *result = nvme_identify_namespace(OPEN_PATH, NSID); assert_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed to open /dev/nvme199n19: Owner died\n"); } static void test_nvme_identify_namespace_failure_to_posix_memalign(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, OPEN_FD); expect_any(__wrap_posix_memalign, memptr); expect_value(__wrap_posix_memalign, alignment, sysconf(_SC_PAGESIZE)); expect_value(__wrap_posix_memalign, size, 
sizeof(struct nvme_id_ns)); will_return(__wrap_posix_memalign, -1); will_return(__wrap_posix_memalign, ENOLINK); expect_value(__wrap_close, fd, OPEN_FD); will_return(__wrap_close, 0); struct nvme_id_ns *result = nvme_identify_namespace(OPEN_PATH, NSID); assert_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed posix_memalign for /dev/nvme199n19: Link has been severed\n"); } static void test_nvme_identify_namespace_failure_to_ioctl(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, OPEN_FD); expect_any(__wrap_posix_memalign, memptr); expect_value(__wrap_posix_memalign, alignment, sysconf(_SC_PAGESIZE)); expect_value(__wrap_posix_memalign, size, sizeof(struct nvme_id_ns)); will_return(__wrap_posix_memalign, 0); expect_value(__wrap_ioctl, fd, OPEN_FD); expect_value(__wrap_ioctl, request, NVME_IOCTL_ADMIN_CMD); will_return(__wrap_ioctl, NSID); will_return(__wrap_ioctl, NULL); will_return(__wrap_ioctl, -1); will_return(__wrap_ioctl, EUCLEAN); expect_value(__wrap_close, fd, OPEN_FD); will_return(__wrap_close, 0); struct nvme_id_ns *result = nvme_identify_namespace(OPEN_PATH, NSID); assert_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed NVME_IOCTL_ADMIN_CMD ioctl for /dev/nvme199n19: " "Structure needs cleaning\n"); } static void test_nvme_identify_namespace_vs_success(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, OPEN_FD); expect_any(__wrap_posix_memalign, memptr); expect_value(__wrap_posix_memalign, alignment, sysconf(_SC_PAGESIZE)); expect_value(__wrap_posix_memalign, size, sizeof(struct nvme_id_ns)); will_return(__wrap_posix_memalign, 0); expect_value(__wrap_ioctl, fd, OPEN_FD); expect_value(__wrap_ioctl, request, 
NVME_IOCTL_ADMIN_CMD); will_return(__wrap_ioctl, NSID); struct nvme_id_ns ns = {.vs = "key1=value1,key2=value2"}; will_return(__wrap_ioctl, &ns); will_return(__wrap_ioctl, 0); expect_value(__wrap_close, fd, OPEN_FD); will_return(__wrap_close, 0); char *vs = nvme_identify_namespace_vs(OPEN_PATH, NSID); assert_non_null(vs); assert_string_equal(vs, "key1=value1,key2=value2"); free(vs); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), ""); } static void test_nvme_identify_namespace_vs_failure(void **state) { (void)state; // Unused parameter expect_string(__wrap_open, pathname, OPEN_PATH); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, -1); will_return(__wrap_open, EOWNERDEAD); char *vs = nvme_identify_namespace_vs(OPEN_PATH, NSID); assert_null(vs); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed to open /dev/nvme199n19: Owner died\n" "failed to identify namespace for device=/dev/nvme199n19\n"); } static void test_get_nsid_from_namespace_device_path_success(void **state) { (void)state; // Unused parameter assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme0n5"), 5); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme2n12"), 12); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme100n1"), 1); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme100000n1"), 1); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme55n999"), 999); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), ""); } static void test_get_nsid_from_namespace_device_path_failure(void **state) { (void)state; // Unused parameter assert_int_equal(get_nsid_from_namespace_device_path("bad"), -1); assert_int_equal(get_nsid_from_namespace_device_path("bad1n1"), -1); assert_int_equal(get_nsid_from_namespace_device_path("/dev/bad1n1"), -1); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme0"), -1); 
assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme0n"), -1); assert_int_equal(get_nsid_from_namespace_device_path("/dev/nvme0nX"), -1); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), ""); } static void test_nvme_identify_namespace_vs_for_namespace_device_success(void **state) { (void)state; // Unused parameter const char *path = "/dev/nvme0n5"; const char *vendor_specific_data = "key1=value1,key2=value2"; expect_string(__wrap_open, pathname, path); expect_value(__wrap_open, flags, O_RDONLY); will_return(__wrap_open, OPEN_FD); expect_any(__wrap_posix_memalign, memptr); expect_value(__wrap_posix_memalign, alignment, sysconf(_SC_PAGESIZE)); expect_value(__wrap_posix_memalign, size, sizeof(struct nvme_id_ns)); will_return(__wrap_posix_memalign, 0); expect_value(__wrap_ioctl, fd, OPEN_FD); expect_value(__wrap_ioctl, request, NVME_IOCTL_ADMIN_CMD); will_return(__wrap_ioctl, 5); struct nvme_id_ns ns = {.vs = "key1=value1,key2=value2"}; will_return(__wrap_ioctl, &ns); will_return(__wrap_ioctl, 0); expect_value(__wrap_close, fd, OPEN_FD); will_return(__wrap_close, 0); char *result = nvme_identify_namespace_vs_for_namespace_device(path); assert_non_null(result); assert_string_equal(result, vendor_specific_data); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), ""); } static void test_nvme_identify_namespace_vs_for_namespace_device_nsid_failure(void **state) { (void)state; // Unused parameter const char *path = "/dev/nvme0nX"; char *result = nvme_identify_namespace_vs_for_namespace_device(path); assert_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed to parse namespace id: /dev/nvme0nX\n"); } static void test_nvme_identify_namespace_vs_for_namespace_device_vs_failure(void **state) { (void)state; // Unused parameter const char *path = "/dev/nvme0n5"; expect_string(__wrap_open, pathname, path); expect_value(__wrap_open, flags, O_RDONLY); 
will_return(__wrap_open, -1); will_return(__wrap_open, EOWNERDEAD); char *result = nvme_identify_namespace_vs_for_namespace_device(path); assert_null(result); assert_string_equal(capture_stdout(), ""); assert_string_equal(capture_stderr(), "failed to open /dev/nvme0n5: Owner died\n" "failed to identify namespace for device=/dev/nvme0n5\n"); } static int setup(void **state) { (void)state; // Unused parameter capture_setup(state); return 0; } static int teardown(void **state) { (void)state; // Unused parameter capture_teardown(state); return 0; } int main(void) { const struct CMUnitTest tests[] = { cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_success, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_failure_to_open, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_failure_to_posix_memalign, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_failure_to_ioctl, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_vs_success, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_vs_failure, setup, teardown), cmocka_unit_test_setup_teardown(test_get_nsid_from_namespace_device_path_success, setup, teardown), cmocka_unit_test_setup_teardown(test_get_nsid_from_namespace_device_path_failure, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_vs_for_namespace_device_success, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_vs_for_namespace_device_nsid_failure, setup, teardown), cmocka_unit_test_setup_teardown(test_nvme_identify_namespace_vs_for_namespace_device_vs_failure, setup, teardown), }; return cmocka_run_group_tests(tests, NULL, NULL); } 
azure-vm-utils-0.6.0/udev/000077500000000000000000000000001500025016600153575ustar00rootroot00000000000000azure-vm-utils-0.6.0/udev/10-azure-unmanaged-sriov.rules000066400000000000000000000012001500025016600230650ustar00rootroot00000000000000# Azure VMs with accelerated networking may have MANA, mlx4, or mlx5 SR-IOV devices which are transparently bonded to a synthetic # hv_netvsc device. Mark devices with the IFF_SLAVE bit set as unmanaged devices: # AZURE_UNMANAGED_SRIOV=1 for 01-azure-unmanaged-sriov.network # ID_NET_MANAGED_BY=unmanaged for systemd-networkd >= 255 # NM_UNMANAGED=1 for NetworkManager # # ATTR{flags}=="0x?[89ABCDEF]??" checks the IFF_SLAVE bit (0x800). SUBSYSTEM=="net", ACTION!="remove", DRIVERS=="mana|mlx4_core|mlx5_core", ATTR{flags}=="0x?[89ABCDEF]??", ENV{AZURE_UNMANAGED_SRIOV}="1", ENV{ID_NET_MANAGED_BY}="unmanaged", ENV{NM_UNMANAGED}="1" azure-vm-utils-0.6.0/udev/80-azure-disk.rules000066400000000000000000000101421500025016600207340ustar00rootroot00000000000000ACTION!="add|change", GOTO="azure_disk_end" SUBSYSTEM!="block", GOTO="azure_disk_end" KERNEL=="nvme*", ATTRS{nsid}=="?*", ENV{ID_MODEL}=="Microsoft NVMe Direct Disk", GOTO="azure_disk_nvme_direct_v1" KERNEL=="nvme*", ATTRS{nsid}=="?*", ENV{ID_MODEL}=="Microsoft NVMe Direct Disk v2", GOTO="azure_disk_nvme_direct_v2" KERNEL=="nvme*", ATTRS{nsid}=="?*", ENV{ID_MODEL}=="MSFT NVMe Accelerator v1.0", GOTO="azure_disk_nvme_remote_v1" ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk_scsi" GOTO="azure_disk_end" LABEL="azure_disk_scsi" ATTRS{device_id}=="?00000000-0000-*", ENV{AZURE_DISK_TYPE}="os", GOTO="azure_disk_symlink" ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{AZURE_DISK_LUN}="$result" ENV{DEVTYPE}=="disk", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{AZURE_DISK_LUN}="$result" ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{AZURE_DISK_LUN}=="0", 
ENV{AZURE_DISK_TYPE}="os", ENV{AZURE_DISK_LUN}="", GOTO="azure_disk_symlink" ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{AZURE_DISK_TYPE}="data", GOTO="azure_disk_symlink" ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{AZURE_DISK_TYPE}="data", GOTO="azure_disk_symlink" ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{AZURE_DISK_TYPE}="data", GOTO="azure_disk_symlink" # Use "resource" type for local SCSI because some VM skus offer NVMe local disks in addition to a SCSI resource disk, e.g. LSv3 family. # This logic is already in walinuxagent rules but we duplicate it here to avoid an unnecessary dependency for anyone requiring it. ATTRS{device_id}=="?00000000-0001-*", ENV{AZURE_DISK_TYPE}="resource", ENV{AZURE_DISK_LUN}="", GOTO="azure_disk_symlink" ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{AZURE_DISK_LUN}=="1", ENV{AZURE_DISK_TYPE}="resource", ENV{AZURE_DISK_LUN}="", GOTO="azure_disk_symlink" GOTO="azure_disk_end" LABEL="azure_disk_nvme_direct_v1" LABEL="azure_disk_nvme_direct_v2" ATTRS{nsid}=="?*", ENV{AZURE_DISK_TYPE}="local", ENV{AZURE_DISK_SERIAL}="$env{ID_SERIAL_SHORT}" GOTO="azure_disk_nvme_id" LABEL="azure_disk_nvme_remote_v1" ATTRS{nsid}=="1", ENV{AZURE_DISK_TYPE}="os", GOTO="azure_disk_nvme_id" ATTRS{nsid}=="?*", ENV{AZURE_DISK_TYPE}="data", PROGRAM="/bin/sh -ec 'echo $$((%s{nsid}-2))'", ENV{AZURE_DISK_LUN}="$result" LABEL="azure_disk_nvme_id" ATTRS{nsid}=="?*", IMPORT{program}="/usr/sbin/azure-nvme-id --udev" LABEL="azure_disk_symlink" # systemd v254 ships an updated 60-persistent-storage.rules that would allow # these to be deduplicated using $env{.PART_SUFFIX} ENV{DEVTYPE}=="disk", ENV{AZURE_DISK_TYPE}=="os|resource|root", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}" ENV{DEVTYPE}=="disk", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_INDEX}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-index/$env{AZURE_DISK_INDEX}" ENV{DEVTYPE}=="disk", ENV{AZURE_DISK_TYPE}=="?*", 
ENV{AZURE_DISK_LUN}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-lun/$env{AZURE_DISK_LUN}" ENV{DEVTYPE}=="disk", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_NAME}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-name/$env{AZURE_DISK_NAME}" ENV{DEVTYPE}=="disk", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_SERIAL}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-serial/$env{AZURE_DISK_SERIAL}" ENV{DEVTYPE}=="partition", ENV{AZURE_DISK_TYPE}=="os|resource|root", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}-part%n" ENV{DEVTYPE}=="partition", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_INDEX}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-index/$env{AZURE_DISK_INDEX}-part%n" ENV{DEVTYPE}=="partition", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_LUN}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-lun/$env{AZURE_DISK_LUN}-part%n" ENV{DEVTYPE}=="partition", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_NAME}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-name/$env{AZURE_DISK_NAME}-part%n" ENV{DEVTYPE}=="partition", ENV{AZURE_DISK_TYPE}=="?*", ENV{AZURE_DISK_SERIAL}=="?*", SYMLINK+="disk/azure/$env{AZURE_DISK_TYPE}/by-serial/$env{AZURE_DISK_SERIAL}-part%n" LABEL="azure_disk_end"