pax_global_header00006660000000000000000000000064147736375100014527gustar00rootroot0000000000000052 comment=0c7963c60e716f0b9d81c6dd0eaf2afbd3078c51 incus-6.0.4/000077500000000000000000000000001477363751000126575ustar00rootroot00000000000000incus-6.0.4/.codespell-ignore000066400000000000000000000000541477363751000161120ustar00rootroot00000000000000AtLeast destOp ECT inport renderD requestor incus-6.0.4/.deepsource.toml000066400000000000000000000004061477363751000157700ustar00rootroot00000000000000version = 1 test_patterns = [ "test/**", "*_test.go" ] [[analyzers]] name = "python" enabled = true [analyzers.meta] runtime_version = "3.x.x" [[analyzers]] name = "go" enabled = true [analyzers.meta] import_paths = ["github.com/lxc/incus"] incus-6.0.4/.devcontainer/000077500000000000000000000000001477363751000154165ustar00rootroot00000000000000incus-6.0.4/.devcontainer/Dockerfile000066400000000000000000000060321477363751000174110ustar00rootroot00000000000000ARG GO_VERSION=1.23 ARG DEBIAN_VERSION=bookworm # Go development container FROM golang:${GO_VERSION}-${DEBIAN_VERSION} ARG USERNAME=vscode ARG USER_UID=1000 ARG USER_GID=1000 # Install necessary tools. RUN sed -r -i 's/^Components: main$/Components: main contrib/g' /etc/apt/sources.list.d/debian.sources && \ apt update && \ apt install -y \ acl \ aspell \ aspell-en \ attr \ autoconf \ automake \ bind9-dnsutils \ btrfs-progs \ busybox-static \ ceph-common \ curl \ dnsmasq-base \ ebtables \ flake8 \ gettext \ git \ jq \ less \ libacl1-dev \ libcap-dev \ # libcowsql-dev libdbus-1-dev \ # liblxc-dev \ liblxc1 \ liblz4-dev \ libseccomp-dev \ libselinux1-dev \ libsqlite3-dev \ libtool \ libudev-dev \ libusb-1.0-0-dev \ libuv1-dev \ locales \ locales-all \ lvm2 \ lxc-dev \ lxc-templates \ make \ man-db \ pipx \ pkg-config \ protoc-gen-go \ python3-matplotlib \ python3.11-venv \ rsync \ ruby-mdl \ shellcheck \ socat \ sqlite3 \ squashfs-tools \ sudo \ tar \ tcl \ thin-provisioning-tools \ vim \ # Disabled for now, very slow to install. # zfsutils-linux xz-utils # With pipx >= 1.5.0, we could use pipx --global instead. RUN PIPX_HOME=/opt/pipx PIPX_BIN_DIR=/usr/local/bin pipx install codespell # Add vscode user and add it to sudoers. RUN groupadd -g 1000 $USERNAME && \ useradd -s /bin/bash -u $USER_UID -g $USER_GID -m $USERNAME && \ mkdir -p /etc/sudoers.d && \ echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME && \ chmod 0440 /etc/sudoers.d/$USERNAME # Setup for vscode user. USER $USERNAME ENV EDITOR=vi \ LANG=en_US.UTF-8 \ CGO_CFLAGS="-I/home/vscode/vendor/raft/include/ -I/home/vscode/vendor/cowsql/include/" \ CGO_LDFLAGS="-L/home/vscode/vendor/raft/.libs -L/home/vscode/vendor/cowsql/.libs/" \ LD_LIBRARY_PATH="/home/vscode/vendor/raft/.libs/:/home/vscode/vendor/cowsql/.libs/" \ CGO_LDFLAGS_ALLOW="(-Wl,-wrap,pthread_create)|(-Wl,-z,now)" # Build Go tools with user vscode to ensure correct file and directory permissions for the build artifacts. 
RUN go install -v github.com/google/go-licenses@latest && \ go install -v github.com/766b/go-outliner@latest && \ GOTOOLCHAIN="" go install -v golang.org/x/tools/gopls@latest && \ go install -v github.com/go-delve/delve/cmd/dlv@latest && \ go install -v golang.org/x/tools/cmd/goimports@latest && \ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin # Make dependencies COPY Makefile /home/vscode RUN cd /home/vscode && \ mkdir /home/vscode/vendor && \ make deps USER root # Since we use a volume for /go to persist the content between executions, we need to preserve the binaries. RUN mv /go/bin/* /usr/local/bin incus-6.0.4/.devcontainer/devcontainer.json000066400000000000000000000034461477363751000210010ustar00rootroot00000000000000{ "name": "Incus", "build": { "dockerfile": "Dockerfile", "context": ".." }, "customizations": { "vscode": { "extensions": [ "golang.go", "766b.go-outliner", "ms-azuretools.vscode-docker", "ms-vscode.makefile-tools", "github.vscode-github-actions", "davidanson.vscode-markdownlint", "shardulm94.trailing-spaces", "Gruntfuggly.todo-tree" ], "settings": { "files.insertFinalNewline": true, "go.goroot": "/usr/local/go", "go.gopath": "/go", "go.lintTool": "golangci-lint", "go.lintOnSave": "package", "go.lintFlags": [ "--fast" ], "go.useLanguageServer": true, "goOutliner.extendExplorerTab": true, "gopls": { "formatting.gofumpt": true, "formatting.local": "github.com/lxc/incus", "ui.diagnostic.staticcheck": false }, "[go]": { "editor.formatOnSave": true, "editor.codeActionsOnSave": { "source.organizeImports": "explicit" } }, "[go.mod]": { "editor.formatOnSave": true, "editor.codeActionsOnSave": { "source.organizeImports": "explicit" } }, "search.exclude": { "**/.git": true } } } }, "postCreateCommand": "go mod download", "mounts": [ "source=incus_devcontainer_cache,target=/home/vscode/.cache,type=volume", "source=incus_devcontainer_goroot,target=/go,type=volume" ], "runArgs": [ "--privileged", "-u", "vscode", "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined", "-v", "${env:HOME}/.ssh:/home/vscode/.ssh:ro", "--name", "${localEnv:USER}_incus_devcontainer" ], "remoteUser": "vscode" } incus-6.0.4/.github/000077500000000000000000000000001477363751000142175ustar00rootroot00000000000000incus-6.0.4/.github/CODEOWNERS000066400000000000000000000000141477363751000156050ustar00rootroot00000000000000* @stgraber incus-6.0.4/.github/FUNDING.yml000066400000000000000000000002551477363751000160360ustar00rootroot00000000000000# Frequent committers who contribute to Incus on their own time can add # themselves to the list here so users who feel like sponsoring can find # them. github: - stgraber incus-6.0.4/.github/ISSUE_TEMPLATE/000077500000000000000000000000001477363751000164025ustar00rootroot00000000000000incus-6.0.4/.github/ISSUE_TEMPLATE/bug-reports.yml000066400000000000000000000033771477363751000214100ustar00rootroot00000000000000name: Bug report description: File a bug report. type: bug body: - type: checkboxes attributes: label: Is there an existing issue for this? description: Please search to see if an issue already exists for the bug you encountered. options: - label: There is no existing issue for this bug required: true - type: checkboxes attributes: label: Is this happening on an up to date version of Incus? description: Please make sure that your system has all updates applied and is running a current version of Incus or Incus LTS. 
options: - label: This is happening on a supported version of Incus required: true - type: textarea attributes: label: Incus system details description: Output of `incus info`. render: yaml validations: required: true - type: textarea attributes: label: Instance details description: If the issue affects an instance, please include the output of `incus config show NAME`. validations: required: false - type: textarea attributes: label: Instance log description: If the issue is related to an instance startup failure, please include `incus info --show-log NAME`. validations: required: false - type: textarea attributes: label: Current behavior description: A concise description of what you're experiencing. validations: required: false - type: textarea attributes: label: Expected behavior description: A concise description of what you expected to happen. validations: required: false - type: textarea attributes: label: Steps to reproduce description: Step by step instructions to reproduce the behavior. placeholder: | 1. Step one 2. Step two 3. Step three validations: required: true incus-6.0.4/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000002441477363751000203720ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: Support question url: https://discuss.linuxcontainers.org about: Please ask and answer questions here. incus-6.0.4/.github/ISSUE_TEMPLATE/feature-requests.yml000066400000000000000000000013561477363751000224360ustar00rootroot00000000000000name: Feature request description: File a feature request. type: feature body: - type: checkboxes attributes: label: Is there an existing issue for this? description: Please search to see if an issue already exists for the feature you'd like to see added. options: - label: There is no existing issue for this feature required: true - type: textarea attributes: label: What are you currently unable to do description: A concise description of the problem you're trying to solve. validations: required: true - type: textarea attributes: label: What do you think would need to be added description: A concise description of what you think should be added to Incus. validations: required: false incus-6.0.4/.github/SUPPORT.md000066400000000000000000000002751477363751000157210ustar00rootroot00000000000000The Incus team uses GitHub for issue and feature tracking, not for user support. For information on how to get support, see [Support](https://linuxcontainers.org/incus/docs/main/support/). 
incus-6.0.4/.github/dependabot.yml000066400000000000000000000002051477363751000170440ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" labels: [] schedule: interval: "weekly" incus-6.0.4/.github/labeler.yml000066400000000000000000000003031477363751000163440ustar00rootroot00000000000000API: - changed-files: - any-glob-to-any-file: - doc/api-extensions.md - doc/rest-api.yaml - shared/api/**/* Documentation: - changed-files: - any-glob-to-any-file: - doc/**/* incus-6.0.4/.github/workflows/000077500000000000000000000000001477363751000162545ustar00rootroot00000000000000incus-6.0.4/.github/workflows/commits.yml000066400000000000000000000024231477363751000204530ustar00rootroot00000000000000name: Commits on: - pull_request permissions: contents: read jobs: dco-check: permissions: pull-requests: read # for tim-actions/get-pr-commits to get list of commits from the PR name: Signed-off-by (DCO) runs-on: ubuntu-24.04 steps: - name: Get PR Commits id: 'get-pr-commits' uses: tim-actions/get-pr-commits@master with: token: ${{ secrets.GITHUB_TOKEN }} - name: Check that all commits are signed-off uses: tim-actions/dco@master with: commits: ${{ steps.get-pr-commits.outputs.commits }} target-branch: permissions: contents: none name: Branch target runs-on: ubuntu-24.04 steps: - name: Check branch target env: TARGET: ${{ github.event.pull_request.base.ref }} TITLE: ${{ github.event.pull_request.title }} run: | set -eux TARGET_FROM_PR_TITLE="$(echo "${TITLE}" | sed -n 's/.*(\(stable-[0-9]\.[0-9]\))$/\1/p')" if [ -z "${TARGET_FROM_PR_TITLE}" ]; then TARGET_FROM_PR_TITLE="main" else echo "Branch target overridden from PR title" fi [ "${TARGET}" = "${TARGET_FROM_PR_TITLE}" ] && exit 0 echo "Invalid branch target: ${TARGET} != ${TARGET_FROM_PR_TITLE}" exit 1 incus-6.0.4/.github/workflows/tests.yml000066400000000000000000000407171477363751000201520ustar00rootroot00000000000000name: Tests on: push: branches: - main - stable-* pull_request: permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: code-tests: name: Code runs-on: ubuntu-24.04 strategy: fail-fast: false matrix: go: - oldstable - stable - tip steps: - name: Checkout uses: actions/checkout@v4 with: # Differential ShellCheck requires full git history fetch-depth: 0 - name: Dependency Review uses: actions/dependency-review-action@v4 if: github.event_name == 'pull_request' - id: ShellCheck name: Differential ShellCheck uses: redhat-plumbers-in-action/differential-shellcheck@v5 env: SHELLCHECK_OPTS: --shell sh with: token: ${{ secrets.GITHUB_TOKEN }} if: github.event_name == 'pull_request' && matrix.go == 'stable' - name: Upload artifact with ShellCheck defects in SARIF format uses: actions/upload-artifact@v4 with: name: Differential ShellCheck SARIF path: ${{ steps.ShellCheck.outputs.sarif }} if: github.event_name == 'pull_request' && matrix.go == 'stable' - name: Install Go (${{ matrix.go }}) uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} if: matrix.go != 'tip' - name: Install Go (stable) uses: actions/setup-go@v5 with: go-version: stable if: matrix.go == 'tip' - name: Install Go (tip) run: | go install golang.org/dl/gotip@latest gotip download ~/sdk/gotip/bin/go version echo "PATH=$HOME/go/bin:$HOME/sdk/gotip/bin/:$PATH" >> $GITHUB_ENV if: matrix.go == 'tip' - name: Install dependencies run: | sudo apt-get update sudo apt-get install --no-install-recommends -y \ curl \ gettext \ git \ libacl1-dev \ libcap-dev \ libdbus-1-dev \ 
libcowsql-dev \ liblxc-dev \ lxc-templates \ libseccomp-dev \ libselinux-dev \ libsqlite3-dev \ libtool \ libudev-dev \ make \ pipx \ pkg-config \ shellcheck # With pipx >= 1.5.0, we could use pipx --global instead. PIPX_HOME=/opt/pipx PIPX_BIN_DIR=/usr/local/bin \ pipx install codespell flake8 - name: Fix repository permissions run: | sudo chown -R runner:docker . - name: Check compatible min Go version run: | go mod tidy - name: Download go dependencies run: | go mod download - name: Run Incus build run: | make - name: Run static analysis env: GITHUB_BEFORE: ${{ github.event.before }} run: | make static-analysis - name: Unit tests (all) run: | sudo --preserve-env=CGO_CFLAGS,CGO_LDFLAGS,CGO_LDFLAGS_ALLOW,LD_LIBRARY_PATH LD_LIBRARY_PATH=${LD_LIBRARY_PATH} env "PATH=${PATH}" go test ./... system-tests: name: System strategy: fail-fast: false matrix: go: - stable suite: - cluster - standalone backend: - dir - btrfs - lvm - zfs - ceph - random os: - ubuntu-22.04 include: - go: oldstable suite: cluster backend: dir os: ubuntu-22.04 - go: oldstable suite: standalone backend: dir os: ubuntu-22.04 - go: tip suite: cluster backend: dir os: ubuntu-22.04 - go: tip suite: standalone backend: dir os: ubuntu-22.04 - go: oldstable suite: cluster backend: dir os: ubuntu-22.04-arm - go: oldstable suite: standalone backend: dir os: ubuntu-22.04-arm - go: stable suite: cluster backend: dir os: ubuntu-22.04-arm - go: stable suite: standalone backend: dir os: ubuntu-22.04-arm - go: tip suite: cluster backend: dir os: ubuntu-22.04-arm - go: tip suite: standalone backend: dir os: ubuntu-22.04-arm runs-on: ${{ matrix.os }} steps: - name: Performance tuning run: | set -eux # optimize ext4 FSes for performance, not reliability for fs in $(findmnt --noheading --type ext4 --list --uniq | awk '{print $1}'); do # nombcache and data=writeback cannot be changed on remount sudo mount -o remount,noatime,barrier=0,commit=6000 "${fs}" || true done # disable dpkg from calling sync() echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io - name: Reclaim some space run: | set -eux sudo snap remove lxd --purge # Purge older snap revisions that are disabled/superseded by newer revisions of the same snap snap list --all | while read -r name _ rev _ _ notes _; do [ "${notes}" = "disabled" ] && snap remove "${name}" --revision "${rev}" --purge done || true # This was inspired from https://github.com/easimon/maximize-build-space df -h / # dotnet sudo rm -rf /usr/share/dotnet # android sudo rm -rf /usr/local/lib/android # haskell sudo rm -rf /opt/ghc df -h / - name: Remove docker run: | set -eux sudo apt-get autopurge -y moby-containerd docker uidmap sudo ip link delete docker0 sudo nft flush ruleset - name: Checkout uses: actions/checkout@v4 - name: Install Go (${{ matrix.go }}) uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} if: matrix.go != 'tip' - name: Install Go (stable) uses: actions/setup-go@v5 with: go-version: stable if: matrix.go == 'tip' - name: Install Go (tip) run: | go install golang.org/dl/gotip@latest gotip download ~/sdk/gotip/bin/go version echo "PATH=$HOME/go/bin:$HOME/sdk/gotip/bin/:$PATH" >> $GITHUB_ENV if: matrix.go == 'tip' - name: Install dependencies run: | set -x sudo add-apt-repository ppa:ubuntu-lxc/daily -y --no-update sudo add-apt-repository ppa:cowsql/stable -y --no-update sudo apt-get update sudo systemctl mask lxc.service lxc-net.service sudo apt-get install --no-install-recommends -y \ apparmor \ bsdextrautils \ bzip2 \ curl \ dosfstools \ git \ libacl1-dev \ libcap-dev 
\ libdbus-1-dev \ libcowsql-dev \ liblxc-dev \ libseccomp-dev \ libselinux-dev \ libsqlite3-dev \ libtool \ libudev-dev \ linux-modules-extra-$(uname -r) \ make \ pkg-config\ acl \ attr \ bind9-dnsutils \ btrfs-progs \ busybox-static \ dnsmasq-base \ easy-rsa \ gettext \ jq \ lxc-utils \ lvm2 \ nftables \ quota \ rsync \ s3cmd \ socat \ sqlite3 \ squashfs-tools \ tar \ tcl \ thin-provisioning-tools \ uuid-runtime \ xfsprogs \ xz-utils \ zfsutils-linux # Make sure all AppArmor profiles are loaded. sudo systemctl start apparmor # Reclaim some space sudo apt-get clean # Download minio. curl -sSfL https://dl.min.io/server/minio/release/linux-$(dpkg --print-architecture)/archive/minio_20240116160738.0.0_$(dpkg --print-architecture).deb --output /tmp/minio.deb sudo apt-get install /tmp/minio.deb --yes # Download MinIO client curl -sSfL https://dl.min.io/client/mc/release/linux-$(dpkg --print-architecture)/archive/mc.RELEASE.2024-01-16T16-06-34Z --output /tmp/mc sudo mv /tmp/mc /usr/local/bin/ sudo chmod +x /usr/local/bin/mc # Download latest release of openfga server. mkdir -p "$(go env GOPATH)/bin/" curl -sSfL https://api.github.com/repos/openfga/openfga/releases/latest | jq -r ".assets | .[] | .browser_download_url | select(. | test(\"_linux_$(dpkg --print-architecture).tar.gz$\"))" | xargs -I {} curl -sSfL {} -o openfga.tar.gz tar -xzf openfga.tar.gz -C "$(go env GOPATH)/bin/" # Download latest release of openfga cli. curl -sSfL https://api.github.com/repos/openfga/cli/releases/latest | jq -r ".assets | .[] | .browser_download_url | select(. | test(\"_linux_$(dpkg --print-architecture).tar.gz$\"))" | xargs -I {} curl -sSfL {} -o fga.tar.gz tar -xzf fga.tar.gz -C "$(go env GOPATH)/bin/" - name: Download go dependencies run: | go mod download - name: Run Incus build env: CGO_LDFLAGS_ALLOW: "(-Wl,-wrap,pthread_create)|(-Wl,-z,now)" run: | make - name: Setup MicroCeph if: ${{ matrix.backend == 'ceph' }} run: | set -x # If the rootfs and the ephemeral part are on the same physical disk, giving the whole # disk to microceph would wipe our rootfs. Since it is pretty rare for GitHub Action # runners to have a single disk, we immediately bail rather than trying to gracefully # handle it. Once snapd releases with https://github.com/snapcore/snapd/pull/13150, # we will be able to stop worrying about that special case. if [ "$(stat -c '%d' /)" = "$(stat -c '%d' /mnt)" ]; then echo "FAIL: rootfs and ephemeral part on the same disk, aborting" exit 1 fi sudo apt-get install --no-install-recommends -y snapd sudo snap install microceph --channel=quincy/stable sudo apt-get install --no-install-recommends -y ceph-common sudo microceph cluster bootstrap sudo microceph.ceph config set global osd_pool_default_size 1 sudo microceph.ceph config set global mon_allow_pool_delete true sudo microceph.ceph config set global osd_memory_target 939524096 sudo microceph.ceph osd crush rule rm replicated_rule sudo microceph.ceph osd crush rule create-replicated replicated default osd for flag in nosnaptrim noscrub nobackfill norebalance norecover noscrub nodeep-scrub; do sudo microceph.ceph osd set $flag done # Repurpose the ephemeral disk for ceph OSD. 
sudo swapoff /mnt/swapfile ephemeral_disk="$(findmnt --noheadings --output SOURCE --target /mnt | sed 's/[0-9]\+$//')" sudo umount /mnt sudo microceph disk add --wipe "${ephemeral_disk}" sudo rm -rf /etc/ceph sudo ln -s /var/snap/microceph/current/conf/ /etc/ceph sudo microceph enable rgw sudo microceph.ceph osd pool create cephfs_meta 32 sudo microceph.ceph osd pool create cephfs_data 32 sudo microceph.ceph fs new cephfs cephfs_meta cephfs_data sudo microceph.ceph fs ls sleep 30 sudo microceph.ceph status # Wait until there are no more "unkowns" pgs for _ in $(seq 60); do if sudo microceph.ceph pg stat | grep -wF unknown; then sleep 1 else break fi done sudo microceph.ceph status sudo rm -f /snap/bin/rbd - name: "Ensure offline mode (block image server)" run: | sudo nft add table inet filter sudo nft add chain 'inet filter output { type filter hook output priority 10 ; }' sudo nft add rule inet filter output ip daddr 45.45.148.8 reject sudo nft add rule inet filter output ip6 daddr 2602:fc62:a:1::8 reject - name: "Run system tests (${{ matrix.go }}, ${{ matrix.suite }}, ${{ matrix.backend }})" env: CGO_LDFLAGS_ALLOW: "(-Wl,-wrap,pthread_create)|(-Wl,-z,now)" INCUS_CEPH_CLUSTER: "ceph" INCUS_CEPH_CEPHFS: "cephfs" INCUS_CEPH_CEPHOBJECT_RADOSGW: "http://127.0.0.1" INCUS_CONCURRENT: "1" INCUS_VERBOSE: "1" INCUS_OFFLINE: "1" INCUS_TMPFS: "1" INCUS_REQUIRED_TESTS: "test_storage_buckets" run: | chmod +x ~ echo "root:1000000:1000000000" | sudo tee /etc/subuid /etc/subgid cd test sudo --preserve-env=PATH,GOPATH,GITHUB_ACTIONS,INCUS_VERBOSE,INCUS_BACKEND,INCUS_CEPH_CLUSTER,INCUS_CEPH_CEPHFS,INCUS_CEPH_CEPHOBJECT_RADOSGW,INCUS_OFFLINE,INCUS_SKIP_TESTS,INCUS_REQUIRED_TESTS, INCUS_BACKEND=${{ matrix.backend }} ./main.sh ${{ matrix.suite }} client: name: Client strategy: fail-fast: false matrix: go: - oldstable - stable os: - ubuntu-latest - macos-latest - windows-latest runs-on: ${{ matrix.os }} steps: - name: Checkout code uses: actions/checkout@v4 - name: Install Go uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} - name: Create build directory run: | mkdir bin - name: Build static x86_64 incus env: CGO_ENABLED: 0 GOARCH: amd64 run: | go build -o bin/incus.x86_64 ./cmd/incus - name: Build static aarch64 incus env: CGO_ENABLED: 0 GOARCH: arm64 run: | go build -o bin/incus.aarch64 ./cmd/incus - name: Build static incus-migrate if: runner.os == 'Linux' env: CGO_ENABLED: 0 run: | GOARCH=amd64 go build -o bin/incus-migrate.x86_64 ./cmd/incus-migrate GOARCH=arm64 go build -o bin/incus-migrate.aarch64 ./cmd/incus-migrate - name: Build static lxd-to-incus if: runner.os == 'Linux' env: CGO_ENABLED: 0 run: | GOARCH=amd64 go build -o bin/lxd-to-incus.x86_64 ./cmd/lxd-to-incus GOARCH=arm64 go build -o bin/lxd-to-incus.aarch64 ./cmd/lxd-to-incus - name: Unit tests (client) env: CGO_ENABLED: 0 run: go test -v ./client/... - name: Unit tests (incus) env: CGO_ENABLED: 0 run: go test -v ./cmd/incus/... - name: Unit tests (shared) env: CGO_ENABLED: 0 run: go test -v ./shared/... 
- name: Upload incus client artifacts if: matrix.go == 'stable' uses: actions/upload-artifact@v4 continue-on-error: true with: name: ${{ runner.os }} path: bin/ documentation: name: Documentation runs-on: ubuntu-24.04 steps: - name: Checkout uses: actions/checkout@v4 - name: Install Go uses: actions/setup-go@v5 with: go-version: stable - name: Install dependencies run: | sudo apt-get install aspell aspell-en sudo snap install mdl - name: Run markdown linter run: | make doc-lint - name: Run spell checker run: | make doc-spellcheck - name: Run inclusive naming checker uses: get-woke/woke-action@v0 with: fail-on-error: true woke-args: "*.md **/*.md -c https://github.com/canonical/Inclusive-naming/raw/main/config.yml" - name: Run link checker run: | make doc-linkcheck - name: Build docs (Sphinx) run: make doc - name: Print warnings (Sphinx) run: if [ -s doc/.sphinx/warnings.txt ]; then cat doc/.sphinx/warnings.txt; exit 1; fi - name: Upload documentation artifacts if: always() uses: actions/upload-artifact@v4 with: name: documentation path: doc/html incus-6.0.4/.github/workflows/triage.yml000066400000000000000000000006501477363751000202530ustar00rootroot00000000000000name: Triaging on: - pull_request_target permissions: contents: read jobs: label: permissions: contents: read # for actions/labeler to determine modified files pull-requests: write # for actions/labeler to add labels to PRs name: PR labels runs-on: ubuntu-24.04 steps: - uses: actions/labeler@v5 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" sync-labels: true incus-6.0.4/.gitignore000066400000000000000000000013621477363751000146510ustar00rootroot00000000000000*.swp po/*.mo po/*.po~ incus-*.tar.xz .vagrant *~ tags # Potential binaries cmd/fuidshift/fuidshift cmd/incus/incus cmd/lxc-to-incus/lxc-to-incus cmd/lxd-to-incus/lxd-to-incus cmd/incus-agent/incus-agent cmd/incus-benchmark/incus-benchmark cmd/incus-migrate/incus-migrate cmd/incus-user/incus-user test/dev_incus-client/dev_incus-client test/syscall/sysinfo/sysinfo test/mini-oidc/mini-oidc test/mini-oidc/user.data test/tls2jwt/tls2jwt # Sphinx doc/html/ doc/reference/manpages/**/*.md doc/.sphinx/deps/ doc/.sphinx/.doctrees/ doc/.sphinx/themes/ doc/.sphinx/venv/ doc/.sphinx/warnings.txt doc/.sphinx/.wordlist.dic doc/.sphinx/_static/swagger-ui doc/.sphinx/_static/download doc/__pycache__ # For Atom ctags .tags .tags1 # For JetBrains IDEs .idea incus-6.0.4/.golangci.yml000066400000000000000000000040521477363751000152440ustar00rootroot00000000000000version: "2" linters: enable: - godot - misspell - revive - whitespace settings: errcheck: exclude-functions: - (io.ReadCloser).Close - (io.WriteCloser).Close - (io.ReadWriteCloser).Close - (*os.File).Close - (*github.com/gorilla/websocket.Conn).Close - (*github.com/mdlayher/vsock.Listener).Close - os.Remove - (*compress/gzip.Writer).Close revive: rules: - name: exported arguments: - checkPrivateReceivers - disableStutteringCheck - name: import-shadowing - name: unchecked-type-assertion - name: var-naming arguments: - [] - [] - - upperCaseConst: true - name: early-return - name: redundant-import-alias - name: redefines-builtin-id - name: struct-tag - name: receiver-naming - name: deep-exit - name: defer - name: bool-literal-in-expr - name: comment-spacings - name: use-any - name: bare-return - name: empty-block - name: range-val-address - name: range-val-in-closure - name: var-declaration - name: useless-break - name: error-naming - name: indent-error-flow - name: datarace - name: modifies-value-receiver - name: empty-lines - name: 
duplicated-imports - name: error-return exclusions: generated: lax rules: - linters: - revive source: '^//generate-database:mapper ' - linters: - staticcheck text: "ST1005:" paths: - third_party$ - builtin$ - examples$ formatters: enable: - gci - gofumpt - goimports settings: gci: sections: - standard - default - prefix(github.com/lxc/incus) goimports: local-prefixes: - github.com/lxc/incus exclusions: generated: lax paths: - third_party$ - builtin$ - examples$ incus-6.0.4/AUTHORS000066400000000000000000000003631477363751000137310ustar00rootroot00000000000000Unless mentioned otherwise in a specific file's header, all code in this project is released under the Apache 2.0 license. The list of authors and contributors can be retrieved from the git commit history and in some cases, the file headers. incus-6.0.4/CODE_OF_CONDUCT.md000066400000000000000000000064361477363751000154670ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at coc@linuxcontainers.org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see incus-6.0.4/CONTRIBUTING.md000066400000000000000000000107321477363751000151130ustar00rootroot00000000000000# Contributing The Incus team appreciates contributions to the project, through pull requests, issues on the [GitHub repository](https://github.com/lxc/incus/issues), or discussions or questions on the [forum](https://discuss.linuxcontainers.org). Check the following guidelines before contributing to the project. ## Code of Conduct When contributing, you must adhere to the Code of Conduct, which is available at: [`https://github.com/lxc/incus/blob/main/CODE_OF_CONDUCT.md`](https://github.com/lxc/incus/blob/main/CODE_OF_CONDUCT.md) ## License and copyright By default, any contribution to this project is made under the Apache 2.0 license. The author of a change remains the copyright holder of their code (no copyright assignment). ## Pull requests Changes to this project should be proposed as pull requests on GitHub at: [`https://github.com/lxc/incus`](https://github.com/lxc/incus) Proposed changes will then go through review there and once approved, be merged in the main branch. ### Commit structure Separate commits should be used for: - API extension (`api: Add XYZ extension`, contains `doc/api-extensions.md` and `internal/version/api.go`) - Documentation (`doc: Update XYZ` for files in `doc/`) - API structure (`shared/api: Add XYZ` for changes to `shared/api/`) - Go client package (`client: Add XYZ` for changes to `client/`) - CLI (`cmd/: Change XYZ` for changes to `cmd/`) - Incus daemon (`incus/: Add support for XYZ` for changes to `incus/`) - Tests (`tests: Add test for XYZ` for changes to `tests/`) The same kind of pattern extends to the other tools in the Incus code tree and depending on complexity, things may be split into even smaller chunks. When updating strings in the CLI tool (`cmd/`), you may need a commit to update the templates: make i18n git commit -a -s -m "i18n: Update translation templates" po/ When updating API (`shared/api`), you may need a commit to update the swagger YAML: make update-api git commit -s -m "doc/rest-api: Refresh swagger YAML" doc/rest-api.yaml This structure makes it easier for contributions to be reviewed and also greatly simplifies the process of back-porting fixes to stable branches. ### Developer Certificate of Origin To improve tracking of contributions to this project we use the DCO 1.1 and use a "sign-off" procedure for all changes going into the branch. The sign-off is a simple line at the end of the explanation for the commit which certifies that you wrote it or otherwise have the right to pass it on as an open-source contribution. ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` An example of a valid sign-off line is: ``` Signed-off-by: Random J Developer ``` Use a known identity and a valid e-mail address. Sorry, no anonymous contributions are allowed. We also require each commit be individually signed-off by their author, even when part of a larger set. You may find `git commit -s` useful. ## More information For more information, see [Contributing](https://linuxcontainers.org/incus/docs/main/contributing/) in the documentation. incus-6.0.4/COPYING000066400000000000000000000261361477363751000137220ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. incus-6.0.4/Makefile000066400000000000000000000300671477363751000143250ustar00rootroot00000000000000GO ?= go DOMAIN=incus POFILES=$(wildcard po/*.po) MOFILES=$(patsubst %.po,%.mo,$(POFILES)) LINGUAS=$(basename $(POFILES)) POTFILE=po/$(DOMAIN).pot VERSION=$(or ${CUSTOM_VERSION},$(shell grep "var Version" internal/version/flex.go | cut -d'"' -f2)) ARCHIVE=incus-$(VERSION).tar HASH := \# TAG_SQLITE3=$(shell printf "$(HASH)include \nvoid main(){cowsql_node_id n = 1;}" | $(CC) ${CGO_CFLAGS} -o /dev/null -xc - >/dev/null 2>&1 && echo "libsqlite3") GOPATH ?= $(shell $(GO) env GOPATH) CGO_LDFLAGS_ALLOW ?= (-Wl,-wrap,pthread_create)|(-Wl,-z,now) SPHINXENV=doc/.sphinx/venv/bin/activate SPHINXPIPPATH=doc/.sphinx/venv/bin/pip OVN_MINVER=22.03.0 OVS_MINVER=2.15.0 ifneq "$(wildcard vendor)" "" RAFT_PATH=$(CURDIR)/vendor/raft COWSQL_PATH=$(CURDIR)/vendor/cowsql else RAFT_PATH=$(GOPATH)/deps/raft COWSQL_PATH=$(GOPATH)/deps/cowsql endif # raft .PHONY: default default: build .PHONY: build build: ifeq "$(TAG_SQLITE3)" "" @echo "Missing cowsql, run \"make deps\" to setup." 
exit 1 endif CC="$(CC)" CGO_LDFLAGS_ALLOW="$(CGO_LDFLAGS_ALLOW)" $(GO) install -v -tags "$(TAG_SQLITE3)" $(DEBUG) ./... CGO_ENABLED=0 $(GO) install -v -tags netgo ./cmd/incus-migrate CGO_ENABLED=0 $(GO) install -v -tags agent,netgo ./cmd/incus-agent @echo "Incus built successfully" .PHONY: client client: $(GO) install -v -tags "$(TAG_SQLITE3)" $(DEBUG) ./cmd/incus @echo "Incus client built successfully" .PHONY: incus-agent incus-agent: CGO_ENABLED=0 $(GO) install -v -tags agent,netgo ./cmd/incus-agent @echo "Incus agent built successfully" .PHONY: incus-migrate incus-migrate: CGO_ENABLED=0 $(GO) install -v -tags netgo ./cmd/incus-migrate @echo "Incus migration tool built successfully" .PHONY: deps deps: @if [ ! -e "$(RAFT_PATH)" ]; then \ git clone --depth=1 "https://github.com/cowsql/raft" "$(RAFT_PATH)"; \ elif [ -e "$(RAFT_PATH)/.git" ]; then \ cd "$(RAFT_PATH)"; git pull; \ fi cd "$(RAFT_PATH)" && \ autoreconf -i && \ ./configure && \ make # cowsql @if [ ! -e "$(COWSQL_PATH)" ]; then \ git clone --depth=1 "https://github.com/cowsql/cowsql" "$(COWSQL_PATH)"; \ elif [ -e "$(COWSQL_PATH)/.git" ]; then \ cd "$(COWSQL_PATH)"; git pull; \ fi cd "$(COWSQL_PATH)" && \ autoreconf -i && \ PKG_CONFIG_PATH="$(RAFT_PATH)" ./configure && \ make CFLAGS="-I$(RAFT_PATH)/include/" LDFLAGS="-L$(RAFT_PATH)/.libs/" # environment @echo "" @echo "Please set the following in your environment (possibly ~/.bashrc)" @echo "export CGO_CFLAGS=\"-I$(RAFT_PATH)/include/ -I$(COWSQL_PATH)/include/\"" @echo "export CGO_LDFLAGS=\"-L$(RAFT_PATH)/.libs -L$(COWSQL_PATH)/.libs/\"" @echo "export LD_LIBRARY_PATH=\"$(RAFT_PATH)/.libs/:$(COWSQL_PATH)/.libs/\"" @echo "export CGO_LDFLAGS_ALLOW=\"(-Wl,-wrap,pthread_create)|(-Wl,-z,now)\"" .PHONY: update-gomod update-gomod: ifneq "$(INCUS_OFFLINE)" "" @echo "The update-gomod target cannot be run in offline mode." exit 1 endif $(GO) get -t -v -u ./... 
$(GO) mod tidy --go=1.23.7 $(GO) get toolchain@none @echo "Dependencies updated" .PHONY: update-ovsdb update-ovsdb: go install github.com/ovn-org/libovsdb/cmd/modelgen@main rm -Rf internal/server/network/ovs/schema mkdir internal/server/network/ovs/schema curl -s https://raw.githubusercontent.com/openvswitch/ovs/v$(OVS_MINVER)/vswitchd/vswitch.ovsschema -o internal/server/network/ovs/schema/ovs.json modelgen -o internal/server/network/ovs/schema/ovs internal/server/network/ovs/schema/ovs.json rm internal/server/network/ovs/schema/*.json rm -Rf internal/server/network/ovn/schema mkdir internal/server/network/ovn/schema curl -s https://raw.githubusercontent.com/ovn-org/ovn/v$(OVN_MINVER)/ovn-nb.ovsschema -o internal/server/network/ovn/schema/ovn-nb.json curl -s https://raw.githubusercontent.com/ovn-org/ovn/v$(OVN_MINVER)/ovn-sb.ovsschema -o internal/server/network/ovn/schema/ovn-sb.json curl -s https://raw.githubusercontent.com/ovn-org/ovn/v$(OVN_MINVER)/ovn-ic-nb.ovsschema -o internal/server/network/ovn/schema/ovn-ic-nb.json curl -s https://raw.githubusercontent.com/ovn-org/ovn/v$(OVN_MINVER)/ovn-ic-sb.ovsschema -o internal/server/network/ovn/schema/ovn-ic-sb.json modelgen -o internal/server/network/ovn/schema/ovn-nb internal/server/network/ovn/schema/ovn-nb.json modelgen -o internal/server/network/ovn/schema/ovn-sb internal/server/network/ovn/schema/ovn-sb.json modelgen -o internal/server/network/ovn/schema/ovn-ic-nb internal/server/network/ovn/schema/ovn-ic-nb.json modelgen -o internal/server/network/ovn/schema/ovn-ic-sb internal/server/network/ovn/schema/ovn-ic-sb.json rm internal/server/network/ovn/schema/*.json .PHONY: update-protobuf update-protobuf: protoc --go_out=. ./internal/migration/migrate.proto .PHONY: update-schema update-schema: cd cmd/generate-database && $(GO) build -o $(GOPATH)/bin/generate-database -tags "$(TAG_SQLITE3)" $(DEBUG) && cd - $(GO) generate ./... gofumpt -w ./internal/server/db/ goimports -w ./internal/server/db/ @echo "Code generation completed" .PHONY: update-api update-api: ifeq "$(INCUS_OFFLINE)" "" (cd / ; $(GO) install -v -x github.com/go-swagger/go-swagger/cmd/swagger@latest) endif swagger generate spec -o doc/rest-api.yaml -w ./cmd/incusd -m .PHONY: update-metadata update-metadata: build @echo "Generating golang documentation metadata" cd cmd/generate-config && CGO_ENABLED=0 $(GO) build -o $(GOPATH)/bin/generate-config $(GOPATH)/bin/generate-config . --json ./internal/server/metadata/configuration.json --txt ./doc/config_options.txt .PHONY: doc-setup doc-setup: client @echo "Setting up documentation build environment" python3 -m venv doc/.sphinx/venv . $(SPHINXENV) ; pip install --require-virtualenv --upgrade -r doc/.sphinx/requirements.txt --log doc/.sphinx/venv/pip_install.log @test ! -f doc/.sphinx/venv/pip_list.txt || \ mv doc/.sphinx/venv/pip_list.txt doc/.sphinx/venv/pip_list.txt.bak $(SPHINXPIPPATH) list --local --format=freeze > doc/.sphinx/venv/pip_list.txt find doc/reference/manpages/ -name "*.md" -type f -delete rm -Rf doc/html rm -Rf doc/.sphinx/.doctrees .PHONY: doc doc: doc-setup doc-incremental .PHONY: doc-incremental doc-incremental: @echo "Build the documentation" . $(SPHINXENV) ; sphinx-build -c doc/ -b dirhtml doc/ doc/html/ -d doc/.sphinx/.doctrees -w doc/.sphinx/warnings.txt .PHONY: doc-serve doc-serve: cd doc/html; python3 -m http.server 8001 .PHONY: doc-spellcheck doc-spellcheck: doc . $(SPHINXENV) ; python3 -m pyspelling -c doc/.sphinx/spellingcheck.yaml .PHONY: doc-linkcheck doc-linkcheck: doc-setup . 
$(SPHINXENV) ; LOCAL_SPHINX_BUILD=True sphinx-build -c doc/ -b linkcheck doc/ doc/html/ -d doc/.sphinx/.doctrees .PHONY: doc-lint doc-lint: doc/.sphinx/.markdownlint/doc-lint.sh .PHONY: woke-install woke-install: @type woke >/dev/null 2>&1 || \ { echo "Installing \"woke\" snap... \n"; sudo snap install woke; } .PHONY: doc-woke doc-woke: woke-install woke *.md **/*.md -c https://github.com/canonical/Inclusive-naming/raw/main/config.yml .PHONY: debug debug: ifeq "$(TAG_SQLITE3)" "" @echo "Missing custom libsqlite3, run \"make deps\" to setup." exit 1 endif CC="$(CC)" CGO_LDFLAGS_ALLOW="$(CGO_LDFLAGS_ALLOW)" $(GO) install -v -tags "$(TAG_SQLITE3) logdebug" $(DEBUG) ./... CGO_ENABLED=0 $(GO) install -v -tags "netgo,logdebug" ./cmd/incus-migrate CGO_ENABLED=0 $(GO) install -v -tags "agent,netgo,logdebug" ./cmd/incus-agent @echo "Incus built successfully" .PHONY: nocache nocache: ifeq "$(TAG_SQLITE3)" "" @echo "Missing custom libsqlite3, run \"make deps\" to setup." exit 1 endif CC="$(CC)" CGO_LDFLAGS_ALLOW="$(CGO_LDFLAGS_ALLOW)" $(GO) install -a -v -tags "$(TAG_SQLITE3)" $(DEBUG) ./... CGO_ENABLED=0 $(GO) install -a -v -tags netgo ./cmd/incus-migrate CGO_ENABLED=0 $(GO) install -a -v -tags agent,netgo ./cmd/incus-agent @echo "Incus built successfully" race: ifeq "$(TAG_SQLITE3)" "" @echo "Missing custom libsqlite3, run \"make deps\" to setup." exit 1 endif CC="$(CC)" CGO_LDFLAGS_ALLOW="$(CGO_LDFLAGS_ALLOW)" $(GO) install -race -v -tags "$(TAG_SQLITE3)" $(DEBUG) ./... CGO_ENABLED=0 $(GO) install -v -tags netgo ./cmd/incus-migrate CGO_ENABLED=0 $(GO) install -v -tags agent,netgo ./cmd/incus-agent @echo "Incus built successfully" .PHONY: check check: default ifeq "$(INCUS_OFFLINE)" "" (cd / ; $(GO) install -v -x github.com/rogpeppe/godeps@latest) (cd / ; $(GO) install -v -x github.com/tsenart/deadcode@latest) (cd / ; $(GO) install -v -x golang.org/x/lint/golint@latest) endif CGO_LDFLAGS_ALLOW="$(CGO_LDFLAGS_ALLOW)" $(GO) test -v -tags "$(TAG_SQLITE3)" $(DEBUG) ./... 
cd test && ./main.sh .PHONY: dist dist: doc # Cleanup rm -Rf $(ARCHIVE).xz # Create build dir $(eval TMP := $(shell mktemp -d)) git archive --prefix=incus-$(VERSION)/ HEAD | tar -x -C $(TMP) git show-ref HEAD | cut -d' ' -f1 > $(TMP)/incus-$(VERSION)/.gitref # Download dependencies (cd $(TMP)/incus-$(VERSION) ; $(GO) mod vendor) # Download the cowsql libraries git clone --depth=1 https://github.com/cowsql/cowsql $(TMP)/incus-$(VERSION)/vendor/cowsql (cd $(TMP)/incus-$(VERSION)/vendor/cowsql ; git show-ref HEAD | cut -d' ' -f1 > .gitref) git clone --depth=1 https://github.com/cowsql/raft $(TMP)/incus-$(VERSION)/vendor/raft (cd $(TMP)/incus-$(VERSION)/vendor/raft ; git show-ref HEAD | cut -d' ' -f1 > .gitref) # Copy doc output cp -r doc/html $(TMP)/incus-$(VERSION)/doc/html/ # Assemble tarball tar --exclude-vcs -C $(TMP) -Jcf $(ARCHIVE).xz incus-$(VERSION)/ # Cleanup rm -Rf $(TMP) .PHONY: i18n i18n: update-pot update-po po/%.mo: po/%.po msgfmt --statistics -o $@ $< po/%.po: po/$(DOMAIN).pot msgmerge -U po/$*.po po/$(DOMAIN).pot .PHONY: update-po update-po: set -eu; \ for lang in $(LINGUAS); do\ msgmerge --backup=none -U $$lang.po po/$(DOMAIN).pot; \ done .PHONY: update-pot update-pot: ifeq "$(INCUS_OFFLINE)" "" (cd / ; $(GO) install -v -x github.com/snapcore/snapd/i18n/xgettext-go@2.57.1) endif xgettext-go -o po/$(DOMAIN).pot --add-comments-tag=TRANSLATORS: --sort-output --package-name=$(DOMAIN) --msgid-bugs-address=lxc-devel@lists.linuxcontainers.org --keyword=i18n.G --keyword-plural=i18n.NG cmd/incus/*.go shared/cliconfig/*.go .PHONY: build-mo build-mo: $(MOFILES) .PHONY: static-analysis static-analysis: ifeq ($(shell command -v go-licenses),) (cd / ; $(GO) install -v -x github.com/google/go-licenses@latest) endif ifeq ($(shell command -v golangci-lint),) curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$($(GO) env GOPATH)/bin endif ifeq ($(shell command -v shellcheck),) echo "Please install shellcheck" exit 1 else endif ifeq ($(shell command -v flake8),) echo "Please install flake8" exit 1 endif ifeq ($(shell command -v codespell),) echo "Please install codespell" exit 1 endif flake8 test/deps/import-busybox shellcheck --shell sh test/*.sh test/includes/*.sh test/suites/*.sh test/backends/*.sh test/lint/*.sh shellcheck test/extras/*.sh run-parts $(shell run-parts -V >/dev/null 2>&1 && echo -n "--verbose --exit-on-error --regex '.sh'") test/lint .PHONY: staticcheck staticcheck: ifeq ($(shell command -v staticcheck),) (cd / ; $(GO) install -v -x honnef.co/go/tools/cmd/staticcheck@latest) endif # To get advance notice of deprecated function usage, consider running: # sed -i 's/^go 1\.[0-9]\+$/go 1.18/' go.mod # before 'make staticcheck'. # Run staticcheck against all the dirs containing Go files. staticcheck $$(git ls-files *.go | sed 's|^|./|; s|/[^/]\+\.go$$||' | sort -u) .PHONY: tags tags: */*.go ifeq ($(shell command -v gotags),) (cd / ; $(GO) install -v -x github.com/jstemmer/gotags@latest) endif find . 
-type f -name '*.go' | gotags -L - -f tags # OpenFGA Syntax Transformer: https://github.com/openfga/syntax-transformer .PHONY: update-openfga update-openfga: ifeq ($(shell command -v fga),) (cd / ; $(GO) install -v -x github.com/openfga/cli/cmd/fga@latest) endif @printf 'package auth\n\n// Code generated by Makefile; DO NOT EDIT.\n\nvar authModel = `%s`\n' '$(shell fga model transform --file=./internal/server/auth/driver_openfga_model.openfga | jq -c)' > ./internal/server/auth/driver_openfga_model.go .PHONY: unit-test unit-test: sudo --preserve-env=CGO_CFLAGS,CGO_LDFLAGS,CGO_LDFLAGS_ALLOW,LD_LIBRARY_PATH LD_LIBRARY_PATH=${LD_LIBRARY_PATH} env "PATH=${PATH}" $(GO) test ./... incus-6.0.4/README.md000066400000000000000000000125001477363751000141340ustar00rootroot00000000000000# Incus Incus is a modern, secure and powerful system container and virtual machine manager. It provides a unified experience for running and managing full Linux systems inside containers or virtual machines. Incus supports images for a large number of Linux distributions (official Ubuntu images and images provided by the community) and is built around a very powerful, yet pretty simple, REST API. Incus scales from one instance on a single machine to a cluster in a full data center rack, making it suitable for running workloads both for development and in production. Incus allows you to easily set up a system that feels like a small private cloud. You can run any type of workload in an efficient way while keeping your resources optimized. You should consider using Incus if you want to containerize different environments or run virtual machines, or in general run and manage your infrastructure in a cost-effective way. You can try Incus online at: [`https://linuxcontainers.org/incus/try-it/`](https://linuxcontainers.org/incus/try-it/) ## Project history Incus, which is named after the [Cumulonimbus incus](https://en.wikipedia.org/wiki/Cumulonimbus_incus) or anvil cloud started as a community fork of Canonical's LXD following [Canonical's takeover](https://linuxcontainers.org/lxd/) of the LXD project from the Linux Containers community. The project was then adopted by the Linux Containers community, taking back the spot left empty by LXD's departure. Incus is a true open source community project, free of any [CLA](https://en.wikipedia.org/wiki/Contributor_License_Agreement) and remains released under the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). It's maintained by the same team of developers that first created LXD. LXD users wishing to migrate to Incus can easily do so through a migration tool called [`lxd-to-incus`](https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/). ## Get started See [Getting started](https://linuxcontainers.org/incus/docs/main/tutorial/first_steps/) in the Incus documentation for installation instructions and first steps. 
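As a quick illustration of those first steps, a typical first session might look something like the sketch below (the instance names are arbitrary, and the `images:` remote with a Debian 12 image is assumed to be available):

```
# One-time interactive setup of storage and networking.
incus admin init

# Launch a system container and a virtual machine from the community image server.
incus launch images:debian/12 first
incus launch images:debian/12 first-vm --vm

# List the instances and get a shell inside the container.
incus list
incus exec first -- bash
```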
- Release announcements: [`https://discuss.linuxcontainers.org/c/news/`](https://discuss.linuxcontainers.org/c/news/)
- Release tarballs: [`https://github.com/lxc/incus/releases/`](https://github.com/lxc/incus/releases/)
- Documentation: [`https://linuxcontainers.org/incus/docs/main/`](https://linuxcontainers.org/incus/docs/main/)

## Status

Type | Service | Status
--- | --- | ---
Tests | GitHub | [![Build Status](https://github.com/lxc/incus/actions/workflows/tests.yml/badge.svg?branch=main)](https://github.com/lxc/incus/actions?query=event%3Apush+branch%3Amain)
Go documentation | Godoc | [![GoDoc](https://godoc.org/github.com/lxc/incus/v6/client?status.svg)](https://godoc.org/github.com/lxc/incus/v6/client)
Static analysis | GoReport | [![Go Report Card](https://goreportcard.com/badge/github.com/lxc/incus)](https://goreportcard.com/report/github.com/lxc/incus)
Translations | Weblate | [![Translation status](https://hosted.weblate.org/widget/incus/svg-badge.svg)](https://hosted.weblate.org/projects/incus/)

## Security

Consider the following aspects to ensure that your Incus installation is secure:

- Keep your operating system up-to-date and install all available security patches.
- Use only supported Incus versions.
- Restrict access to the Incus daemon and the remote API.
- Do not use privileged containers unless required. If you use privileged containers, put appropriate security measures in place. See the [LXC security page](https://linuxcontainers.org/lxc/security/) for more information.
- Configure your network interfaces to be secure.

See [Security](https://github.com/lxc/incus/blob/main/doc/explanation/security.md) for detailed information.

**IMPORTANT:** Local access to Incus through the Unix socket always grants full access to Incus.
This includes the ability to attach file system paths or devices to any instance as well as tweak the security features on any instance.
Therefore, you should only give such access to users who you'd trust with root access to your system.

## Support and community

The following channels are available for you to interact with the Incus community.

### Bug reports

You can file bug reports and feature requests at: [`https://github.com/lxc/incus/issues/new`](https://github.com/lxc/incus/issues/new)

### Community support

Community support is handled at: [`https://discuss.linuxcontainers.org`](https://discuss.linuxcontainers.org)

### Commercial support

Commercial support is currently available from [Zabbly](https://zabbly.com) for users of their [Debian or Ubuntu packages](https://github.com/zabbly/incus).

## Documentation

The official documentation is available at: [`https://github.com/lxc/incus/tree/main/doc`](https://github.com/lxc/incus/tree/main/doc)

## Contributing

Fixes and new features are greatly appreciated.
Make sure to read our [contributing guidelines](CONTRIBUTING.md) first!
incus-6.0.4/SECURITY.md000066400000000000000000000022541477363751000144530ustar00rootroot00000000000000# Security policy

## Supported versions

Incus has two types of releases:

- Feature releases
- LTS releases

For feature releases, only the latest one is supported, and we usually don't do point releases. Instead, users are expected to wait until the next release.

For LTS releases, we do periodic bugfix releases that include an accumulation of bugfixes from the feature releases. Such bugfix releases do not include new features.
## What qualifies as a security issue We don't consider privileged containers to be root safe, so any exploit allowing someone to escape them will not qualify as a security issue. This doesn't mean that we're not interested in preventing such escapes, but we simply do not consider such containers to be root safe. Unprivileged container escapes are certainly something we'd consider a security issue, especially if somehow facilitated by Incus. ## Reporting security issues Security issues can be reported by e-mail to security@linuxcontainers.org. Alternatively security issues can also be reported through Github at: https://github.com/lxc/incus/security/advisories/new incus-6.0.4/client/000077500000000000000000000000001477363751000141355ustar00rootroot00000000000000incus-6.0.4/client/connection.go000066400000000000000000000277231477363751000166360ustar00rootroot00000000000000package incus import ( "context" "crypto/sha256" "fmt" "net/http" "net/url" "os" "path/filepath" "slices" "strings" "time" "github.com/gorilla/websocket" "github.com/zitadel/oidc/v3/pkg/oidc" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/simplestreams" "github.com/lxc/incus/v6/shared/util" ) // ConnectionArgs represents a set of common connection properties. type ConnectionArgs struct { // TLS certificate of the remote server. If not specified, the system CA is used. TLSServerCert string // TLS certificate to use for client authentication. TLSClientCert string // TLS key to use for client authentication. TLSClientKey string // TLS CA to validate against when in PKI mode. TLSCA string // User agent string UserAgent string // Authentication type AuthType string // Custom proxy Proxy func(*http.Request) (*url.URL, error) // Custom HTTP Client (used as base for the connection) HTTPClient *http.Client // TransportWrapper wraps the *http.Transport set by Incus TransportWrapper func(*http.Transport) HTTPTransporter // Controls whether a client verifies the server's certificate chain and host name. InsecureSkipVerify bool // Cookie jar CookieJar http.CookieJar // OpenID Connect tokens OIDCTokens *oidc.Tokens[*oidc.IDTokenClaims] // Skip automatic GetServer request upon connection SkipGetServer bool // Caching support for image servers CachePath string CacheExpiry time.Duration } // ConnectIncus lets you connect to a remote Incus daemon over HTTPs. // // A client certificate (TLSClientCert) and key (TLSClientKey) must be provided. // // If connecting to an Incus daemon running in PKI mode, the PKI CA (TLSCA) must also be provided. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). func ConnectIncus(url string, args *ConnectionArgs) (InstanceServer, error) { return ConnectIncusWithContext(context.Background(), url, args) } // ConnectIncusWithContext lets you connect to a remote Incus daemon over HTTPs with context.Context. // // A client certificate (TLSClientCert) and key (TLSClientKey) must be provided. // // If connecting to an Incus daemon running in PKI mode, the PKI CA (TLSCA) must also be provided. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). 
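//
// A minimal usage sketch; ctx, clientCertPEM, clientKeyPEM and serverCertPEM
// are assumed to be provided by the caller, and the address is a placeholder:
//
//	c, err := incus.ConnectIncusWithContext(ctx, "https://192.0.2.10:8443", &incus.ConnectionArgs{
//		TLSClientCert: clientCertPEM,
//		TLSClientKey:  clientKeyPEM,
//		TLSServerCert: serverCertPEM,
//	})
//	if err != nil {
//		return err
//	}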
func ConnectIncusWithContext(ctx context.Context, url string, args *ConnectionArgs) (InstanceServer, error) { // Cleanup URL url = strings.TrimSuffix(url, "/") logger.Debug("Connecting to a remote Incus over HTTPS", logger.Ctx{"url": url}) return httpsIncus(ctx, url, args) } // ConnectIncusHTTP lets you connect to a VM agent over a VM socket. func ConnectIncusHTTP(args *ConnectionArgs, client *http.Client) (InstanceServer, error) { return ConnectIncusHTTPWithContext(context.Background(), args, client) } // ConnectIncusHTTPWithContext lets you connect to a VM agent over a VM socket with context.Context. func ConnectIncusHTTPWithContext(ctx context.Context, args *ConnectionArgs, client *http.Client) (InstanceServer, error) { logger.Debug("Connecting to a VM agent over a VM socket") // Use empty args if not specified if args == nil { args = &ConnectionArgs{} } httpBaseURL, err := url.Parse("https://custom.socket") if err != nil { return nil, err } ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) // Initialize the client struct server := ProtocolIncus{ ctx: ctx, httpBaseURL: *httpBaseURL, httpProtocol: "custom", httpUserAgent: args.UserAgent, ctxConnected: ctxConnected, ctxConnectedCancel: ctxConnectedCancel, eventConns: make(map[string]*websocket.Conn), eventListeners: make(map[string][]*EventListener), } // Setup the HTTP client server.http = client // Test the connection and seed the server information if !args.SkipGetServer { serverStatus, _, err := server.GetServer() if err != nil { return nil, err } // Record the server certificate server.httpCertificate = serverStatus.Environment.Certificate } return &server, nil } // ConnectIncusUnix lets you connect to a remote Incus daemon over a local unix socket. // // If the path argument is empty, then $INCUS_SOCKET will be used, if // unset $INCUS_DIR/unix.socket will be used and if that one isn't set // either, then the path will default to /var/lib/incus/unix.socket or /run/incus/unix.socket. func ConnectIncusUnix(path string, args *ConnectionArgs) (InstanceServer, error) { return ConnectIncusUnixWithContext(context.Background(), path, args) } // ConnectIncusUnixWithContext lets you connect to a remote Incus daemon over a local unix socket with context.Context. // // If the path argument is empty, then $INCUS_SOCKET will be used, if // unset $INCUS_DIR/unix.socket will be used and if that one isn't set // either, then the path will default to /var/lib/incus/unix.socket or /run/incus/unix.socket. func ConnectIncusUnixWithContext(ctx context.Context, path string, args *ConnectionArgs) (InstanceServer, error) { logger.Debug("Connecting to a local Incus over a Unix socket") // Use empty args if not specified if args == nil { args = &ConnectionArgs{} } httpBaseURL, err := url.Parse("http://unix.socket") if err != nil { return nil, err } ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) // Determine the socket path var projectName string if path == "" { path = os.Getenv("INCUS_SOCKET") if path == "" { incusDir := os.Getenv("INCUS_DIR") if incusDir == "" { _, err := os.Lstat("/run/incus/unix.socket") if err == nil { incusDir = "/run/incus" } else { incusDir = "/var/lib/incus" } } path = filepath.Join(incusDir, "unix.socket") userPath := filepath.Join(incusDir, "unix.socket.user") if !util.PathIsWritable(path) && util.PathIsWritable(userPath) { // Handle the use of incus-user. path = userPath // When using incus-user, the project list is typically restricted. 
// So let's try to be smart about the project we're using. projectName = fmt.Sprintf("user-%d", os.Geteuid()) } } } // Initialize the client struct server := ProtocolIncus{ ctx: ctx, httpBaseURL: *httpBaseURL, httpUnixPath: path, httpProtocol: "unix", httpUserAgent: args.UserAgent, ctxConnected: ctxConnected, ctxConnectedCancel: ctxConnectedCancel, eventConns: make(map[string]*websocket.Conn), eventListeners: make(map[string][]*EventListener), project: projectName, } // Setup the HTTP client httpClient, err := unixHTTPClient(args, path) if err != nil { return nil, err } server.http = httpClient // Test the connection and seed the server information if !args.SkipGetServer { serverStatus, _, err := server.GetServer() if err != nil { return nil, err } // Record the server certificate server.httpCertificate = serverStatus.Environment.Certificate } return &server, nil } // ConnectPublicIncus lets you connect to a remote public Incus daemon over HTTPs. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). func ConnectPublicIncus(url string, args *ConnectionArgs) (ImageServer, error) { return ConnectPublicIncusWithContext(context.Background(), url, args) } // ConnectPublicIncusWithContext lets you connect to a remote public Incus daemon over HTTPs with context.Context. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). func ConnectPublicIncusWithContext(ctx context.Context, url string, args *ConnectionArgs) (ImageServer, error) { logger.Debug("Connecting to a remote public Incus over HTTPS") // Cleanup URL url = strings.TrimSuffix(url, "/") return httpsIncus(ctx, url, args) } // ConnectSimpleStreams lets you connect to a remote SimpleStreams image server over HTTPs. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). func ConnectSimpleStreams(url string, args *ConnectionArgs) (ImageServer, error) { logger.Debug("Connecting to a remote simplestreams server", logger.Ctx{"URL": url}) // Cleanup URL url = strings.TrimSuffix(url, "/") // Use empty args if not specified if args == nil { args = &ConnectionArgs{} } // Initialize the client struct server := ProtocolSimpleStreams{ httpHost: url, httpUserAgent: args.UserAgent, httpCertificate: args.TLSServerCert, } // Setup the HTTP client httpClient, err := tlsHTTPClient(args.HTTPClient, args.TLSClientCert, args.TLSClientKey, args.TLSCA, args.TLSServerCert, args.InsecureSkipVerify, args.Proxy, args.TransportWrapper) if err != nil { return nil, err } server.http = httpClient // Get simplestreams client ssClient := simplestreams.NewClient(url, *httpClient, args.UserAgent) server.ssClient = ssClient // Setup the cache if args.CachePath != "" { if !util.PathExists(args.CachePath) { return nil, fmt.Errorf("Cache directory %q doesn't exist", args.CachePath) } hashedURL := fmt.Sprintf("%x", sha256.Sum256([]byte(url))) cachePath := filepath.Join(args.CachePath, hashedURL) cacheExpiry := args.CacheExpiry if cacheExpiry == 0 { cacheExpiry = time.Hour } if !util.PathExists(cachePath) { err := os.Mkdir(cachePath, 0o755) if err != nil { return nil, err } } ssClient.SetCache(cachePath, cacheExpiry) } return &server, nil } // ConnectOCI lets you connect to a remote OCI image registry over HTTPs. // // Unless the remote server is trusted by the system CA, the remote certificate must be provided (TLSServerCert). 
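//
// A minimal usage sketch; the registry URL below is a placeholder:
//
//	d, err := incus.ConnectOCI("https://registry.example.com", nil)
//	if err != nil {
//		return err
//	}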
func ConnectOCI(uri string, args *ConnectionArgs) (ImageServer, error) { logger.Debug("Connecting to a remote OCI server", logger.Ctx{"URL": uri}) // Cleanup URL uri = strings.TrimSuffix(uri, "/") // Use empty args if not specified if args == nil { args = &ConnectionArgs{} } // Initialize the client struct server := ProtocolOCI{ httpHost: uri, httpUserAgent: args.UserAgent, httpCertificate: args.TLSServerCert, cache: map[string]ociInfo{}, } // Setup the HTTP client httpClient, err := tlsHTTPClient(args.HTTPClient, args.TLSClientCert, args.TLSClientKey, args.TLSCA, args.TLSServerCert, args.InsecureSkipVerify, args.Proxy, args.TransportWrapper) if err != nil { return nil, err } server.http = httpClient return &server, nil } // Internal function called by ConnectIncus and ConnectPublicIncus. func httpsIncus(ctx context.Context, requestURL string, args *ConnectionArgs) (InstanceServer, error) { // Use empty args if not specified if args == nil { args = &ConnectionArgs{} } httpBaseURL, err := url.Parse(requestURL) if err != nil { return nil, err } ctxConnected, ctxConnectedCancel := context.WithCancel(context.Background()) // Initialize the client struct server := ProtocolIncus{ ctx: ctx, httpCertificate: args.TLSServerCert, httpBaseURL: *httpBaseURL, httpProtocol: "https", httpUserAgent: args.UserAgent, ctxConnected: ctxConnected, ctxConnectedCancel: ctxConnectedCancel, eventConns: make(map[string]*websocket.Conn), eventListeners: make(map[string][]*EventListener), } if slices.Contains([]string{api.AuthenticationMethodOIDC}, args.AuthType) { server.RequireAuthenticated(true) } // Setup the HTTP client httpClient, err := tlsHTTPClient(args.HTTPClient, args.TLSClientCert, args.TLSClientKey, args.TLSCA, args.TLSServerCert, args.InsecureSkipVerify, args.Proxy, args.TransportWrapper) if err != nil { return nil, err } if args.CookieJar != nil { httpClient.Jar = args.CookieJar } server.http = httpClient if args.AuthType == api.AuthenticationMethodOIDC { server.setupOIDCClient(args.OIDCTokens) } // Test the connection and seed the server information if !args.SkipGetServer { _, _, err := server.GetServer() if err != nil { return nil, err } } return &server, nil } incus-6.0.4/client/doc.go000066400000000000000000000067361477363751000152450ustar00rootroot00000000000000// Package incus implements a client for the Incus API // // # Overview // // This package lets you connect to Incus daemons or SimpleStream image // servers over a Unix socket or HTTPs. You can then interact with those // remote servers, creating instances, images, moving them around, ... // // The following examples make use of several imports: // // import ( // "github.com/lxc/incus/client" // "github.com/lxc/incus/shared/api" // "github.com/lxc/incus/shared/termios" // ) // // # Example - instance creation // // This creates a container on a local Incus daemon and then starts it. // // // Connect to Incus over the Unix socket // c, err := incus.ConnectIncusUnix("", nil) // if err != nil { // return err // } // // // Instance creation request // name := "my-container" // req := api.InstancesPost{ // Name: name, // Source: api.InstanceSource{ // Type: "image", // Alias: "my-image", # e.g. 
alpine/3.20 // Server: "https://images.linuxcontainers.org", // Protocol: "simplestreams", // }, // Type: "container" // } // // // Get Incus to create the instance (background operation) // op, err := c.CreateInstance(req) // if err != nil { // return err // } // // // Wait for the operation to complete // err = op.Wait() // if err != nil { // return err // } // // // Get Incus to start the instance (background operation) // reqState := api.InstanceStatePut{ // Action: "start", // Timeout: -1, // } // // op, err = c.UpdateInstanceState(name, reqState, "") // if err != nil { // return err // } // // // Wait for the operation to complete // err = op.Wait() // if err != nil { // return err // } // // # Example - command execution // // This executes an interactive bash terminal // // // Connect to Incus over the Unix socket // c, err := incus.ConnectIncusUnix("", nil) // if err != nil { // return err // } // // // Setup the exec request // req := api.InstanceExecPost{ // Command: []string{"bash"}, // WaitForWS: true, // Interactive: true, // Width: 80, // Height: 15, // } // // // Setup the exec arguments (fds) // args := incus.InstanceExecArgs{ // Stdin: os.Stdin, // Stdout: os.Stdout, // Stderr: os.Stderr, // } // // // Setup the terminal (set to raw mode) // if req.Interactive { // cfd := int(syscall.Stdin) // oldttystate, err := termios.MakeRaw(cfd) // if err != nil { // return err // } // // defer termios.Restore(cfd, oldttystate) // } // // // Get the current state // op, err := c.ExecInstance(name, req, &args) // if err != nil { // return err // } // // // Wait for it to complete // err = op.Wait() // if err != nil { // return err // } // // # Example - image copy // // This copies an image from a simplestreams server to a local Incus daemon // // // Connect to Incus over the Unix socket // c, err := incus.ConnectIncusUnix("", nil) // if err != nil { // return err // } // // // Connect to the remote SimpleStreams server // d, err = incus.ConnectSimpleStreams("https://images.linuxcontainers.org", nil) // if err != nil { // return err // } // // // Resolve the alias // alias, _, err := d.GetImageAlias("centos/7") // if err != nil { // return err // } // // // Get the image information // image, _, err := d.GetImage(alias.Target) // if err != nil { // return err // } // // // Ask Incus to copy the image from the remote server // op, err := d.CopyImage(*image, c, nil) // if err != nil { // return err // } // // // And wait for it to finish // err = op.Wait() // if err != nil { // return err // } package incus incus-6.0.4/client/events.go000066400000000000000000000054101477363751000157700ustar00rootroot00000000000000package incus import ( "context" "fmt" "sync" "github.com/lxc/incus/v6/shared/api" ) // The EventListener struct is used to interact with an Incus event stream. type EventListener struct { r *ProtocolIncus ctx context.Context ctxCancel context.CancelFunc err error // projectName stores which project this event listener is associated with (empty for all projects). projectName string targets []*EventTarget targetsLock sync.Mutex } // The EventTarget struct is returned to the caller of AddHandler and used in RemoveHandler. type EventTarget struct { function func(api.Event) types []string } // AddHandler adds a function to be called whenever an event is received. 
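//
// A short sketch of registering a handler, assuming `listener` was obtained
// from GetEvents and "lifecycle" is the event type of interest:
//
//	target, err := listener.AddHandler([]string{"lifecycle"}, func(event api.Event) {
//		fmt.Println(event.Type)
//	})
//	if err != nil {
//		return err
//	}
//
//	defer func() { _ = listener.RemoveHandler(target) }()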
func (e *EventListener) AddHandler(types []string, function func(api.Event)) (*EventTarget, error) { if function == nil { return nil, fmt.Errorf("A valid function must be provided") } // Handle locking e.targetsLock.Lock() defer e.targetsLock.Unlock() // Create a new target target := EventTarget{ function: function, types: types, } // And add it to the targets e.targets = append(e.targets, &target) return &target, nil } // RemoveHandler removes a function to be called whenever an event is received. func (e *EventListener) RemoveHandler(target *EventTarget) error { if target == nil { return fmt.Errorf("A valid event target must be provided") } // Handle locking e.targetsLock.Lock() defer e.targetsLock.Unlock() // Locate and remove the function from the list for i, entry := range e.targets { if entry == target { copy(e.targets[i:], e.targets[i+1:]) e.targets[len(e.targets)-1] = nil e.targets = e.targets[:len(e.targets)-1] return nil } } return fmt.Errorf("Couldn't find this function and event types combination") } // Disconnect must be used once done listening for events. func (e *EventListener) Disconnect() { // Handle locking e.r.eventListenersLock.Lock() defer e.r.eventListenersLock.Unlock() if e.ctx.Err() != nil { return } // Locate and remove it from the global list for i, listener := range e.r.eventListeners[e.projectName] { if listener == e { copy(e.r.eventListeners[e.projectName][i:], e.r.eventListeners[e.projectName][i+1:]) e.r.eventListeners[e.projectName][len(e.r.eventListeners[e.projectName])-1] = nil e.r.eventListeners[e.projectName] = e.r.eventListeners[e.projectName][:len(e.r.eventListeners[e.projectName])-1] break } } // Turn off the handler e.err = nil e.ctxCancel() } // Wait blocks until the server disconnects the connection or Disconnect() is called. func (e *EventListener) Wait() error { <-e.ctx.Done() return e.err } // IsActive returns true if this listener is still connected, false otherwise. func (e *EventListener) IsActive() bool { return e.ctx.Err() == nil } incus-6.0.4/client/incus.go000066400000000000000000000372771477363751000156250ustar00rootroot00000000000000package incus import ( "bytes" "context" "encoding/json" "fmt" "io" "net/http" neturl "net/url" "slices" "strings" "sync" "time" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/tcp" ) // ProtocolIncus represents an Incus API server. type ProtocolIncus struct { ctx context.Context server *api.Server ctxConnected context.Context ctxConnectedCancel context.CancelFunc // eventConns contains event listener connections associated to a project name (or empty for all projects). eventConns map[string]*websocket.Conn // eventConnsLock controls write access to the eventConns. eventConnsLock sync.Mutex // eventListeners is a slice of event listeners associated to a project name (or empty for all projects). eventListeners map[string][]*EventListener eventListenersLock sync.Mutex http *http.Client httpCertificate string httpBaseURL neturl.URL httpUnixPath string httpProtocol string httpUserAgent string requireAuthenticated bool clusterTarget string project string oidcClient *oidcClient } // Disconnect gets rid of any background goroutines. func (r *ProtocolIncus) Disconnect() { if r.ctxConnected.Err() != nil { r.ctxConnectedCancel() } } // GetConnectionInfo returns the basic connection information used to interact with the server. 
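//
// A small sketch of inspecting the result, assuming `c` is an
// already-connected client:
//
//	info, err := c.GetConnectionInfo()
//	if err != nil {
//		return err
//	}
//
//	fmt.Println(info.URL, info.Project, info.Addresses)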
func (r *ProtocolIncus) GetConnectionInfo() (*ConnectionInfo, error) { info := ConnectionInfo{} info.Certificate = r.httpCertificate info.Protocol = "incus" info.URL = r.httpBaseURL.String() info.SocketPath = r.httpUnixPath info.Project = r.project if info.Project == "" { info.Project = api.ProjectDefaultName } info.Target = r.clusterTarget if info.Target == "" && r.server != nil { info.Target = r.server.Environment.ServerName } urls := []string{} if r.httpProtocol == "https" { urls = append(urls, r.httpBaseURL.String()) } if r.server != nil && len(r.server.Environment.Addresses) > 0 { for _, addr := range r.server.Environment.Addresses { if strings.HasPrefix(addr, ":") { continue } url := fmt.Sprintf("https://%s", addr) if !slices.Contains(urls, url) { urls = append(urls, url) } } } info.Addresses = urls return &info, nil } // isSameServer compares the calling ProtocolIncus object with the provided server object to check if they are the same server. // It verifies the equality based on their connection information (Protocol, Certificate, Project, and Target). func (r *ProtocolIncus) isSameServer(server Server) bool { // Short path checking if the two structs are identical. if r == server { return true } // Short path if either of the structs are nil. if r == nil || server == nil { return false } // When dealing with uninitialized servers, we can't safely compare. if r.server == nil { return false } // Get the connection info from both servers. srcInfo, err := r.GetConnectionInfo() if err != nil { return false } dstInfo, err := server.GetConnectionInfo() if err != nil { return false } // Check whether we're dealing with the same server. return srcInfo.Protocol == dstInfo.Protocol && srcInfo.Certificate == dstInfo.Certificate && srcInfo.Project == dstInfo.Project && srcInfo.Target == dstInfo.Target } // GetHTTPClient returns the http client used for the connection. This can be used to set custom http options. func (r *ProtocolIncus) GetHTTPClient() (*http.Client, error) { if r.http == nil { return nil, fmt.Errorf("HTTP client isn't set, bad connection") } return r.http, nil } // DoHTTP performs a Request, using OIDC authentication if set. func (r *ProtocolIncus) DoHTTP(req *http.Request) (*http.Response, error) { r.addClientHeaders(req) if r.oidcClient != nil { return r.oidcClient.do(req) } resp, err := r.http.Do(req) if resp != nil && resp.StatusCode == http.StatusUseProxy && req.GetBody != nil { // Reset the request body. body, err := req.GetBody() if err != nil { return nil, err } req.Body = body // Retry the request. return r.http.Do(req) } return resp, err } // DoWebsocket performs a websocket connection, using OIDC authentication if set. func (r *ProtocolIncus) DoWebsocket(dialer websocket.Dialer, uri string, req *http.Request) (*websocket.Conn, *http.Response, error) { r.addClientHeaders(req) if r.oidcClient != nil { return r.oidcClient.dial(dialer, uri, req) } return dialer.Dial(uri, req.Header) } // addClientHeaders sets headers from client settings. // User-Agent (if r.httpUserAgent is set). // X-Incus-authenticated (if r.requireAuthenticated is set). // OIDC Authorization header (if r.oidcClient is set). 
func (r *ProtocolIncus) addClientHeaders(req *http.Request) { if r.httpUserAgent != "" { req.Header.Set("User-Agent", r.httpUserAgent) } if r.requireAuthenticated { req.Header.Set("X-Incus-authenticated", "true") } if r.oidcClient != nil { req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.oidcClient.getAccessToken())) } } // RequireAuthenticated sets whether we expect to be authenticated with the server. func (r *ProtocolIncus) RequireAuthenticated(authenticated bool) { r.requireAuthenticated = authenticated } // RawQuery allows directly querying the Incus API // // This should only be used by internal Incus tools. func (r *ProtocolIncus) RawQuery(method string, path string, data any, ETag string) (*api.Response, string, error) { // Generate the URL url := fmt.Sprintf("%s%s", r.httpBaseURL.String(), path) return r.rawQuery(method, url, data, ETag) } // RawWebsocket allows directly connection to Incus API websockets // // This should only be used by internal Incus tools. func (r *ProtocolIncus) RawWebsocket(path string) (*websocket.Conn, error) { return r.websocket(path) } // RawOperation allows direct querying of an Incus API endpoint returning // background operations. func (r *ProtocolIncus) RawOperation(method string, path string, data any, ETag string) (Operation, string, error) { return r.queryOperation(method, path, data, ETag) } // Internal functions. func incusParseResponse(resp *http.Response) (*api.Response, string, error) { // Get the ETag etag := resp.Header.Get("ETag") // Decode the response decoder := json.NewDecoder(resp.Body) response := api.Response{} err := decoder.Decode(&response) if err != nil { // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { return nil, "", fmt.Errorf("Failed to fetch %s: %s", resp.Request.URL.String(), resp.Status) } return nil, "", err } // Handle errors if response.Type == api.ErrorResponse { return &response, "", api.StatusErrorf(resp.StatusCode, response.Error) } return &response, etag, nil } // rawQuery is a method that sends an HTTP request to the Incus server with the provided method, URL, data, and ETag. // It processes the request based on the data's type and handles the HTTP response, returning parsed results or an error if it occurs. 
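//
// A sketch of how internal callers use it, mirroring RawQuery above; the
// path is illustrative:
//
//	url := fmt.Sprintf("%s%s", r.httpBaseURL.String(), "/1.0/instances")
//	resp, etag, err := r.rawQuery("GET", url, nil, "")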
func (r *ProtocolIncus) rawQuery(method string, url string, data any, ETag string) (*api.Response, string, error) { var req *http.Request var err error // Log the request logger.Debug("Sending request to Incus", logger.Ctx{ "method": method, "url": url, "etag": ETag, }) // Get a new HTTP request setup if data != nil { switch data := data.(type) { case io.Reader: // Some data to be sent along with the request req, err = http.NewRequestWithContext(r.ctx, method, url, io.NopCloser(data)) if err != nil { return nil, "", err } req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(data), nil } // Set the encoding accordingly req.Header.Set("Content-Type", "application/octet-stream") default: // Encode the provided data buf := bytes.Buffer{} err := json.NewEncoder(&buf).Encode(data) if err != nil { return nil, "", err } // Some data to be sent along with the request // Use a reader since the request body needs to be seekable req, err = http.NewRequestWithContext(r.ctx, method, url, bytes.NewReader(buf.Bytes())) if err != nil { return nil, "", err } req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(buf.Bytes())), nil } // Set the encoding accordingly req.Header.Set("Content-Type", "application/json") // Log the data logger.Debugf(logger.Pretty(data)) } } else { // No data to be sent along with the request req, err = http.NewRequestWithContext(r.ctx, method, url, nil) if err != nil { return nil, "", err } } // Set the ETag if ETag != "" { req.Header.Set("If-Match", ETag) } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, "", err } defer func() { _ = resp.Body.Close() }() return incusParseResponse(resp) } // setURLQueryAttributes modifies the supplied URL's query string with the client's current target and project. func (r *ProtocolIncus) setURLQueryAttributes(apiURL *neturl.URL) { // Extract query fields and update for cluster targeting or project values := apiURL.Query() if r.clusterTarget != "" { if values.Get("target") == "" { values.Set("target", r.clusterTarget) } } if r.project != "" { if values.Get("project") == "" && values.Get("all-projects") == "" { values.Set("project", r.project) } } apiURL.RawQuery = values.Encode() } func (r *ProtocolIncus) setQueryAttributes(uri string) (string, error) { // Parse the full URI fields, err := neturl.Parse(uri) if err != nil { return "", err } r.setURLQueryAttributes(fields) return fields.String(), nil } func (r *ProtocolIncus) query(method string, path string, data any, ETag string) (*api.Response, string, error) { // Generate the URL url := fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path) // Add project/target url, err := r.setQueryAttributes(url) if err != nil { return nil, "", err } // Run the actual query return r.rawQuery(method, url, data, ETag) } // queryStruct sends a query to the Incus server, then converts the response metadata into the specified target struct. // The function logs the retrieved data, returns the etag of the response, and handles any errors during this process. 
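//
// A sketch of the typical internal call pattern, as used elsewhere in this
// client (the path and target type are illustrative):
//
//	images := []api.Image{}
//	_, err := r.queryStruct("GET", "/images?recursion=1", nil, "", &images)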
func (r *ProtocolIncus) queryStruct(method string, path string, data any, ETag string, target any) (string, error) { resp, etag, err := r.query(method, path, data, ETag) if err != nil { return "", err } err = resp.MetadataAsStruct(&target) if err != nil { return "", err } // Log the data logger.Debugf("Got response struct from Incus") logger.Debugf(logger.Pretty(target)) return etag, nil } // queryOperation sends a query to the Incus server and then converts the response metadata into an Operation object. // It sets up an early event listener, performs the query, processes the response, and manages the lifecycle of the event listener. func (r *ProtocolIncus) queryOperation(method string, path string, data any, ETag string) (Operation, string, error) { // Attempt to setup an early event listener skipListener := false listener, err := r.GetEvents() if err != nil { if api.StatusErrorCheck(err, http.StatusForbidden) { skipListener = true } listener = nil } // Send the query resp, etag, err := r.query(method, path, data, ETag) if err != nil { if listener != nil { listener.Disconnect() } return nil, "", err } // Get to the operation respOperation, err := resp.MetadataAsOperation() if err != nil { if listener != nil { listener.Disconnect() } return nil, "", err } // Setup an Operation wrapper op := operation{ Operation: *respOperation, r: r, listener: listener, skipListener: skipListener, chActive: make(chan bool), } // Log the data logger.Debugf("Got operation from Incus") logger.Debugf(logger.Pretty(op.Operation)) return &op, etag, nil } // rawWebsocket creates a websocket connection to the provided URL using the underlying HTTP transport of the ProtocolIncus receiver. // It sets up the request headers, manages the connection handshake, sets TCP timeouts, and handles any errors that may occur during these operations. func (r *ProtocolIncus) rawWebsocket(url string) (*websocket.Conn, error) { // Grab the http transport handler httpTransport, err := r.getUnderlyingHTTPTransport() if err != nil { return nil, err } // Setup a new websocket dialer based on it dialer := websocket.Dialer{ NetDialTLSContext: httpTransport.DialTLSContext, NetDialContext: httpTransport.DialContext, TLSClientConfig: httpTransport.TLSClientConfig, Proxy: httpTransport.Proxy, HandshakeTimeout: time.Second * 5, } // Create temporary http.Request using the http url, not the ws one, so that we can add the client headers // for the websocket request. req := &http.Request{URL: &r.httpBaseURL, Header: http.Header{}} // Establish the connection conn, resp, err := r.DoWebsocket(dialer, url, req) if err != nil { if resp != nil { _, _, err = incusParseResponse(resp) } return nil, err } // Set TCP timeout options. remoteTCP, _ := tcp.ExtractConn(conn.UnderlyingConn()) if remoteTCP != nil { err = tcp.SetTimeouts(remoteTCP, 0) if err != nil { logger.Warn("Failed setting TCP timeouts on remote connection", logger.Ctx{"err": err}) } } // Log the data logger.Debugf("Connected to the websocket: %v", url) return conn, nil } // websocket generates a websocket URL based on the provided path and the base URL of the ProtocolIncus receiver. // It then leverages the rawWebsocket method to establish and return a websocket connection to the generated URL. 
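//
// For example (illustrative address), with an HTTPS base URL the mapping is:
//
//	"/events" -> "wss://203.0.113.5:8443/1.0/events"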
func (r *ProtocolIncus) websocket(path string) (*websocket.Conn, error) { // Generate the URL var url string if r.httpBaseURL.Scheme == "https" { url = fmt.Sprintf("wss://%s/1.0%s", r.httpBaseURL.Host, path) } else { url = fmt.Sprintf("ws://%s/1.0%s", r.httpBaseURL.Host, path) } return r.rawWebsocket(url) } // WithContext returns a client that will add context.Context. func (r *ProtocolIncus) WithContext(ctx context.Context) InstanceServer { rr := r rr.ctx = ctx return rr } // getUnderlyingHTTPTransport returns the *http.Transport used by the http client. If the http // client was initialized with a HTTPTransporter, it returns the wrapped *http.Transport. func (r *ProtocolIncus) getUnderlyingHTTPTransport() (*http.Transport, error) { switch t := r.http.Transport.(type) { case *http.Transport: return t, nil case HTTPTransporter: return t.Transport(), nil default: return nil, fmt.Errorf("Unexpected http.Transport type, %T", r) } } // getSourceImageConnectionInfo returns the connection information for the source image. // The returned `info` is nil if the source image is local. In this process, the `instSrc` // is also updated with the minimal source fields. func (r *ProtocolIncus) getSourceImageConnectionInfo(source ImageServer, image api.Image, instSrc *api.InstanceSource) (info *ConnectionInfo, err error) { // Set the minimal source fields instSrc.Type = "image" // Optimization for the local image case if r.isSameServer(source) { // Always use fingerprints for local case instSrc.Fingerprint = image.Fingerprint instSrc.Alias = "" return nil, nil } // Minimal source fields for remote image instSrc.Mode = "pull" // If we have an alias and the image is public, use that if instSrc.Alias != "" && image.Public { instSrc.Fingerprint = "" } else { instSrc.Fingerprint = image.Fingerprint instSrc.Alias = "" } // Get source server connection information info, err = source.GetConnectionInfo() if err != nil { return nil, err } instSrc.Protocol = info.Protocol instSrc.Certificate = info.Certificate // Generate secret token if needed if !image.Public { secret, err := source.GetImageSecret(image.Fingerprint) if err != nil { return nil, err } instSrc.Secret = secret } return info, nil } incus-6.0.4/client/incus_certificates.go000066400000000000000000000055611477363751000203410ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // Certificate handling functions // GetCertificateFingerprints returns a list of certificate fingerprints. func (r *ProtocolIncus) GetCertificateFingerprints() ([]string, error) { // Fetch the raw URL values. urls := []string{} baseURL := "/certificates" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetCertificates returns a list of certificates. func (r *ProtocolIncus) GetCertificates() ([]api.Certificate, error) { certificates := []api.Certificate{} // Fetch the raw value _, err := r.queryStruct("GET", "/certificates?recursion=1", nil, "", &certificates) if err != nil { return nil, err } return certificates, nil } // GetCertificate returns the certificate entry for the provided fingerprint. 
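//
// A minimal usage sketch; the fingerprint is a placeholder and `c` is an
// already-connected client:
//
//	cert, etag, err := c.GetCertificate("a1b2c3d4")
//	if err != nil {
//		return err
//	}
//
//	fmt.Println(cert.Fingerprint, etag)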
func (r *ProtocolIncus) GetCertificate(fingerprint string) (*api.Certificate, string, error) { certificate := api.Certificate{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), nil, "", &certificate) if err != nil { return nil, "", err } return &certificate, etag, nil } // CreateCertificate adds a new certificate to the Incus trust store. func (r *ProtocolIncus) CreateCertificate(certificate api.CertificatesPost) error { // Send the request _, _, err := r.query("POST", "/certificates", certificate, "") if err != nil { return err } return nil } // UpdateCertificate updates the certificate definition. func (r *ProtocolIncus) UpdateCertificate(fingerprint string, certificate api.CertificatePut, ETag string) error { if !r.HasExtension("certificate_update") { return fmt.Errorf("The server is missing the required \"certificate_update\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), certificate, ETag) if err != nil { return err } return nil } // DeleteCertificate removes a certificate from the Incus trust store. func (r *ProtocolIncus) DeleteCertificate(fingerprint string) error { // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/certificates/%s", url.PathEscape(fingerprint)), nil, "") if err != nil { return err } return nil } // CreateCertificateToken requests a certificate add token. func (r *ProtocolIncus) CreateCertificateToken(certificate api.CertificatesPost) (Operation, error) { if !r.HasExtension("certificate_token") { return nil, fmt.Errorf("The server is missing the required \"certificate_token\" API extension") } if !certificate.Token { return nil, fmt.Errorf("Token needs to be true if requesting a token") } // Send the request op, _, err := r.queryOperation("POST", "/certificates", certificate, "") if err != nil { return nil, err } return op, nil } incus-6.0.4/client/incus_cluster.go000066400000000000000000000220371477363751000173520ustar00rootroot00000000000000package incus import ( "fmt" "github.com/lxc/incus/v6/shared/api" ) // GetCluster returns information about a cluster. func (r *ProtocolIncus) GetCluster() (*api.Cluster, string, error) { if !r.HasExtension("clustering") { return nil, "", fmt.Errorf("The server is missing the required \"clustering\" API extension") } cluster := &api.Cluster{} etag, err := r.queryStruct("GET", "/cluster", nil, "", &cluster) if err != nil { return nil, "", err } return cluster, etag, nil } // UpdateCluster requests to bootstrap a new cluster or join an existing one. func (r *ProtocolIncus) UpdateCluster(cluster api.ClusterPut, ETag string) (Operation, error) { if !r.HasExtension("clustering") { return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") } if cluster.ServerAddress != "" || cluster.ClusterToken != "" || len(cluster.MemberConfig) > 0 { if !r.HasExtension("clustering_join") { return nil, fmt.Errorf("The server is missing the required \"clustering_join\" API extension") } } op, _, err := r.queryOperation("PUT", "/cluster", cluster, ETag) if err != nil { return nil, err } return op, nil } // DeleteClusterMember makes the given member leave the cluster (gracefully or not, // depending on the force flag). 
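//
// A minimal usage sketch; "member01" is a placeholder member name and the
// force flag is left at false for a graceful removal:
//
//	err := c.DeleteClusterMember("member01", false)
//	if err != nil {
//		return err
//	}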
func (r *ProtocolIncus) DeleteClusterMember(name string, force bool) error { if !r.HasExtension("clustering") { return fmt.Errorf("The server is missing the required \"clustering\" API extension") } params := "" if force { params += "?force=1" } _, _, err := r.query("DELETE", fmt.Sprintf("/cluster/members/%s%s", name, params), nil, "") if err != nil { return err } return nil } // GetClusterMemberNames returns the URLs of the current members in the cluster. func (r *ProtocolIncus) GetClusterMemberNames() ([]string, error) { if !r.HasExtension("clustering") { return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := "/cluster/members" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetClusterMembers returns the current members of the cluster. func (r *ProtocolIncus) GetClusterMembers() ([]api.ClusterMember, error) { if !r.HasExtension("clustering") { return nil, fmt.Errorf("The server is missing the required \"clustering\" API extension") } members := []api.ClusterMember{} _, err := r.queryStruct("GET", "/cluster/members?recursion=1", nil, "", &members) if err != nil { return nil, err } return members, nil } // GetClusterMember returns information about the given member. func (r *ProtocolIncus) GetClusterMember(name string) (*api.ClusterMember, string, error) { if !r.HasExtension("clustering") { return nil, "", fmt.Errorf("The server is missing the required \"clustering\" API extension") } member := api.ClusterMember{} etag, err := r.queryStruct("GET", fmt.Sprintf("/cluster/members/%s", name), nil, "", &member) if err != nil { return nil, "", err } return &member, etag, nil } // UpdateClusterMember updates information about the given member. func (r *ProtocolIncus) UpdateClusterMember(name string, member api.ClusterMemberPut, ETag string) error { if !r.HasExtension("clustering_edit_roles") { return fmt.Errorf("The server is missing the required \"clustering_edit_roles\" API extension") } if member.FailureDomain != "" { if !r.HasExtension("clustering_failure_domains") { return fmt.Errorf("The server is missing the required \"clustering_failure_domains\" API extension") } } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/cluster/members/%s", name), member, ETag) if err != nil { return err } return nil } // RenameClusterMember changes the name of an existing member. func (r *ProtocolIncus) RenameClusterMember(name string, member api.ClusterMemberPost) error { if !r.HasExtension("clustering") { return fmt.Errorf("The server is missing the required \"clustering\" API extension") } _, _, err := r.query("POST", fmt.Sprintf("/cluster/members/%s", name), member, "") if err != nil { return err } return nil } // CreateClusterMember generates a join token to add a cluster member. func (r *ProtocolIncus) CreateClusterMember(member api.ClusterMembersPost) (Operation, error) { if !r.HasExtension("clustering_join_token") { return nil, fmt.Errorf("The server is missing the required \"clustering_join_token\" API extension") } op, _, err := r.queryOperation("POST", "/cluster/members", member, "") if err != nil { return nil, err } return op, nil } // UpdateClusterCertificate updates the cluster certificate for every node in the cluster. 
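//
// A minimal usage sketch, assuming `certs` is an api.ClusterCertificatePut
// carrying the new certificate and key prepared by the caller:
//
//	err := c.UpdateClusterCertificate(certs, "")
//	if err != nil {
//		return err
//	}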
func (r *ProtocolIncus) UpdateClusterCertificate(certs api.ClusterCertificatePut, ETag string) error { if !r.HasExtension("clustering_update_cert") { return fmt.Errorf("The server is missing the required \"clustering_update_cert\" API extension") } _, _, err := r.query("PUT", "/cluster/certificate", certs, ETag) if err != nil { return err } return nil } // GetClusterMemberState gets state information about a cluster member. func (r *ProtocolIncus) GetClusterMemberState(name string) (*api.ClusterMemberState, string, error) { err := r.CheckExtension("cluster_member_state") if err != nil { return nil, "", err } state := api.ClusterMemberState{} u := api.NewURL().Path("cluster", "members", name, "state") etag, err := r.queryStruct("GET", u.String(), nil, "", &state) if err != nil { return nil, "", err } return &state, etag, err } // UpdateClusterMemberState evacuates or restores a cluster member. func (r *ProtocolIncus) UpdateClusterMemberState(name string, state api.ClusterMemberStatePost) (Operation, error) { if !r.HasExtension("clustering_evacuation") { return nil, fmt.Errorf("The server is missing the required \"clustering_evacuation\" API extension") } op, _, err := r.queryOperation("POST", fmt.Sprintf("/cluster/members/%s/state", name), state, "") if err != nil { return nil, err } return op, nil } // GetClusterGroups returns the cluster groups. func (r *ProtocolIncus) GetClusterGroups() ([]api.ClusterGroup, error) { if !r.HasExtension("clustering_groups") { return nil, fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } groups := []api.ClusterGroup{} _, err := r.queryStruct("GET", "/cluster/groups?recursion=1", nil, "", &groups) if err != nil { return nil, err } return groups, nil } // GetClusterGroupNames returns the cluster group names. func (r *ProtocolIncus) GetClusterGroupNames() ([]string, error) { if !r.HasExtension("clustering_groups") { return nil, fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } urls := []string{} _, err := r.queryStruct("GET", "/cluster/groups", nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames("/1.0/cluster/groups", urls...) } // RenameClusterGroup changes the name of an existing cluster group. func (r *ProtocolIncus) RenameClusterGroup(name string, group api.ClusterGroupPost) error { if !r.HasExtension("clustering_groups") { return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } _, _, err := r.query("POST", fmt.Sprintf("/cluster/groups/%s", name), group, "") if err != nil { return err } return nil } // CreateClusterGroup creates a new cluster group. func (r *ProtocolIncus) CreateClusterGroup(group api.ClusterGroupsPost) error { if !r.HasExtension("clustering_groups") { return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } _, _, err := r.query("POST", "/cluster/groups", group, "") if err != nil { return err } return nil } // DeleteClusterGroup deletes an existing cluster group. func (r *ProtocolIncus) DeleteClusterGroup(name string) error { if !r.HasExtension("clustering_groups") { return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } _, _, err := r.query("DELETE", fmt.Sprintf("/cluster/groups/%s", name), nil, "") if err != nil { return err } return nil } // UpdateClusterGroup updates information about the given cluster group. 
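//
// A minimal sketch of the read-modify-write pattern, assuming `newPut` is an
// api.ClusterGroupPut prepared by the caller and "group1" is a placeholder:
//
//	_, etag, err := c.GetClusterGroup("group1")
//	if err != nil {
//		return err
//	}
//
//	err = c.UpdateClusterGroup("group1", newPut, etag)
//	if err != nil {
//		return err
//	}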
func (r *ProtocolIncus) UpdateClusterGroup(name string, group api.ClusterGroupPut, ETag string) error { if !r.HasExtension("clustering_groups") { return fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/cluster/groups/%s", name), group, ETag) if err != nil { return err } return nil } // GetClusterGroup returns information about the given cluster group. func (r *ProtocolIncus) GetClusterGroup(name string) (*api.ClusterGroup, string, error) { if !r.HasExtension("clustering_groups") { return nil, "", fmt.Errorf("The server is missing the required \"clustering_groups\" API extension") } group := api.ClusterGroup{} etag, err := r.queryStruct("GET", fmt.Sprintf("/cluster/groups/%s", name), nil, "", &group) if err != nil { return nil, "", err } return &group, etag, nil } incus-6.0.4/client/incus_events.go000066400000000000000000000115641477363751000172000ustar00rootroot00000000000000package incus import ( "context" "encoding/json" "fmt" "slices" "time" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/shared/api" ) // Event handling functions // getEvents connects to the Incus monitoring interface. func (r *ProtocolIncus) getEvents(allProjects bool) (*EventListener, error) { // Prevent anything else from interacting with the listeners r.eventListenersLock.Lock() defer r.eventListenersLock.Unlock() ctx, cancel := context.WithCancel(context.Background()) // Setup a new listener listener := EventListener{ r: r, ctx: ctx, ctxCancel: cancel, } connInfo, _ := r.GetConnectionInfo() if connInfo.Project == "" { return nil, fmt.Errorf("Unexpected empty project in connection info") } if !allProjects { listener.projectName = connInfo.Project } // There is an existing Go routine for the required project filter, so just add another target. if r.eventListeners[listener.projectName] != nil { r.eventListeners[listener.projectName] = append(r.eventListeners[listener.projectName], &listener) return &listener, nil } // Setup a new connection with Incus var url string var err error if allProjects { url, err = r.setQueryAttributes("/events?all-projects=true") } else { url, err = r.setQueryAttributes("/events") } if err != nil { return nil, err } // Connect websocket and save. wsConn, err := r.websocket(url) if err != nil { return nil, err } r.eventConnsLock.Lock() r.eventConns[listener.projectName] = wsConn // Save for others to use. r.eventConnsLock.Unlock() // Initialize the event listener list if we were able to connect to the events websocket. r.eventListeners[listener.projectName] = []*EventListener{&listener} // Spawn a watcher that will close the websocket connection after all // listeners are gone. stopCh := make(chan struct{}) go func() { for { select { case <-time.After(time.Minute): case <-r.ctxConnected.Done(): case <-stopCh: } r.eventListenersLock.Lock() r.eventConnsLock.Lock() if len(r.eventListeners[listener.projectName]) == 0 { // We don't need the connection anymore, disconnect and clear. 
if r.eventListeners[listener.projectName] != nil { _ = r.eventConns[listener.projectName].Close() delete(r.eventConns, listener.projectName) } r.eventListeners[listener.projectName] = nil r.eventListenersLock.Unlock() r.eventConnsLock.Unlock() return } r.eventListenersLock.Unlock() r.eventConnsLock.Unlock() } }() // Spawn the listener go func() { for { _, data, err := wsConn.ReadMessage() if err != nil { // Prevent anything else from interacting with the listeners r.eventListenersLock.Lock() defer r.eventListenersLock.Unlock() // Tell all the current listeners about the failure for _, listener := range r.eventListeners[listener.projectName] { listener.err = err listener.ctxCancel() } // And remove them all from the list so that when watcher routine runs it will // close the websocket connection. r.eventListeners[listener.projectName] = nil close(stopCh) // Instruct watcher go routine to cleanup. return } // Attempt to unpack the message event := api.Event{} err = json.Unmarshal(data, &event) if err != nil { continue } // Extract the message type if event.Type == "" { continue } // Send the message to all handlers r.eventListenersLock.Lock() for _, listener := range r.eventListeners[listener.projectName] { listener.targetsLock.Lock() for _, target := range listener.targets { if target.types != nil && !slices.Contains(target.types, event.Type) { continue } go target.function(event) } listener.targetsLock.Unlock() } r.eventListenersLock.Unlock() } }() return &listener, nil } // GetEvents gets the events for the project defined on the client. func (r *ProtocolIncus) GetEvents() (*EventListener, error) { return r.getEvents(false) } // GetEventsAllProjects gets events for all projects. func (r *ProtocolIncus) GetEventsAllProjects() (*EventListener, error) { return r.getEvents(true) } // SendEvent send an event to the server via the client's event listener connection. func (r *ProtocolIncus) SendEvent(event api.Event) error { r.eventConnsLock.Lock() defer r.eventConnsLock.Unlock() // Find an available event listener connection. // It doesn't matter which project the event listener connection is using, as this only affects which // events are received from the server, not which events we can send to it. var eventConn *websocket.Conn for _, eventConn = range r.eventConns { break } if eventConn == nil { return fmt.Errorf("No available event listener connection") } deadline, ok := r.ctx.Deadline() if !ok { deadline = time.Now().Add(5 * time.Second) } _ = eventConn.SetWriteDeadline(deadline) return eventConn.WriteJSON(event) } incus-6.0.4/client/incus_images.go000066400000000000000000000634311477363751000171410ustar00rootroot00000000000000package incus import ( "crypto/sha256" "fmt" "io" "mime" "mime/multipart" "net/http" "net/url" "os" "slices" "strings" "time" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/cancel" "github.com/lxc/incus/v6/shared/ioprogress" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/units" "github.com/lxc/incus/v6/shared/util" ) // Image handling functions // GetImages returns a list of available images as Image structs. func (r *ProtocolIncus) GetImages() ([]api.Image, error) { images := []api.Image{} _, err := r.queryStruct("GET", "/images?recursion=1", nil, "", &images) if err != nil { return nil, err } return images, nil } // GetImagesAllProjects returns a list of images across all projects as Image structs. 
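//
// A minimal usage sketch, assuming `c` is an already-connected client:
//
//	images, err := c.GetImagesAllProjects()
//	if err != nil {
//		return err
//	}
//
//	for _, image := range images {
//		fmt.Println(image.Fingerprint)
//	}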
func (r *ProtocolIncus) GetImagesAllProjects() ([]api.Image, error) { images := []api.Image{} v := url.Values{} v.Set("recursion", "1") v.Set("all-projects", "true") if !r.HasExtension("images_all_projects") { return nil, fmt.Errorf("The server is missing the required \"images_all_projects\" API extension") } _, err := r.queryStruct("GET", fmt.Sprintf("/images?%s", v.Encode()), nil, "", &images) if err != nil { return nil, err } return images, nil } // GetImagesWithFilter returns a filtered list of available images as Image structs. func (r *ProtocolIncus) GetImagesWithFilter(filters []string) ([]api.Image, error) { if !r.HasExtension("api_filtering") { return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") } images := []api.Image{} v := url.Values{} v.Set("recursion", "1") v.Set("filter", parseFilters(filters)) _, err := r.queryStruct("GET", fmt.Sprintf("/images?%s", v.Encode()), nil, "", &images) if err != nil { return nil, err } return images, nil } // GetImageFingerprints returns a list of available image fingerprints. func (r *ProtocolIncus) GetImageFingerprints() ([]string, error) { // Fetch the raw URL values. urls := []string{} baseURL := "/images" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetImage returns an Image struct for the provided fingerprint. func (r *ProtocolIncus) GetImage(fingerprint string) (*api.Image, string, error) { return r.GetPrivateImage(fingerprint, "") } // GetImageFile downloads an image from the server, returning an ImageFileRequest struct. func (r *ProtocolIncus) GetImageFile(fingerprint string, req ImageFileRequest) (*ImageFileResponse, error) { return r.GetPrivateImageFile(fingerprint, "", req) } // GetImageSecret is a helper around CreateImageSecret that returns a secret for the image. func (r *ProtocolIncus) GetImageSecret(fingerprint string) (string, error) { op, err := r.CreateImageSecret(fingerprint) if err != nil { return "", err } opAPI := op.Get() return opAPI.Metadata["secret"].(string), nil } // GetPrivateImage is similar to GetImage but allows passing a secret download token. func (r *ProtocolIncus) GetPrivateImage(fingerprint string, secret string) (*api.Image, string, error) { image := api.Image{} // Build the API path path := fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)) var err error path, err = r.setQueryAttributes(path) if err != nil { return nil, "", err } if secret != "" { path, err = setQueryParam(path, "secret", secret) if err != nil { return nil, "", err } } // Fetch the raw value etag, err := r.queryStruct("GET", path, nil, "", &image) if err != nil { return nil, "", err } return &image, etag, nil } // GetPrivateImageFile is similar to GetImageFile but allows passing a secret download token. func (r *ProtocolIncus) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (*ImageFileResponse, error) { // Quick checks. 
if req.MetaFile == nil && req.RootfsFile == nil { return nil, fmt.Errorf("No file requested") } uri := fmt.Sprintf("/1.0/images/%s/export", url.PathEscape(fingerprint)) var err error uri, err = r.setQueryAttributes(uri) if err != nil { return nil, err } // Attempt to download from host if secret == "" && util.PathExists("/dev/incus/sock") && os.Geteuid() == 0 { unixURI := fmt.Sprintf("http://unix.socket%s", uri) // Setup the HTTP client devIncusHTTP, err := unixHTTPClient(nil, "/dev/incus/sock") if err == nil { resp, err := incusDownloadImage(fingerprint, unixURI, r.httpUserAgent, devIncusHTTP.Do, req) if err == nil { return resp, nil } } } // Build the URL uri = fmt.Sprintf("%s%s", r.httpBaseURL.String(), uri) if secret != "" { uri, err = setQueryParam(uri, "secret", secret) if err != nil { return nil, err } } // Use relatively short response header timeout so as not to hold the image lock open too long. // Deference client and transport in order to clone them so as to not modify timeout of base client. httpClient := *r.http httpTransport := httpClient.Transport.(*http.Transport).Clone() httpTransport.ResponseHeaderTimeout = 30 * time.Second httpClient.Transport = httpTransport return incusDownloadImage(fingerprint, uri, r.httpUserAgent, r.DoHTTP, req) } func incusDownloadImage(fingerprint string, uri string, userAgent string, do func(*http.Request) (*http.Response, error), req ImageFileRequest) (*ImageFileResponse, error) { // Prepare the response resp := ImageFileResponse{} // Prepare the download request request, err := http.NewRequest("GET", uri, nil) if err != nil { return nil, err } if userAgent != "" { request.Header.Set("User-Agent", userAgent) } // Start the request response, doneCh, err := cancel.CancelableDownload(req.Canceler, do, request) if err != nil { return nil, err } defer func() { _ = response.Body.Close() }() defer close(doneCh) if response.StatusCode != http.StatusOK { _, _, err := incusParseResponse(response) if err != nil { return nil, err } } ctype, ctypeParams, err := mime.ParseMediaType(response.Header.Get("Content-Type")) if err != nil { ctype = "application/octet-stream" } // Check the image type. 
imageType := response.Header.Get("X-Incus-Type") if imageType == "" { imageType = "incus" } // Handle the data body := response.Body if req.ProgressHandler != nil { reader := &ioprogress.ProgressReader{ ReadCloser: response.Body, Tracker: &ioprogress.ProgressTracker{ Length: response.ContentLength, }, } if response.ContentLength > 0 { reader.Tracker.Handler = func(percent int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) } } else { reader.Tracker.Handler = func(received int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) } } body = reader } // Hashing sha256 := sha256.New() // Deal with split images if ctype == "multipart/form-data" { if req.MetaFile == nil || req.RootfsFile == nil { return nil, fmt.Errorf("Multi-part image but only one target file provided") } // Parse the POST data mr := multipart.NewReader(body, ctypeParams["boundary"]) // Get the metadata tarball part, err := mr.NextPart() if err != nil { return nil, err } if part.FormName() != "metadata" { return nil, fmt.Errorf("Invalid multipart image") } size, err := io.Copy(io.MultiWriter(req.MetaFile, sha256), part) if err != nil { return nil, err } resp.MetaSize = size resp.MetaName = part.FileName() // Get the rootfs tarball part, err = mr.NextPart() if err != nil { return nil, err } if !slices.Contains([]string{"rootfs", "rootfs.img"}, part.FormName()) { return nil, fmt.Errorf("Invalid multipart image") } size, err = io.Copy(io.MultiWriter(req.RootfsFile, sha256), part) if err != nil { return nil, err } resp.RootfsSize = size resp.RootfsName = part.FileName() // Check the hash hash := fmt.Sprintf("%x", sha256.Sum(nil)) if imageType != "oci" && !strings.HasPrefix(hash, fingerprint) { return nil, fmt.Errorf("Image fingerprint doesn't match. Got %s expected %s", hash, fingerprint) } return &resp, nil } // Deal with unified images _, cdParams, err := mime.ParseMediaType(response.Header.Get("Content-Disposition")) if err != nil { return nil, err } filename, ok := cdParams["filename"] if !ok { return nil, fmt.Errorf("No filename in Content-Disposition header") } size, err := io.Copy(io.MultiWriter(req.MetaFile, sha256), body) if err != nil { return nil, err } resp.MetaSize = size resp.MetaName = filename // Check the hash hash := fmt.Sprintf("%x", sha256.Sum(nil)) if imageType != "oci" && !strings.HasPrefix(hash, fingerprint) { return nil, fmt.Errorf("Image fingerprint doesn't match. Got %s expected %s", hash, fingerprint) } return &resp, nil } // GetImageAliases returns the list of available aliases as ImageAliasesEntry structs. func (r *ProtocolIncus) GetImageAliases() ([]api.ImageAliasesEntry, error) { aliases := []api.ImageAliasesEntry{} // Fetch the raw value _, err := r.queryStruct("GET", "/images/aliases?recursion=1", nil, "", &aliases) if err != nil { return nil, err } return aliases, nil } // GetImageAliasNames returns the list of available alias names. func (r *ProtocolIncus) GetImageAliasNames() ([]string, error) { // Fetch the raw URL values. urls := []string{} baseURL := "/images/aliases" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetImageAlias returns an existing alias as an ImageAliasesEntry struct. 
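//
// A minimal usage sketch resolving an alias to its image; "c" is assumed to be
// an established ImageServer connection and "debian/12" is a hypothetical
// alias name.
//
//	alias, _, err := c.GetImageAlias("debian/12")
//	if err != nil {
//		return err
//	}
//
//	image, _, err := c.GetImage(alias.Target)
//	if err != nil {
//		return err
//	}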
func (r *ProtocolIncus) GetImageAlias(name string) (*api.ImageAliasesEntry, string, error) { alias := api.ImageAliasesEntry{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), nil, "", &alias) if err != nil { return nil, "", err } return &alias, etag, nil } // GetImageAliasType returns an existing alias as an ImageAliasesEntry struct. func (r *ProtocolIncus) GetImageAliasType(imageType string, name string) (*api.ImageAliasesEntry, string, error) { alias, etag, err := r.GetImageAlias(name) if err != nil { return nil, "", err } if imageType != "" { if alias.Type == "" { alias.Type = "container" } if alias.Type != imageType { return nil, "", fmt.Errorf("Alias doesn't exist for the specified type") } } return alias, etag, nil } // GetImageAliasArchitectures returns a map of architectures / targets. func (r *ProtocolIncus) GetImageAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { alias, _, err := r.GetImageAliasType(imageType, name) if err != nil { return nil, err } img, _, err := r.GetImage(alias.Target) if err != nil { return nil, err } return map[string]*api.ImageAliasesEntry{img.Architecture: alias}, nil } // CreateImage requests that Incus creates, copies or import a new image. func (r *ProtocolIncus) CreateImage(image api.ImagesPost, args *ImageCreateArgs) (Operation, error) { if image.CompressionAlgorithm != "" { if !r.HasExtension("image_compression_algorithm") { return nil, fmt.Errorf("The server is missing the required \"image_compression_algorithm\" API extension") } } // Send the JSON based request if args == nil { op, _, err := r.queryOperation("POST", "/images", image, "") if err != nil { return nil, err } return op, nil } // Prepare an image upload if args.MetaFile == nil { return nil, fmt.Errorf("Metadata file is required") } // Prepare the body var ioErr error var body io.Reader var contentType string if args.RootfsFile == nil { // If unified image, just pass it through body = args.MetaFile contentType = "application/octet-stream" } else { pr, pw := io.Pipe() // Setup the multipart writer w := multipart.NewWriter(pw) go func() { var ioErr error defer func() { cerr := w.Close() if ioErr == nil && cerr != nil { ioErr = cerr } _ = pw.CloseWithError(ioErr) }() // Metadata file fw, ioErr := w.CreateFormFile("metadata", args.MetaName) if ioErr != nil { return } _, ioErr = io.Copy(fw, args.MetaFile) if ioErr != nil { return } // Rootfs file if args.Type == "virtual-machine" { fw, ioErr = w.CreateFormFile("rootfs.img", args.RootfsName) } else { fw, ioErr = w.CreateFormFile("rootfs", args.RootfsName) } if ioErr != nil { return } _, ioErr = io.Copy(fw, args.RootfsFile) if ioErr != nil { return } // Done writing to multipart ioErr = w.Close() if ioErr != nil { return } ioErr = pw.Close() if ioErr != nil { return } }() // Setup progress handler if args.ProgressHandler != nil { body = &ioprogress.ProgressReader{ ReadCloser: pr, Tracker: &ioprogress.ProgressTracker{ Handler: func(received int64, speed int64) { args.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) }, }, } } else { body = pr } contentType = w.FormDataContentType() } // Prepare the HTTP request reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0/images", r.httpBaseURL.String())) if err != nil { return nil, err } req, err := http.NewRequest("POST", reqURL, body) if err != nil { return nil, err } // Setup the headers 
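// The image metadata (visibility, filename, properties, profiles, aliases and
// push-mode source secrets) travels out of band in X-Incus-* request headers,
// while the request body only carries the image tarball(s).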
req.Header.Set("Content-Type", contentType) if image.Public { req.Header.Set("X-Incus-public", "true") } if image.Filename != "" { req.Header.Set("X-Incus-filename", image.Filename) } if len(image.Properties) > 0 { imgProps := url.Values{} for k, v := range image.Properties { imgProps.Set(k, v) } req.Header.Set("X-Incus-properties", imgProps.Encode()) } if len(image.Profiles) > 0 { imgProfiles := url.Values{} for _, v := range image.Profiles { imgProfiles.Add("profile", v) } req.Header.Set("X-Incus-profiles", imgProfiles.Encode()) } if len(image.Aliases) > 0 { imgProfiles := url.Values{} for _, v := range image.Aliases { imgProfiles.Add("alias", v.Name) } req.Header.Set("X-Incus-aliases", imgProfiles.Encode()) } // Set the user agent if image.Source != nil && image.Source.Fingerprint != "" && image.Source.Secret != "" && image.Source.Mode == "push" { // Set fingerprint req.Header.Set("X-Incus-fingerprint", image.Source.Fingerprint) // Set secret req.Header.Set("X-Incus-secret", image.Source.Secret) } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } defer func() { _ = resp.Body.Close() }() if ioErr != nil { return nil, err } // Handle errors response, _, err := incusParseResponse(resp) if err != nil { return nil, err } // Get to the operation respOperation, err := response.MetadataAsOperation() if err != nil { return nil, err } // Setup an Operation wrapper op := operation{ Operation: *respOperation, r: r, chActive: make(chan bool), } return &op, nil } // tryCopyImage iterates through the source server URLs until one lets it download the image. func (r *ProtocolIncus) tryCopyImage(req api.ImagesPost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } // For older servers, apply the aliases after copy if !r.HasExtension("image_create_aliases") && req.Aliases != nil { rop.chPost = make(chan bool) go func() { defer close(rop.chPost) // Wait for the main operation to finish <-rop.chDone if rop.err != nil { return } var errors []remoteOperationResult // Get the operation data op, err := rop.GetTarget() if err != nil { errors = append(errors, remoteOperationResult{Error: err}) rop.err = remoteOperationError("Failed to get operation data", errors) return } // Extract the fingerprint fingerprint := op.Metadata["fingerprint"].(string) // Add the aliases for _, entry := range req.Aliases { alias := api.ImageAliasesPost{} alias.Name = entry.Name alias.Target = fingerprint err := r.CreateImageAlias(alias) if err != nil { errors = append(errors, remoteOperationResult{Error: err}) rop.err = remoteOperationError("Failed to create image alias", errors) return } } }() } // Forward targetOp to remote op go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { req.Source.Server = serverURL op, err := r.CreateImage(req, nil) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop.handlerLock.Lock() rop.targetOp = op rop.handlerLock.Unlock() for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if !success { rop.err = remoteOperationError("Failed remote image download", errors) } close(rop.chDone) }() return &rop, nil } // CopyImage 
copies an image from a remote server. Additional options can be passed using ImageCopyArgs. func (r *ProtocolIncus) CopyImage(source ImageServer, image api.Image, args *ImageCopyArgs) (RemoteOperation, error) { // Quick checks. if r.isSameServer(source) { return nil, fmt.Errorf("The source and target servers must be different") } // Handle profile list overrides. if args != nil && args.Profiles != nil { if !r.HasExtension("image_copy_profile") { return nil, fmt.Errorf("The server is missing the required \"image_copy_profile\" API extension") } image.Profiles = args.Profiles } else { // If profiles aren't provided, clear the list on the source to // avoid requiring the destination to have them all. image.Profiles = nil } // Get source server connection information info, err := source.GetConnectionInfo() if err != nil { return nil, err } // Push mode if args != nil && args.Mode == "push" { // Get certificate and URL info, err := r.GetConnectionInfo() if err != nil { return nil, err } imagesPost := api.ImagesPost{ Source: &api.ImagesPostSource{ Fingerprint: image.Fingerprint, Mode: args.Mode, }, } imagesPost.Aliases = args.Aliases if args.CopyAliases { imagesPost.Aliases = image.Aliases if args.Aliases != nil { imagesPost.Aliases = append(imagesPost.Aliases, args.Aliases...) } } imagesPost.ExpiresAt = image.ExpiresAt imagesPost.Properties = image.Properties imagesPost.Public = args.Public // Receive token from target server. This token is later passed to the source which will use // it, together with the URL and certificate, to connect to the target. tokenOp, err := r.CreateImage(imagesPost, nil) if err != nil { return nil, err } opAPI := tokenOp.Get() secret, ok := opAPI.Metadata["secret"] if !ok { return nil, fmt.Errorf("No token provided") } req := api.ImageExportPost{ Target: info.URL, Certificate: info.Certificate, Secret: secret.(string), Project: info.Project, Profiles: image.Profiles, } exportOp, err := source.ExportImage(image.Fingerprint, req) if err != nil { _ = tokenOp.Cancel() return nil, err } rop := remoteOperation{ targetOp: exportOp, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() _ = tokenOp.Cancel() close(rop.chDone) }() return &rop, nil } // Relay mode if args != nil && args.Mode == "relay" { metaFile, err := os.CreateTemp("", "incus_image_") if err != nil { return nil, err } defer func() { _ = os.Remove(metaFile.Name()) }() rootfsFile, err := os.CreateTemp("", "incus_image_") if err != nil { return nil, err } defer func() { _ = os.Remove(rootfsFile.Name()) }() // Import image req := ImageFileRequest{ MetaFile: metaFile, RootfsFile: rootfsFile, } resp, err := source.GetImageFile(image.Fingerprint, req) if err != nil { return nil, err } // Export image _, err = metaFile.Seek(0, io.SeekStart) if err != nil { return nil, err } _, err = rootfsFile.Seek(0, io.SeekStart) if err != nil { return nil, err } imagePost := api.ImagesPost{} imagePost.Public = args.Public imagePost.Profiles = image.Profiles imagePost.Aliases = args.Aliases if args.CopyAliases { imagePost.Aliases = image.Aliases if args.Aliases != nil { imagePost.Aliases = append(imagePost.Aliases, args.Aliases...) 
} } createArgs := &ImageCreateArgs{ MetaFile: metaFile, MetaName: image.Filename, Type: image.Type, } if resp.RootfsName != "" { // Deal with split images createArgs.RootfsFile = rootfsFile createArgs.RootfsName = image.Filename } rop := remoteOperation{ chDone: make(chan bool), } go func() { defer close(rop.chDone) op, err := r.CreateImage(imagePost, createArgs) if err != nil { rop.err = remoteOperationError("Failed to copy image", nil) return } rop.handlerLock.Lock() rop.targetOp = op rop.handlerLock.Unlock() for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { rop.err = remoteOperationError("Failed to copy image", nil) return } // Apply the aliases. for _, entry := range imagePost.Aliases { alias := api.ImageAliasesPost{} alias.Name = entry.Name alias.Target = image.Fingerprint err := r.CreateImageAlias(alias) if err != nil { rop.err = remoteOperationError("Failed to add alias", nil) return } } }() return &rop, nil } // Prepare the copy request req := api.ImagesPost{ Source: &api.ImagesPostSource{ ImageSource: api.ImageSource{ Certificate: info.Certificate, Protocol: info.Protocol, }, Fingerprint: image.Fingerprint, Mode: "pull", Type: "image", Project: info.Project, }, ImagePut: api.ImagePut{ Profiles: image.Profiles, }, } if args != nil { req.Source.ImageType = args.Type } // Generate secret token if needed if !image.Public { secret, err := source.GetImageSecret(image.Fingerprint) if err != nil { return nil, err } req.Source.Secret = secret } // Process the arguments if args != nil { req.Aliases = args.Aliases req.AutoUpdate = args.AutoUpdate req.Public = args.Public if args.CopyAliases { req.Aliases = image.Aliases if args.Aliases != nil { req.Aliases = append(req.Aliases, args.Aliases...) } } } return r.tryCopyImage(req, info.Addresses) } // UpdateImage updates the image definition. func (r *ProtocolIncus) UpdateImage(fingerprint string, image api.ImagePut, ETag string) error { // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)), image, ETag) if err != nil { return err } return nil } // DeleteImage requests that Incus removes an image from the store. func (r *ProtocolIncus) DeleteImage(fingerprint string) (Operation, error) { // Send the request op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/images/%s", url.PathEscape(fingerprint)), nil, "") if err != nil { return nil, err } return op, nil } // RefreshImage requests that Incus issues an image refresh. func (r *ProtocolIncus) RefreshImage(fingerprint string) (Operation, error) { if !r.HasExtension("image_force_refresh") { return nil, fmt.Errorf("The server is missing the required \"image_force_refresh\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/refresh", url.PathEscape(fingerprint)), nil, "") if err != nil { return nil, err } return op, nil } // CreateImageSecret requests that Incus issues a temporary image secret. func (r *ProtocolIncus) CreateImageSecret(fingerprint string) (Operation, error) { // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/secret", url.PathEscape(fingerprint)), nil, "") if err != nil { return nil, err } return op, nil } // CreateImageAlias sets up a new image alias. 
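//
// A minimal usage sketch; "c" is assumed to be a connected InstanceServer and
// "fingerprint" an image that already exists on that server.
//
//	alias := api.ImageAliasesPost{}
//	alias.Name = "myapp/stable"
//	alias.Target = fingerprint
//
//	err := c.CreateImageAlias(alias)
//	if err != nil {
//		return err
//	}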
func (r *ProtocolIncus) CreateImageAlias(alias api.ImageAliasesPost) error { // Send the request _, _, err := r.query("POST", "/images/aliases", alias, "") if err != nil { return err } return nil } // UpdateImageAlias updates the image alias definition. func (r *ProtocolIncus) UpdateImageAlias(name string, alias api.ImageAliasesEntryPut, ETag string) error { // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), alias, ETag) if err != nil { return err } return nil } // RenameImageAlias renames an existing image alias. func (r *ProtocolIncus) RenameImageAlias(name string, alias api.ImageAliasesEntryPost) error { // Send the request _, _, err := r.query("POST", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), alias, "") if err != nil { return err } return nil } // DeleteImageAlias removes an alias from the Incus image store. func (r *ProtocolIncus) DeleteImageAlias(name string) error { // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/images/aliases/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } // ExportImage exports (copies) an image to a remote server. func (r *ProtocolIncus) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) { if !r.HasExtension("images_push_relay") { return nil, fmt.Errorf("The server is missing the required \"images_push_relay\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/images/%s/export", url.PathEscape(fingerprint)), &image, "") if err != nil { return nil, err } return op, nil } incus-6.0.4/client/incus_instances.go000066400000000000000000002372461477363751000176720ustar00rootroot00000000000000package incus import ( "bufio" "context" "encoding/json" "fmt" "io" "net" "net/http" "net/url" "path/filepath" "slices" "strings" "github.com/gorilla/websocket" "github.com/pkg/sftp" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/cancel" "github.com/lxc/incus/v6/shared/ioprogress" "github.com/lxc/incus/v6/shared/tcp" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/units" "github.com/lxc/incus/v6/shared/ws" ) // Instance handling functions. // instanceTypeToPath converts the instance type to a URL path prefix and query string values. func (r *ProtocolIncus) instanceTypeToPath(instanceType api.InstanceType) (string, url.Values, error) { v := url.Values{} // If a specific instance type has been requested, add the instance-type filter parameter // to the returned URL values so that it can be used in the final URL if needed to filter // the result set being returned. if instanceType != api.InstanceTypeAny { v.Set("instance-type", string(instanceType)) } return "/instances", v, nil } // GetInstanceNames returns a list of instance names. func (r *ProtocolIncus) GetInstanceNames(instanceType api.InstanceType) ([]string, error) { baseURL, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", baseURL, v.Encode()), nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetInstanceNamesAllProjects returns a list of instance names from all projects. 
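//
// A minimal usage sketch ("c" is assumed to be a connected InstanceServer;
// error handling elided).
//
//	names, err := c.GetInstanceNamesAllProjects(api.InstanceTypeAny)
//	if err != nil {
//		return err
//	}
//
//	for project, instances := range names {
//		fmt.Println(project, instances)
//	}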
func (r *ProtocolIncus) GetInstanceNamesAllProjects(instanceType api.InstanceType) (map[string][]string, error) { instances := []api.Instance{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "1") v.Set("all-projects", "true") // Fetch the raw URL values. _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } names := map[string][]string{} for _, instance := range instances { names[instance.Project] = append(names[instance.Project], instance.Name) } return names, nil } // GetInstances returns a list of instances. func (r *ProtocolIncus) GetInstances(instanceType api.InstanceType) ([]api.Instance, error) { instances := []api.Instance{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "1") // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesWithFilter returns a filtered list of instances. func (r *ProtocolIncus) GetInstancesWithFilter(instanceType api.InstanceType, filters []string) ([]api.Instance, error) { if !r.HasExtension("api_filtering") { return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") } instances := []api.Instance{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "1") v.Set("filter", parseFilters(filters)) // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesAllProjects returns a list of instances from all projects. func (r *ProtocolIncus) GetInstancesAllProjects(instanceType api.InstanceType) ([]api.Instance, error) { instances := []api.Instance{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "1") v.Set("all-projects", "true") if !r.HasExtension("instance_all_projects") { return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesAllProjectsWithFilter returns a filtered list of instances from all projects. func (r *ProtocolIncus) GetInstancesAllProjectsWithFilter(instanceType api.InstanceType, filters []string) ([]api.Instance, error) { if !r.HasExtension("api_filtering") { return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") } instances := []api.Instance{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "1") v.Set("all-projects", "true") v.Set("filter", parseFilters(filters)) if !r.HasExtension("instance_all_projects") { return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // UpdateInstances updates all instances to match the requested state. 
func (r *ProtocolIncus) UpdateInstances(state api.InstancesPut, ETag string) (Operation, error) { path, v, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s?%s", path, v.Encode()), state, ETag) if err != nil { return nil, err } return op, nil } // rebuildInstance initiates a rebuild of a given instance on the Incus Protocol server and returns the corresponding operation or an error. func (r *ProtocolIncus) rebuildInstance(instanceName string, instance api.InstanceRebuildPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/rebuild", path, url.PathEscape(instanceName)), instance, "") if err != nil { return nil, err } return op, nil } // tryRebuildInstance attempts to rebuild a specific instance on multiple target servers identified by their URLs. // It runs the rebuild process asynchronously and returns a RemoteOperation to monitor the progress and any errors. func (r *ProtocolIncus) tryRebuildInstance(instanceName string, req api.InstanceRebuildPost, urls []string, op Operation) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Source.Operation // Forward targetOp to remote op go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { if operation == "" { req.Source.Server = serverURL } else { req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) } op, err := r.rebuildInstance(instanceName, req) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop.handlerLock.Lock() rop.targetOp = op rop.handlerLock.Unlock() for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if !success { rop.err = remoteOperationError("Failed instance rebuild", errors) if op != nil { _ = op.Cancel() } } close(rop.chDone) }() return &rop, nil } // RebuildInstanceFromImage rebuilds an instance from an image. func (r *ProtocolIncus) RebuildInstanceFromImage(source ImageServer, image api.Image, instanceName string, req api.InstanceRebuildPost) (RemoteOperation, error) { err := r.CheckExtension("instances_rebuild") if err != nil { return nil, err } info, err := r.getSourceImageConnectionInfo(source, image, &req.Source) if err != nil { return nil, err } if info == nil { op, err := r.rebuildInstance(instanceName, req) if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } return r.tryRebuildInstance(instanceName, req, info.Addresses, nil) } // RebuildInstance rebuilds an instance as empty. func (r *ProtocolIncus) RebuildInstance(instanceName string, instance api.InstanceRebuildPost) (op Operation, err error) { err = r.CheckExtension("instances_rebuild") if err != nil { return nil, err } return r.rebuildInstance(instanceName, instance) } // GetInstancesFull returns a list of instances including snapshots, backups and state. 
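//
// A minimal usage sketch ("c" is assumed to be a connected InstanceServer).
// Each entry bundles the instance with its state, snapshots and backups, so a
// single request replaces several per-instance calls.
//
//	instances, err := c.GetInstancesFull(api.InstanceTypeAny)
//	if err != nil {
//		return err
//	}
//
//	for _, inst := range instances {
//		fmt.Println(inst.Name, len(inst.Snapshots))
//	}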
func (r *ProtocolIncus) GetInstancesFull(instanceType api.InstanceType) ([]api.InstanceFull, error) { instances := []api.InstanceFull{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "2") if !r.HasExtension("container_full") { return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesFullWithFilter returns a filtered list of instances including snapshots, backups and state. func (r *ProtocolIncus) GetInstancesFullWithFilter(instanceType api.InstanceType, filters []string) ([]api.InstanceFull, error) { if !r.HasExtension("api_filtering") { return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") } instances := []api.InstanceFull{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "2") v.Set("filter", parseFilters(filters)) if !r.HasExtension("container_full") { return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesFullAllProjects returns a list of instances including snapshots, backups and state from all projects. func (r *ProtocolIncus) GetInstancesFullAllProjects(instanceType api.InstanceType) ([]api.InstanceFull, error) { instances := []api.InstanceFull{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "2") v.Set("all-projects", "true") if !r.HasExtension("container_full") { return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") } if !r.HasExtension("instance_all_projects") { return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstancesFullAllProjectsWithFilter returns a filtered list of instances including snapshots, backups and state from all projects. func (r *ProtocolIncus) GetInstancesFullAllProjectsWithFilter(instanceType api.InstanceType, filters []string) ([]api.InstanceFull, error) { if !r.HasExtension("api_filtering") { return nil, fmt.Errorf("The server is missing the required \"api_filtering\" API extension") } instances := []api.InstanceFull{} path, v, err := r.instanceTypeToPath(instanceType) if err != nil { return nil, err } v.Set("recursion", "2") v.Set("all-projects", "true") v.Set("filter", parseFilters(filters)) if !r.HasExtension("container_full") { return nil, fmt.Errorf("The server is missing the required \"container_full\" API extension") } if !r.HasExtension("instance_all_projects") { return nil, fmt.Errorf("The server is missing the required \"instance_all_projects\" API extension") } // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &instances) if err != nil { return nil, err } return instances, nil } // GetInstance returns the instance entry for the provided name. 
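//
// A minimal usage sketch ("c" is assumed to be a connected InstanceServer).
// The returned ETag can be handed back to UpdateInstance so that concurrent
// modifications are detected:
//
//	inst, etag, err := c.GetInstance("c1")
//	if err != nil {
//		return err
//	}
//
//	// ... adjust the writable fields as needed ...
//	op, err := c.UpdateInstance("c1", inst.Writable(), etag)
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()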
func (r *ProtocolIncus) GetInstance(name string) (*api.Instance, string, error) { instance := api.Instance{} path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), nil, "", &instance) if err != nil { return nil, "", err } return &instance, etag, nil } // GetInstanceFull returns the instance entry for the provided name along with snapshot information. func (r *ProtocolIncus) GetInstanceFull(name string) (*api.InstanceFull, string, error) { instance := api.InstanceFull{} if !r.HasExtension("instance_get_full") { // Backward compatibility. ct, _, err := r.GetInstance(name) if err != nil { return nil, "", err } cs, _, err := r.GetInstanceState(name) if err != nil { return nil, "", err } snaps, err := r.GetInstanceSnapshots(name) if err != nil { return nil, "", err } backups, err := r.GetInstanceBackups(name) if err != nil { return nil, "", err } instance.Instance = *ct instance.State = cs instance.Snapshots = snaps instance.Backups = backups return &instance, "", nil } path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s?recursion=1", path, url.PathEscape(name)), nil, "", &instance) if err != nil { return nil, "", err } return &instance, etag, nil } // CreateInstanceFromBackup is a convenience function to make it easier to // create a instance from a backup. func (r *ProtocolIncus) CreateInstanceFromBackup(args InstanceBackupArgs) (Operation, error) { if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if args.PoolName == "" && args.Name == "" { // Send the request op, _, err := r.queryOperation("POST", path, args.BackupFile, "") if err != nil { return nil, err } return op, nil } if args.PoolName != "" && !r.HasExtension("container_backup_override_pool") { return nil, fmt.Errorf(`The server is missing the required "container_backup_override_pool" API extension`) } if args.Name != "" && !r.HasExtension("backup_override_name") { return nil, fmt.Errorf(`The server is missing the required "backup_override_name" API extension`) } // Prepare the HTTP request reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) if err != nil { return nil, err } req, err := http.NewRequest("POST", reqURL, args.BackupFile) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/octet-stream") if args.PoolName != "" { req.Header.Set("X-Incus-pool", args.PoolName) } if args.Name != "" { req.Header.Set("X-Incus-name", args.Name) } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } defer func() { _ = resp.Body.Close() }() // Handle errors response, _, err := incusParseResponse(resp) if err != nil { return nil, err } // Get to the operation respOperation, err := response.MetadataAsOperation() if err != nil { return nil, err } // Setup an Operation wrapper op := operation{ Operation: *respOperation, r: r, chActive: make(chan bool), } return &op, nil } // CreateInstance requests that Incus creates a new instance. 
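//
// A hypothetical launch sketch ("c" is assumed to be a connected
// InstanceServer; the instance type constant and the Source.Alias field are
// taken from the shared api package and should be checked against it):
//
//	req := api.InstancesPost{
//		Name: "c1",
//		Type: api.InstanceTypeContainer,
//	}
//	req.Source.Type = "image"
//	req.Source.Alias = "debian/12"
//
//	op, err := c.CreateInstance(req)
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()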
func (r *ProtocolIncus) CreateInstance(instance api.InstancesPost) (Operation, error) { path, _, err := r.instanceTypeToPath(instance.Type) if err != nil { return nil, err } if instance.Source.InstanceOnly { if !r.HasExtension("container_only_migration") { return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") } } // Send the request op, _, err := r.queryOperation("POST", path, instance, "") if err != nil { return nil, err } return op, nil } // tryCreateInstance attempts to create a new instance on multiple target servers specified by their URLs. // It runs the instance creation asynchronously and returns a RemoteOperation to monitor the progress and any errors. func (r *ProtocolIncus) tryCreateInstance(req api.InstancesPost, urls []string, op Operation) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Source.Operation // Forward targetOp to remote op chConnect := make(chan error, 1) chWait := make(chan error, 1) go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { if operation == "" { req.Source.Server = serverURL } else { req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) } op, err := r.CreateInstance(req) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop.handlerLock.Lock() rop.targetOp = op rop.handlerLock.Unlock() for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if success { chConnect <- nil close(chConnect) } else { chConnect <- remoteOperationError("Failed instance creation", errors) close(chConnect) if op != nil { _ = op.Cancel() } } }() if op != nil { go func() { chWait <- op.Wait() close(chWait) }() } go func() { var err error select { case err = <-chConnect: case err = <-chWait: } rop.err = err close(rop.chDone) }() return &rop, nil } // CreateInstanceFromImage is a convenience function to make it easier to create a instance from an existing image. func (r *ProtocolIncus) CreateInstanceFromImage(source ImageServer, image api.Image, req api.InstancesPost) (RemoteOperation, error) { info, err := r.getSourceImageConnectionInfo(source, image, &req.Source) if err != nil { return nil, err } // If the source server is the same as the target server, create the instance directly. if info == nil { op, err := r.CreateInstance(req) if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } return r.tryCreateInstance(req, info.Addresses, nil) } // CopyInstance copies a instance from a remote server. Additional options can be passed using InstanceCopyArgs. func (r *ProtocolIncus) CopyInstance(source InstanceServer, instance api.Instance, args *InstanceCopyArgs) (RemoteOperation, error) { // Base request req := api.InstancesPost{ Name: instance.Name, InstancePut: instance.Writable(), Type: api.InstanceType(instance.Type), } req.Source.BaseImage = instance.Config["volatile.base_image"] // Process the copy arguments if args != nil { // Quick checks. 
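// Each optional copy behaviour maps to an API extension; verifying the source
// and target servers up front lets unsupported requests fail early with a
// clear error instead of partway through the transfer.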
if args.InstanceOnly { if !r.HasExtension("container_only_migration") { return nil, fmt.Errorf("The target server is missing the required \"container_only_migration\" API extension") } if !source.HasExtension("container_only_migration") { return nil, fmt.Errorf("The source server is missing the required \"container_only_migration\" API extension") } } if slices.Contains([]string{"push", "relay"}, args.Mode) { if !r.HasExtension("container_push") { return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") } if !source.HasExtension("container_push") { return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") } } if args.Mode == "push" && !source.HasExtension("container_push_target") { return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") } if args.Refresh { if !r.HasExtension("container_incremental_copy") { return nil, fmt.Errorf("The target server is missing the required \"container_incremental_copy\" API extension") } if !source.HasExtension("container_incremental_copy") { return nil, fmt.Errorf("The source server is missing the required \"container_incremental_copy\" API extension") } } if args.RefreshExcludeOlder && !source.HasExtension("custom_volume_refresh_exclude_older_snapshots") { return nil, fmt.Errorf("The source server is missing the required \"custom_volume_refresh_exclude_older_snapshots\" API extension") } if args.AllowInconsistent { if !r.HasExtension("instance_allow_inconsistent_copy") { return nil, fmt.Errorf("The source server is missing the required \"instance_allow_inconsistent_copy\" API extension") } } // Allow overriding the target name if args.Name != "" { req.Name = args.Name } req.Source.Live = args.Live req.Source.InstanceOnly = args.InstanceOnly req.Source.Refresh = args.Refresh req.Source.RefreshExcludeOlder = args.RefreshExcludeOlder req.Source.AllowInconsistent = args.AllowInconsistent } if req.Source.Live { req.Source.Live = instance.StatusCode == api.Running } sourceInfo, err := source.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get source connection info: %w", err) } destInfo, err := r.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get destination connection info: %w", err) } // Optimization for the local copy case if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || instance.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { // Project handling if destInfo.Project != sourceInfo.Project { if !r.HasExtension("container_copy_project") { return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") } req.Source.Project = sourceInfo.Project } // Local copy source fields req.Source.Type = "copy" req.Source.Source = instance.Name // Copy the instance op, err := r.CreateInstance(req) if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // Source request sourceReq := api.InstancePost{ Migration: true, Live: req.Source.Live, InstanceOnly: req.Source.InstanceOnly, AllowInconsistent: req.Source.AllowInconsistent, } // Push mode migration if args != nil && args.Mode == "push" { // Get target server connection information info, err := r.GetConnectionInfo() if err != nil { return nil, err } // Create the 
instance req.Source.Type = "migration" req.Source.Mode = "push" req.Source.Refresh = args.Refresh req.Source.RefreshExcludeOlder = args.RefreshExcludeOlder op, err := r.CreateInstance(req) if err != nil { return nil, err } opAPI := op.Get() targetSecrets := map[string]string{} for k, v := range opAPI.Metadata { targetSecrets[k] = v.(string) } // Prepare the source request target := api.InstancePostTarget{} target.Operation = opAPI.ID target.Websockets = targetSecrets target.Certificate = info.Certificate sourceReq.Target = &target return r.tryMigrateInstance(source, instance.Name, sourceReq, info.Addresses, op) } // Get source server connection information info, err := source.GetConnectionInfo() if err != nil { return nil, err } op, err := source.MigrateInstance(instance.Name, sourceReq) if err != nil { return nil, err } opAPI := op.Get() sourceSecrets := map[string]string{} for k, v := range opAPI.Metadata { sourceSecrets[k] = v.(string) } // Relay mode migration if args != nil && args.Mode == "relay" { // Push copy source fields req.Source.Type = "migration" req.Source.Mode = "push" // Start the process targetOp, err := r.CreateInstance(req) if err != nil { return nil, err } targetOpAPI := targetOp.Get() // Extract the websockets targetSecrets := map[string]string{} for k, v := range targetOpAPI.Metadata { targetSecrets[k] = v.(string) } // Launch the relay err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) if err != nil { return nil, err } // Prepare a tracking operation rop := remoteOperation{ targetOp: targetOp, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // Pull mode migration req.Source.Type = "migration" req.Source.Mode = "pull" req.Source.Operation = opAPI.ID req.Source.Websockets = sourceSecrets req.Source.Certificate = info.Certificate return r.tryCreateInstance(req, info.Addresses, op) } // UpdateInstance updates the instance definition. func (r *ProtocolIncus) UpdateInstance(name string, instance api.InstancePut, ETag string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, ETag) if err != nil { return nil, err } return op, nil } // RenameInstance requests that Incus renames the instance. func (r *ProtocolIncus) RenameInstance(name string, instance api.InstancePost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Quick check. if instance.Migration { return nil, fmt.Errorf("Can't ask for a migration through RenameInstance") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, "") if err != nil { return nil, err } return op, nil } // tryMigrateInstance attempts to migrate a specific instance from a source server to one of the target URLs. // The function runs the migration operation asynchronously and returns a RemoteOperation to track the progress and handle any errors. 
func (r *ProtocolIncus) tryMigrateInstance(source InstanceServer, name string, req api.InstancePost, urls []string, op Operation) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The target server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Target.Operation // Forward targetOp to remote op chConnect := make(chan error, 1) chWait := make(chan error, 1) go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) op, err := source.MigrateInstance(name, req) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop.targetOp = op for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if success { chConnect <- nil close(chConnect) } else { chConnect <- remoteOperationError("Failed instance migration", errors) close(chConnect) if op != nil { _ = op.Cancel() } } }() if op != nil { go func() { chWait <- op.Wait() close(chWait) }() } go func() { var err error select { case err = <-chConnect: case err = <-chWait: } rop.err = err close(rop.chDone) }() return &rop, nil } // MigrateInstance requests that Incus prepares for a instance migration. func (r *ProtocolIncus) MigrateInstance(name string, instance api.InstancePost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if instance.InstanceOnly { if !r.HasExtension("container_only_migration") { return nil, fmt.Errorf("The server is missing the required \"container_only_migration\" API extension") } } if instance.Pool != "" && !r.HasExtension("instance_pool_move") { return nil, fmt.Errorf("The server is missing the required \"instance_pool_move\" API extension") } if instance.Project != "" && !r.HasExtension("instance_project_move") { return nil, fmt.Errorf("The server is missing the required \"instance_project_move\" API extension") } if instance.AllowInconsistent && !r.HasExtension("cluster_migration_inconsistent_copy") { return nil, fmt.Errorf("The server is missing the required \"cluster_migration_inconsistent_copy\" API extension") } // Quick check. if !instance.Migration { return nil, fmt.Errorf("Can't ask for a rename through MigrateInstance") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), instance, "") if err != nil { return nil, err } return op, nil } // DeleteInstance requests that Incus deletes the instance. func (r *ProtocolIncus) DeleteInstance(name string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s", path, url.PathEscape(name)), nil, "") if err != nil { return nil, err } return op, nil } // ExecInstance requests that Incus spawns a command inside the instance. func (r *ProtocolIncus) ExecInstance(instanceName string, exec api.InstanceExecPost, args *InstanceExecArgs) (Operation, error) { // Ensure args are equivalent to empty InstanceExecArgs. 
	if args == nil {
		args = &InstanceExecArgs{}
	}

	if exec.RecordOutput {
		if !r.HasExtension("container_exec_recording") {
			return nil, fmt.Errorf("The server is missing the required \"container_exec_recording\" API extension")
		}
	}

	if exec.User > 0 || exec.Group > 0 || exec.Cwd != "" {
		if !r.HasExtension("container_exec_user_group_cwd") {
			return nil, fmt.Errorf("The server is missing the required \"container_exec_user_group_cwd\" API extension")
		}
	}

	var uri string

	if r.IsAgent() {
		uri = "/exec"
	} else {
		path, _, err := r.instanceTypeToPath(api.InstanceTypeAny)
		if err != nil {
			return nil, err
		}

		uri = fmt.Sprintf("%s/%s/exec", path, url.PathEscape(instanceName))
	}

	// Send the request
	op, _, err := r.queryOperation("POST", uri, exec, "")
	if err != nil {
		return nil, err
	}

	opAPI := op.Get()

	// Process additional arguments
	// Parse the fds
	fds := map[string]string{}

	value, ok := opAPI.Metadata["fds"]
	if ok {
		values := value.(map[string]any)
		for k, v := range values {
			fds[k] = v.(string)
		}
	}

	if exec.RecordOutput && (args.Stdout != nil || args.Stderr != nil) {
		err = op.Wait()
		if err != nil {
			return nil, err
		}

		opAPI = op.Get()
		outputFiles := map[string]string{}
		outputs, ok := opAPI.Metadata["output"].(map[string]any)
		if ok {
			for k, v := range outputs {
				outputFiles[k] = v.(string)
			}
		}

		if outputFiles["1"] != "" {
			reader, _ := r.getInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"]))

			if args.Stdout != nil {
				_, errCopy := io.Copy(args.Stdout, reader)

				// Regardless of errCopy value, we want to delete the file after a copy operation
				errDelete := r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"]))
				if errDelete != nil {
					return nil, errDelete
				}

				if errCopy != nil {
					return nil, fmt.Errorf("Could not copy the content of the exec output log file to stdout: %w", errCopy)
				}
			} else {
				// No stdout writer supplied, just discard the recorded output file.
				err = r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["1"]))
				if err != nil {
					return nil, err
				}
			}
		}

		if outputFiles["2"] != "" {
			reader, _ := r.getInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["2"]))

			if args.Stderr != nil {
				_, errCopy := io.Copy(args.Stderr, reader)

				// Regardless of errCopy value, we want to delete the file after a copy operation
				errDelete := r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["2"]))
				if errDelete != nil {
					return nil, errDelete
				}

				if errCopy != nil {
					return nil, fmt.Errorf("Could not copy the content of the exec output log file to stderr: %w", errCopy)
				}
			} else {
				// No stderr writer supplied, just discard the recorded output file.
				err = r.deleteInstanceExecOutputLogFile(instanceName, filepath.Base(outputFiles["2"]))
				if err != nil {
					return nil, err
				}
			}
		}
	}

	if fds[api.SecretNameControl] != "" {
		conn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl])
		if err != nil {
			return nil, err
		}

		go func() {
			_, _, _ = conn.ReadMessage() // Consume pings from server.
}() if args.Control != nil { // Call the control handler with a connection to the control socket go args.Control(conn) } } if exec.Interactive { // Handle interactive sections if args.Stdin != nil && args.Stdout != nil { // Connect to the websocket conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) if err != nil { return nil, err } // And attach stdin and stdout to it go func() { ws.MirrorRead(conn, args.Stdin) <-ws.MirrorWrite(conn, args.Stdout) _ = conn.Close() if args.DataDone != nil { close(args.DataDone) } }() } else { if args.DataDone != nil { close(args.DataDone) } } } else { // Handle non-interactive sessions dones := make(map[int]chan error) conns := []*websocket.Conn{} // Handle stdin if fds["0"] != "" { conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) if err != nil { return nil, err } go func() { _, _, _ = conn.ReadMessage() // Consume pings from server. }() conns = append(conns, conn) dones[0] = ws.MirrorRead(conn, args.Stdin) } waitConns := 0 // Used for keeping track of when stdout and stderr have finished. // Handle stdout if fds["1"] != "" { conn, err := r.GetOperationWebsocket(opAPI.ID, fds["1"]) if err != nil { return nil, err } // Discard Stdout from remote command if output writer not supplied. if args.Stdout == nil { args.Stdout = io.Discard } conns = append(conns, conn) dones[1] = ws.MirrorWrite(conn, args.Stdout) waitConns++ } // Handle stderr if fds["2"] != "" { conn, err := r.GetOperationWebsocket(opAPI.ID, fds["2"]) if err != nil { return nil, err } // Discard Stderr from remote command if output writer not supplied. if args.Stderr == nil { args.Stderr = io.Discard } conns = append(conns, conn) dones[2] = ws.MirrorWrite(conn, args.Stderr) waitConns++ } // Wait for everything to be done go func() { for { select { case <-dones[0]: // Handle stdin finish, but don't wait for it if output channels // have all finished. dones[0] = nil _ = conns[0].Close() case <-dones[1]: dones[1] = nil _ = conns[1].Close() waitConns-- case <-dones[2]: dones[2] = nil _ = conns[2].Close() waitConns-- } if waitConns <= 0 { // Close stdin websocket if defined and not already closed. if dones[0] != nil { conns[0].Close() } break } } if args.DataDone != nil { close(args.DataDone) } }() } return op, nil } // GetInstanceFile retrieves the provided path from the instance. 
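//
// A minimal usage sketch ("c" is assumed to be a connected InstanceServer).
// For regular files the returned ReadCloser streams the content; for
// directories it is nil and the response carries the directory entries
// instead.
//
//	reader, resp, err := c.GetInstanceFile("c1", "/etc/hostname")
//	if err != nil {
//		return err
//	}
//
//	if resp.Type != "directory" {
//		defer reader.Close()
//		_, err = io.Copy(os.Stdout, reader)
//	}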
func (r *ProtocolIncus) GetInstanceFile(instanceName string, filePath string) (io.ReadCloser, *InstanceFileResponse, error) { var err error var requestURL string urlEncode := func(path string, query map[string]string) (string, error) { u, err := url.Parse(path) if err != nil { return "", err } params := url.Values{} for key, value := range query { params.Add(key, value) } u.RawQuery = params.Encode() return u.String(), nil } if r.IsAgent() { requestURL, err = urlEncode( fmt.Sprintf("%s/1.0/files", r.httpBaseURL.String()), map[string]string{"path": filePath}) } else { var path string path, _, err = r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, nil, err } // Prepare the HTTP request requestURL, err = urlEncode( fmt.Sprintf("%s/1.0%s/%s/files", r.httpBaseURL.String(), path, url.PathEscape(instanceName)), map[string]string{"path": filePath}) } if err != nil { return nil, nil, err } requestURL, err = r.setQueryAttributes(requestURL) if err != nil { return nil, nil, err } req, err := http.NewRequest("GET", requestURL, nil) if err != nil { return nil, nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, nil, err } } // Parse the headers uid, gid, mode, fileType, _ := api.ParseFileHeaders(resp.Header) fileResp := InstanceFileResponse{ UID: uid, GID: gid, Mode: mode, Type: fileType, } if fileResp.Type == "directory" { // Decode the response response := api.Response{} decoder := json.NewDecoder(resp.Body) err = decoder.Decode(&response) if err != nil { return nil, nil, err } // Get the file list entries := []string{} err = response.MetadataAsStruct(&entries) if err != nil { return nil, nil, err } fileResp.Entries = entries return nil, &fileResp, err } return resp.Body, &fileResp, err } // CreateInstanceFile tells Incus to create a file in the instance. 
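//
// A minimal usage sketch pushing a small file ("c" is assumed to be a
// connected InstanceServer; the content must be seekable so the request body
// can be replayed):
//
//	args := InstanceFileArgs{
//		Content: strings.NewReader("hello\n"),
//		UID:     0,
//		GID:     0,
//		Mode:    0644,
//	}
//
//	err := c.CreateInstanceFile("c1", "/root/hello.txt", args)
//	if err != nil {
//		return err
//	}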
func (r *ProtocolIncus) CreateInstanceFile(instanceName string, filePath string, args InstanceFileArgs) error { if args.Type == "directory" { if !r.HasExtension("directory_manipulation") { return fmt.Errorf("The server is missing the required \"directory_manipulation\" API extension") } } if args.Type == "symlink" { if !r.HasExtension("file_symlinks") { return fmt.Errorf("The server is missing the required \"file_symlinks\" API extension") } } if args.WriteMode == "append" { if !r.HasExtension("file_append") { return fmt.Errorf("The server is missing the required \"file_append\" API extension") } } var requestURL string if r.IsAgent() { requestURL = fmt.Sprintf("%s/1.0/files?path=%s", r.httpBaseURL.String(), url.QueryEscape(filePath)) } else { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } // Prepare the HTTP request requestURL = fmt.Sprintf("%s/1.0%s/%s/files?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(filePath)) } requestURL, err := r.setQueryAttributes(requestURL) if err != nil { return err } req, err := http.NewRequest("POST", requestURL, args.Content) if err != nil { return err } req.GetBody = func() (io.ReadCloser, error) { _, err := args.Content.Seek(0, 0) if err != nil { return nil, err } return io.NopCloser(args.Content), nil } // Set the various headers if args.UID > -1 { req.Header.Set("X-Incus-uid", fmt.Sprintf("%d", args.UID)) } if args.GID > -1 { req.Header.Set("X-Incus-gid", fmt.Sprintf("%d", args.GID)) } if args.Mode > -1 { req.Header.Set("X-Incus-mode", fmt.Sprintf("%04o", args.Mode)) } if args.Type != "" { req.Header.Set("X-Incus-type", args.Type) } if args.WriteMode != "" { req.Header.Set("X-Incus-write", args.WriteMode) } // Send the request resp, err := r.DoHTTP(req) if err != nil { return err } // Check the return value for a cleaner error _, _, err = incusParseResponse(resp) if err != nil { return err } return nil } // DeleteInstanceFile deletes a file in the instance. func (r *ProtocolIncus) DeleteInstanceFile(instanceName string, filePath string) error { if !r.HasExtension("file_delete") { return fmt.Errorf("The server is missing the required \"file_delete\" API extension") } var requestURL string if r.IsAgent() { requestURL = fmt.Sprintf("/files?path=%s", url.QueryEscape(filePath)) } else { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } // Prepare the HTTP request requestURL = fmt.Sprintf("%s/%s/files?path=%s", path, url.PathEscape(instanceName), url.QueryEscape(filePath)) } requestURL, err := r.setQueryAttributes(requestURL) if err != nil { return err } // Send the request _, _, err = r.query("DELETE", requestURL, nil, "") if err != nil { return err } return nil } // rawSFTPConn connects to the apiURL, upgrades to an SFTP raw connection and returns it. func (r *ProtocolIncus) rawSFTPConn(apiURL *url.URL) (net.Conn, error) { // Get the HTTP transport. httpTransport, err := r.getUnderlyingHTTPTransport() if err != nil { return nil, err } req := &http.Request{ Method: http.MethodGet, URL: apiURL, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, Header: make(http.Header), Host: apiURL.Host, } req.Header["Upgrade"] = []string{"sftp"} req.Header["Connection"] = []string{"Upgrade"} r.addClientHeaders(req) // Establish the connection. 
var conn net.Conn if httpTransport.TLSClientConfig != nil { conn, err = httpTransport.DialTLSContext(context.Background(), "tcp", apiURL.Host) } else { conn, err = httpTransport.DialContext(context.Background(), "tcp", apiURL.Host) } if err != nil { return nil, err } remoteTCP, _ := tcp.ExtractConn(conn) if remoteTCP != nil { err = tcp.SetTimeouts(remoteTCP, 0) if err != nil { return nil, err } } err = req.Write(conn) if err != nil { return nil, err } resp, err := http.ReadResponse(bufio.NewReader(conn), req) if err != nil { return nil, err } if resp.StatusCode != http.StatusSwitchingProtocols { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } if resp.Header.Get("Upgrade") != "sftp" { return nil, fmt.Errorf("Missing or unexpected Upgrade header in response") } return conn, err } // GetInstanceFileSFTPConn returns a connection to the instance's SFTP endpoint. func (r *ProtocolIncus) GetInstanceFileSFTPConn(instanceName string) (net.Conn, error) { apiURL := api.NewURL() apiURL.URL = r.httpBaseURL // Preload the URL with the client base URL. apiURL.Path("1.0", "instances", instanceName, "sftp") r.setURLQueryAttributes(&apiURL.URL) return r.rawSFTPConn(&apiURL.URL) } // GetInstanceFileSFTP returns an SFTP connection to the instance. func (r *ProtocolIncus) GetInstanceFileSFTP(instanceName string) (*sftp.Client, error) { conn, err := r.GetInstanceFileSFTPConn(instanceName) if err != nil { return nil, err } // Get a SFTP client. client, err := sftp.NewClientPipe(conn, conn, sftp.MaxPacketUnchecked(128*1024)) if err != nil { _ = conn.Close() return nil, err } go func() { // Wait for the client to be done before closing the connection. _ = client.Wait() _ = conn.Close() }() return client, nil } // GetInstanceSnapshotNames returns a list of snapshot names for the instance. func (r *ProtocolIncus) GetInstanceSnapshotNames(instanceName string) ([]string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("%s/%s/snapshots", path, url.PathEscape(instanceName)) _, err = r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetInstanceSnapshots returns a list of snapshots for the instance. func (r *ProtocolIncus) GetInstanceSnapshots(instanceName string) ([]api.InstanceSnapshot, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } snapshots := []api.InstanceSnapshot{} // Fetch the raw value _, err = r.queryStruct("GET", fmt.Sprintf("%s/%s/snapshots?recursion=1", path, url.PathEscape(instanceName)), nil, "", &snapshots) if err != nil { return nil, err } return snapshots, nil } // GetInstanceSnapshot returns a Snapshot struct for the provided instance and snapshot names. func (r *ProtocolIncus) GetInstanceSnapshot(instanceName string, name string) (*api.InstanceSnapshot, string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } snapshot := api.InstanceSnapshot{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "", &snapshot) if err != nil { return nil, "", err } return &snapshot, etag, nil } // CreateInstanceSnapshot requests that Incus creates a new snapshot for the instance. 
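//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// (error handling elided):
//
//	op, err := c.CreateInstanceSnapshot("c1", api.InstanceSnapshotsPost{Name: "snap0"})
//	if err == nil {
//		err = op.Wait() // Block until the snapshot operation completes.
//	}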
func (r *ProtocolIncus) CreateInstanceSnapshot(instanceName string, snapshot api.InstanceSnapshotsPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Validate the request if snapshot.ExpiresAt != nil && !r.HasExtension("snapshot_expiry_creation") { return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry_creation\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots", path, url.PathEscape(instanceName)), snapshot, "") if err != nil { return nil, err } return op, nil } // CopyInstanceSnapshot copies a snapshot from a remote server into a new instance. Additional options can be passed using InstanceCopyArgs. func (r *ProtocolIncus) CopyInstanceSnapshot(source InstanceServer, instanceName string, snapshot api.InstanceSnapshot, args *InstanceSnapshotCopyArgs) (RemoteOperation, error) { // Backward compatibility (with broken Name field) fields := strings.Split(snapshot.Name, "/") cName := instanceName sName := fields[len(fields)-1] // Base request req := api.InstancesPost{ Name: cName, InstancePut: api.InstancePut{ Architecture: snapshot.Architecture, Config: snapshot.Config, Devices: snapshot.Devices, Ephemeral: snapshot.Ephemeral, Profiles: snapshot.Profiles, }, } if snapshot.Stateful && args.Live { if !r.HasExtension("container_snapshot_stateful_migration") { return nil, fmt.Errorf("The server is missing the required \"container_snapshot_stateful_migration\" API extension") } req.InstancePut.Stateful = snapshot.Stateful req.Source.Live = false // Snapshots are never running and so we don't need live migration. } req.Source.BaseImage = snapshot.Config["volatile.base_image"] // Process the copy arguments if args != nil { // Quick checks. 
if slices.Contains([]string{"push", "relay"}, args.Mode) { if !r.HasExtension("container_push") { return nil, fmt.Errorf("The target server is missing the required \"container_push\" API extension") } if !source.HasExtension("container_push") { return nil, fmt.Errorf("The source server is missing the required \"container_push\" API extension") } } if args.Mode == "push" && !source.HasExtension("container_push_target") { return nil, fmt.Errorf("The source server is missing the required \"container_push_target\" API extension") } // Allow overriding the target name if args.Name != "" { req.Name = args.Name } } sourceInfo, err := source.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get source connection info: %w", err) } destInfo, err := r.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get destination connection info: %w", err) } instance, _, err := source.GetInstance(cName) if err != nil { return nil, fmt.Errorf("Failed to get instance info: %w", err) } // Optimization for the local copy case if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (!r.IsClustered() || instance.Location == r.clusterTarget || r.HasExtension("cluster_internal_copy")) { // Project handling if destInfo.Project != sourceInfo.Project { if !r.HasExtension("container_copy_project") { return nil, fmt.Errorf("The server is missing the required \"container_copy_project\" API extension") } req.Source.Project = sourceInfo.Project } // Local copy source fields req.Source.Type = "copy" req.Source.Source = fmt.Sprintf("%s/%s", cName, sName) // Copy the instance op, err := r.CreateInstance(req) if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // If dealing with migration, we need to set the type.
if source.HasExtension("virtual-machines") { inst, _, err := source.GetInstance(instanceName) if err != nil { return nil, err } req.Type = api.InstanceType(inst.Type) } // Source request sourceReq := api.InstanceSnapshotPost{ Migration: true, Name: args.Name, } if snapshot.Stateful && args.Live { sourceReq.Live = args.Live } // Push mode migration if args != nil && args.Mode == "push" { // Get target server connection information info, err := r.GetConnectionInfo() if err != nil { return nil, err } // Create the instance req.Source.Type = "migration" req.Source.Mode = "push" op, err := r.CreateInstance(req) if err != nil { return nil, err } opAPI := op.Get() targetSecrets := map[string]string{} for k, v := range opAPI.Metadata { targetSecrets[k] = v.(string) } // Prepare the source request target := api.InstancePostTarget{} target.Operation = opAPI.ID target.Websockets = targetSecrets target.Certificate = info.Certificate sourceReq.Target = &target return r.tryMigrateInstanceSnapshot(source, cName, sName, sourceReq, info.Addresses) } // Get source server connection information info, err := source.GetConnectionInfo() if err != nil { return nil, err } op, err := source.MigrateInstanceSnapshot(cName, sName, sourceReq) if err != nil { return nil, err } opAPI := op.Get() sourceSecrets := map[string]string{} for k, v := range opAPI.Metadata { sourceSecrets[k] = v.(string) } // Relay mode migration if args != nil && args.Mode == "relay" { // Push copy source fields req.Source.Type = "migration" req.Source.Mode = "push" // Start the process targetOp, err := r.CreateInstance(req) if err != nil { return nil, err } targetOpAPI := targetOp.Get() // Extract the websockets targetSecrets := map[string]string{} for k, v := range targetOpAPI.Metadata { targetSecrets[k] = v.(string) } // Launch the relay err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) if err != nil { return nil, err } // Prepare a tracking operation rop := remoteOperation{ targetOp: targetOp, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // Pull mode migration req.Source.Type = "migration" req.Source.Mode = "pull" req.Source.Operation = opAPI.ID req.Source.Websockets = sourceSecrets req.Source.Certificate = info.Certificate return r.tryCreateInstance(req, info.Addresses, op) } // RenameInstanceSnapshot requests that Incus renames the snapshot. func (r *ProtocolIncus) RenameInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Quick check. 
if instance.Migration { return nil, fmt.Errorf("Can't ask for a migration through RenameInstanceSnapshot") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, "") if err != nil { return nil, err } return op, nil } func (r *ProtocolIncus) tryMigrateInstanceSnapshot(source InstanceServer, instanceName string, name string, req api.InstanceSnapshotPost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The target server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Target.Operation // Forward targetOp to remote op go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) op, err := source.MigrateInstanceSnapshot(instanceName, name, req) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop.targetOp = op for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if !success { rop.err = remoteOperationError("Failed instance migration", errors) } close(rop.chDone) }() return &rop, nil } // MigrateInstanceSnapshot requests that Incus prepares for a snapshot migration. func (r *ProtocolIncus) MigrateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Quick check. if !instance.Migration { return nil, fmt.Errorf("Can't ask for a rename through MigrateInstanceSnapshot") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, "") if err != nil { return nil, err } return op, nil } // DeleteInstanceSnapshot requests that Incus deletes the instance snapshot. func (r *ProtocolIncus) DeleteInstanceSnapshot(instanceName string, name string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "") if err != nil { return nil, err } return op, nil } // UpdateInstanceSnapshot requests that Incus updates the instance snapshot. func (r *ProtocolIncus) UpdateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPut, ETag string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("snapshot_expiry") { return nil, fmt.Errorf("The server is missing the required \"snapshot_expiry\" API extension") } // Send the request op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s/snapshots/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), instance, ETag) if err != nil { return nil, err } return op, nil } // GetInstanceState returns a InstanceState entry for the provided instance name. 
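//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// (error handling elided):
//
//	state, etag, err := c.GetInstanceState("c1")
//	if err == nil {
//		fmt.Println("status:", state.Status, "etag:", etag)
//	}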
func (r *ProtocolIncus) GetInstanceState(name string) (*api.InstanceState, string, error) { var uri string if r.IsAgent() { uri = "/state" } else { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } uri = fmt.Sprintf("%s/%s/state", path, url.PathEscape(name)) } state := api.InstanceState{} // Fetch the raw value etag, err := r.queryStruct("GET", uri, nil, "", &state) if err != nil { return nil, "", err } return &state, etag, nil } // UpdateInstanceState updates the instance to match the requested state. func (r *ProtocolIncus) UpdateInstanceState(name string, state api.InstanceStatePut, ETag string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Send the request op, _, err := r.queryOperation("PUT", fmt.Sprintf("%s/%s/state", path, url.PathEscape(name)), state, ETag) if err != nil { return nil, err } return op, nil } // GetInstanceAccess returns an Access entry for the provided instance name. func (r *ProtocolIncus) GetInstanceAccess(name string) (api.Access, error) { access := api.Access{} if !r.HasExtension("instance_access") { return nil, fmt.Errorf("The server is missing the required \"instance_access\" API extension") } // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/instances/%s/access", url.PathEscape(name)), nil, "", &access) if err != nil { return nil, err } return access, nil } // GetInstanceLogfiles returns a list of logfiles for the instance. func (r *ProtocolIncus) GetInstanceLogfiles(name string) ([]string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("%s/%s/logs", path, url.PathEscape(name)) _, err = r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetInstanceLogfile returns the content of the requested logfile. // // Note that it's the caller's responsibility to close the returned ReadCloser. func (r *ProtocolIncus) GetInstanceLogfile(name string, filename string) (io.ReadCloser, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Prepare the HTTP request url := fmt.Sprintf("%s/1.0%s/%s/logs/%s", r.httpBaseURL.String(), path, url.PathEscape(name), url.PathEscape(filename)) url, err = r.setQueryAttributes(url) if err != nil { return nil, err } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, err } // DeleteInstanceLogfile deletes the requested logfile. func (r *ProtocolIncus) DeleteInstanceLogfile(name string, filename string) error { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } // Send the request _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/logs/%s", path, url.PathEscape(name), url.PathEscape(filename)), nil, "") if err != nil { return err } return nil } // getInstanceExecOutputLogFile returns the content of the requested exec logfile. // // Note that it's the caller's responsibility to close the returned ReadCloser. 
func (r *ProtocolIncus) getInstanceExecOutputLogFile(name string, filename string) (io.ReadCloser, error) { err := r.CheckExtension("container_exec_recording") if err != nil { return nil, err } path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Prepare the HTTP request url := fmt.Sprintf("%s/1.0%s/%s/logs/exec-output/%s", r.httpBaseURL.String(), path, url.PathEscape(name), url.PathEscape(filename)) url, err = r.setQueryAttributes(url) if err != nil { return nil, err } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, nil } // deleteInstanceExecOutputLogFile deletes the requested exec logfile. func (r *ProtocolIncus) deleteInstanceExecOutputLogFile(instanceName string, filename string) error { err := r.CheckExtension("container_exec_recording") if err != nil { return err } path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } // Send the request _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/logs/exec-output/%s", path, url.PathEscape(instanceName), url.PathEscape(filename)), nil, "") if err != nil { return err } return nil } // GetInstanceMetadata returns instance metadata. func (r *ProtocolIncus) GetInstanceMetadata(name string) (*api.ImageMetadata, string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } if !r.HasExtension("container_edit_metadata") { return nil, "", fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } metadata := api.ImageMetadata{} url := fmt.Sprintf("%s/%s/metadata", path, url.PathEscape(name)) etag, err := r.queryStruct("GET", url, nil, "", &metadata) if err != nil { return nil, "", err } return &metadata, etag, err } // UpdateInstanceMetadata sets the content of the instance metadata file. func (r *ProtocolIncus) UpdateInstanceMetadata(name string, metadata api.ImageMetadata, ETag string) error { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } if !r.HasExtension("container_edit_metadata") { return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } url := fmt.Sprintf("%s/%s/metadata", path, url.PathEscape(name)) _, _, err = r.query("PUT", url, metadata, ETag) if err != nil { return err } return nil } // GetInstanceTemplateFiles returns the list of names of template files for an instance. func (r *ProtocolIncus) GetInstanceTemplateFiles(instanceName string) ([]string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_edit_metadata") { return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } templates := []string{} url := fmt.Sprintf("%s/%s/metadata/templates", path, url.PathEscape(instanceName)) _, err = r.queryStruct("GET", url, nil, "", &templates) if err != nil { return nil, err } return templates, nil } // GetInstanceTemplateFile returns the content of a template file for an instance.
func (r *ProtocolIncus) GetInstanceTemplateFile(instanceName string, templateName string) (io.ReadCloser, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_edit_metadata") { return nil, fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } url := fmt.Sprintf("%s/1.0%s/%s/metadata/templates?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(templateName)) url, err = r.setQueryAttributes(url) if err != nil { return nil, err } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, err } // CreateInstanceTemplateFile creates a template file for an instance. func (r *ProtocolIncus) CreateInstanceTemplateFile(instanceName string, templateName string, content io.ReadSeeker) error { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } if !r.HasExtension("container_edit_metadata") { return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } url := fmt.Sprintf("%s/1.0%s/%s/metadata/templates?path=%s", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.QueryEscape(templateName)) url, err = r.setQueryAttributes(url) if err != nil { return err } req, err := http.NewRequest("POST", url, content) if err != nil { return err } req.GetBody = func() (io.ReadCloser, error) { _, err := content.Seek(0, 0) if err != nil { return nil, err } return io.NopCloser(content), nil } req.Header.Set("Content-Type", "application/octet-stream") // Send the request resp, err := r.DoHTTP(req) if err != nil { return err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return err } } return err } // DeleteInstanceTemplateFile deletes a template file for an instance. func (r *ProtocolIncus) DeleteInstanceTemplateFile(name string, templateName string) error { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } if !r.HasExtension("container_edit_metadata") { return fmt.Errorf("The server is missing the required \"container_edit_metadata\" API extension") } _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/metadata/templates?path=%s", path, url.PathEscape(name), url.QueryEscape(templateName)), nil, "") return err } // ConsoleInstance requests that Incus attaches to the console device of an instance.
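//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer,
// that "term" is an io.ReadWriteCloser wrapping the local terminal, and that
// the InstanceConsoleArgs fields shown here are sufficient (error handling elided):
//
//	args := &incus.InstanceConsoleArgs{
//		Terminal:          term,
//		Control:           func(conn *websocket.Conn) {},
//		ConsoleDisconnect: make(chan bool),
//	}
//	op, err := c.ConsoleInstance("c1", api.InstanceConsolePost{Type: "console"}, args)
//	if err == nil {
//		err = op.Wait()
//	}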
func (r *ProtocolIncus) ConsoleInstance(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("console") { return nil, fmt.Errorf("The server is missing the required \"console\" API extension") } if console.Type == "" { console.Type = "console" } if console.Type == "vga" && !r.HasExtension("console_vga_type") { return nil, fmt.Errorf("The server is missing the required \"console_vga_type\" API extension") } if console.Force && !r.HasExtension("console_force") { return nil, fmt.Errorf(`The server is missing the required "console_force" API extension`) } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), console, "") if err != nil { return nil, err } opAPI := op.Get() if args == nil || args.Terminal == nil { return nil, fmt.Errorf("A terminal must be set") } if args.Control == nil { return nil, fmt.Errorf("A control channel must be set") } // Parse the fds fds := map[string]string{} value, ok := opAPI.Metadata["fds"] if ok { values := value.(map[string]any) for k, v := range values { fds[k] = v.(string) } } var controlConn *websocket.Conn // Call the control handler with a connection to the control socket if fds[api.SecretNameControl] == "" { return nil, fmt.Errorf("Did not receive a file descriptor for the control channel") } controlConn, err = r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) if err != nil { return nil, err } go args.Control(controlConn) // Connect to the websocket conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) if err != nil { return nil, err } // Detach from console. go func(consoleDisconnect <-chan bool) { <-consoleDisconnect msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "Detaching from console") // We don't care if this fails. This is just for convenience. _ = controlConn.WriteMessage(websocket.CloseMessage, msg) _ = controlConn.Close() }(args.ConsoleDisconnect) // And attach stdin and stdout to it go func() { _, writeDone := ws.Mirror(conn, args.Terminal) <-writeDone _ = conn.Close() }() return op, nil } // ConsoleInstanceDynamic requests that Incus attaches to the console device of a // instance with the possibility of opening multiple connections to it. // // Every time the returned 'console' function is called, a new connection will // be established and proxied to the given io.ReadWriteCloser. func (r *ProtocolIncus) ConsoleInstanceDynamic(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, func(io.ReadWriteCloser) error, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, nil, err } if !r.HasExtension("console") { return nil, nil, fmt.Errorf("The server is missing the required \"console\" API extension") } if console.Type == "" { console.Type = "console" } if console.Type == "vga" && !r.HasExtension("console_vga_type") { return nil, nil, fmt.Errorf("The server is missing the required \"console_vga_type\" API extension") } if console.Force && !r.HasExtension("console_force") { return nil, nil, fmt.Errorf(`The server is missing the required "console_force" API extension`) } // Send the request. 
op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), console, "") if err != nil { return nil, nil, err } opAPI := op.Get() if args == nil { return nil, nil, fmt.Errorf("No arguments provided") } if args.Control == nil { return nil, nil, fmt.Errorf("A control channel must be set") } // Parse the fds. fds := map[string]string{} value, ok := opAPI.Metadata["fds"] if ok { values := value.(map[string]any) for k, v := range values { fds[k] = v.(string) } } // Call the control handler with a connection to the control socket. if fds[api.SecretNameControl] == "" { return nil, nil, fmt.Errorf("Did not receive a file descriptor for the control channel") } controlConn, err := r.GetOperationWebsocket(opAPI.ID, fds[api.SecretNameControl]) if err != nil { return nil, nil, err } go args.Control(controlConn) // Handle main disconnect. go func(consoleDisconnect <-chan bool) { <-consoleDisconnect msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "Detaching from console") // We don't care if this fails. This is just for convenience. _ = controlConn.WriteMessage(websocket.CloseMessage, msg) _ = controlConn.Close() }(args.ConsoleDisconnect) f := func(rwc io.ReadWriteCloser) error { // Connect to the websocket. conn, err := r.GetOperationWebsocket(opAPI.ID, fds["0"]) if err != nil { return err } // Attach reader/writer. _, writeDone := ws.Mirror(conn, rwc) <-writeDone _ = conn.Close() return nil } return op, f, nil } // GetInstanceConsoleLog requests that Incus attaches to the console device of a instance. // // Note that it's the caller's responsibility to close the returned ReadCloser. func (r *ProtocolIncus) GetInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (io.ReadCloser, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("console") { return nil, fmt.Errorf("The server is missing the required \"console\" API extension") } // Prepare the HTTP request url := fmt.Sprintf("%s/1.0%s/%s/console", r.httpBaseURL.String(), path, url.PathEscape(instanceName)) url, err = r.setQueryAttributes(url) if err != nil { return nil, err } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, err } // DeleteInstanceConsoleLog deletes the requested instance's console log. func (r *ProtocolIncus) DeleteInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) error { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return err } if !r.HasExtension("console") { return fmt.Errorf("The server is missing the required \"console\" API extension") } // Send the request _, _, err = r.query("DELETE", fmt.Sprintf("%s/%s/console", path, url.PathEscape(instanceName)), nil, "") if err != nil { return err } return nil } // GetInstanceBackupNames returns a list of backup names for the instance. func (r *ProtocolIncus) GetInstanceBackupNames(instanceName string) ([]string, error) { if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } // Fetch the raw URL values. 
urls := []string{} baseURL := fmt.Sprintf("%s/%s/backups", path, url.PathEscape(instanceName)) _, err = r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetInstanceBackups returns a list of backups for the instance. func (r *ProtocolIncus) GetInstanceBackups(instanceName string) ([]api.InstanceBackup, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Fetch the raw value backups := []api.InstanceBackup{} _, err = r.queryStruct("GET", fmt.Sprintf("%s/%s/backups?recursion=1", path, url.PathEscape(instanceName)), nil, "", &backups) if err != nil { return nil, err } return backups, nil } // GetInstanceBackup returns a Backup struct for the provided instance and backup names. func (r *ProtocolIncus) GetInstanceBackup(instanceName string, name string) (*api.InstanceBackup, string, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, "", err } if !r.HasExtension("container_backup") { return nil, "", fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Fetch the raw value backup := api.InstanceBackup{} etag, err := r.queryStruct("GET", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "", &backup) if err != nil { return nil, "", err } return &backup, etag, nil } // CreateInstanceBackup requests that Incus creates a new backup for the instance. func (r *ProtocolIncus) CreateInstanceBackup(instanceName string, backup api.InstanceBackupsPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/backups", path, url.PathEscape(instanceName)), backup, "") if err != nil { return nil, err } return op, nil } // RenameInstanceBackup requests that Incus renames the backup. func (r *ProtocolIncus) RenameInstanceBackup(instanceName string, name string, backup api.InstanceBackupPost) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), backup, "") if err != nil { return nil, err } return op, nil } // DeleteInstanceBackup requests that Incus deletes the instance backup. func (r *ProtocolIncus) DeleteInstanceBackup(instanceName string, name string) (Operation, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Send the request op, _, err := r.queryOperation("DELETE", fmt.Sprintf("%s/%s/backups/%s", path, url.PathEscape(instanceName), url.PathEscape(name)), nil, "") if err != nil { return nil, err } return op, nil } // GetInstanceBackupFile requests the instance backup content. 
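//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// and that a backup named "backup0" already exists for instance "c1"
// (error handling elided):
//
//	target, _ := os.Create("/tmp/c1-backup0.tar.gz")
//	defer target.Close()
//
//	resp, err := c.GetInstanceBackupFile("c1", "backup0", &incus.BackupFileRequest{BackupFile: target})
//	if err == nil {
//		fmt.Println("wrote", resp.Size, "bytes")
//	}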
func (r *ProtocolIncus) GetInstanceBackupFile(instanceName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) { path, _, err := r.instanceTypeToPath(api.InstanceTypeAny) if err != nil { return nil, err } if !r.HasExtension("container_backup") { return nil, fmt.Errorf("The server is missing the required \"container_backup\" API extension") } // Build the URL uri := fmt.Sprintf("%s/1.0%s/%s/backups/%s/export", r.httpBaseURL.String(), path, url.PathEscape(instanceName), url.PathEscape(name)) if r.project != "" { uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project)) } // Prepare the download request request, err := http.NewRequest("GET", uri, nil) if err != nil { return nil, err } if r.httpUserAgent != "" { request.Header.Set("User-Agent", r.httpUserAgent) } // Start the request response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.DoHTTP, request) if err != nil { return nil, err } defer func() { _ = response.Body.Close() }() defer close(doneCh) if response.StatusCode != http.StatusOK { _, _, err := incusParseResponse(response) if err != nil { return nil, err } } // Handle the data body := response.Body if req.ProgressHandler != nil { body = &ioprogress.ProgressReader{ ReadCloser: response.Body, Tracker: &ioprogress.ProgressTracker{ Length: response.ContentLength, Handler: func(percent int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) }, }, } } size, err := io.Copy(req.BackupFile, body) if err != nil { return nil, err } resp := BackupFileResponse{} resp.Size = size return &resp, nil } func (r *ProtocolIncus) proxyMigration(targetOp *operation, targetSecrets map[string]string, source InstanceServer, sourceOp *operation, sourceSecrets map[string]string) error { // Quick checks. 
for n := range targetSecrets { _, ok := sourceSecrets[n] if !ok { return fmt.Errorf("Migration target expects the \"%s\" socket but source isn't providing it", n) } } if targetSecrets[api.SecretNameControl] == "" { return fmt.Errorf("Migration target didn't setup the required \"control\" socket") } // Struct used to hold everything together type proxy struct { done chan struct{} sourceConn *websocket.Conn targetConn *websocket.Conn } proxies := map[string]*proxy{} // Connect the control socket sourceConn, err := source.GetOperationWebsocket(sourceOp.ID, sourceSecrets[api.SecretNameControl]) if err != nil { return err } targetConn, err := r.GetOperationWebsocket(targetOp.ID, targetSecrets[api.SecretNameControl]) if err != nil { return err } proxies[api.SecretNameControl] = &proxy{ done: ws.Proxy(sourceConn, targetConn), sourceConn: sourceConn, targetConn: targetConn, } // Connect the data sockets for name := range sourceSecrets { if name == api.SecretNameControl { continue } // Handle resets (used for multiple objects) sourceConn, err := source.GetOperationWebsocket(sourceOp.ID, sourceSecrets[name]) if err != nil { break } targetConn, err := r.GetOperationWebsocket(targetOp.ID, targetSecrets[name]) if err != nil { break } proxies[name] = &proxy{ sourceConn: sourceConn, targetConn: targetConn, done: ws.Proxy(sourceConn, targetConn), } } // Cleanup once everything is done go func() { // Wait for control socket <-proxies[api.SecretNameControl].done _ = proxies[api.SecretNameControl].sourceConn.Close() _ = proxies[api.SecretNameControl].targetConn.Close() // Then deal with the others for name, proxy := range proxies { if name == api.SecretNameControl { continue } <-proxy.done _ = proxy.sourceConn.Close() _ = proxy.targetConn.Close() } }() return nil } // GetInstanceDebugMemory retrieves memory debug information for a given instance and saves it to the specified file path. func (r *ProtocolIncus) GetInstanceDebugMemory(name string, format string) (io.ReadCloser, error) { path, v, err := r.instanceTypeToPath(api.InstanceTypeVM) if err != nil { return nil, err } v.Set("format", format) // Prepare the HTTP request requestURL := fmt.Sprintf("%s/1.0%s/%s/debug/memory?%s", r.httpBaseURL.String(), path, url.PathEscape(name), v.Encode()) requestURL, err = r.setQueryAttributes(requestURL) if err != nil { return nil, err } req, err := http.NewRequest("GET", requestURL, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, nil } incus-6.0.4/client/incus_metadata.go000066400000000000000000000011431477363751000174440ustar00rootroot00000000000000package incus import ( "fmt" "github.com/lxc/incus/v6/shared/api" ) // GetMetadataConfiguration returns a configuration metadata struct. 
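//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// (error handling elided):
//
//	meta, err := c.GetMetadataConfiguration()
//	if err == nil {
//		// Walk the returned metadata configuration tree as needed.
//		_ = meta
//	}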
func (r *ProtocolIncus) GetMetadataConfiguration() (*api.MetadataConfiguration, error) { metadataConfiguration := api.MetadataConfiguration{} if !r.HasExtension("metadata_configuration") { return nil, fmt.Errorf("The server is missing the required \"metadata_configuration\" API extension") } _, err := r.queryStruct("GET", "/metadata/configuration", nil, "", &metadataConfiguration) if err != nil { return nil, err } return &metadataConfiguration, nil } incus-6.0.4/client/incus_network_acls.go000066400000000000000000000111661477363751000203650ustar00rootroot00000000000000package incus import ( "fmt" "io" "net/http" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkACLNames returns a list of network ACL names. func (r *ProtocolIncus) GetNetworkACLNames() ([]string, error) { if !r.HasExtension("network_acl") { return nil, fmt.Errorf(`The server is missing the required "network_acl" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := "/network-acls" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworkACLs returns a list of Network ACL structs. func (r *ProtocolIncus) GetNetworkACLs() ([]api.NetworkACL, error) { if !r.HasExtension("network_acl") { return nil, fmt.Errorf(`The server is missing the required "network_acl" API extension`) } acls := []api.NetworkACL{} // Fetch the raw value. _, err := r.queryStruct("GET", "/network-acls?recursion=1", nil, "", &acls) if err != nil { return nil, err } return acls, nil } // GetNetworkACLsAllProjects returns all list of Network ACL structs across all projects. func (r *ProtocolIncus) GetNetworkACLsAllProjects() ([]api.NetworkACL, error) { if !r.HasExtension("network_acls_all_projects") { return nil, fmt.Errorf(`The server is missing the required "network_acls_all_projects" API extension`) } acls := []api.NetworkACL{} _, err := r.queryStruct("GET", "/network-acls?recursion=1&all-projects=true", nil, "", &acls) if err != nil { return nil, err } return acls, nil } // GetNetworkACL returns a Network ACL entry for the provided name. func (r *ProtocolIncus) GetNetworkACL(name string) (*api.NetworkACL, string, error) { if !r.HasExtension("network_acl") { return nil, "", fmt.Errorf(`The server is missing the required "network_acl" API extension`) } acl := api.NetworkACL{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), nil, "", &acl) if err != nil { return nil, "", err } return &acl, etag, nil } // GetNetworkACLLogfile returns a reader for the ACL log file. // // Note that it's the caller's responsibility to close the returned ReadCloser. func (r *ProtocolIncus) GetNetworkACLLogfile(name string) (io.ReadCloser, error) { if !r.HasExtension("network_acl_log") { return nil, fmt.Errorf(`The server is missing the required "network_acl_log" API extension`) } // Prepare the HTTP request url := fmt.Sprintf("%s/1.0/network-acls/%s/log", r.httpBaseURL.String(), url.PathEscape(name)) url, err := r.setQueryAttributes(url) if err != nil { return nil, err } req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } // Send the request resp, err := r.DoHTTP(req) if err != nil { return nil, err } // Check the return value for a cleaner error if resp.StatusCode != http.StatusOK { _, _, err := incusParseResponse(resp) if err != nil { return nil, err } } return resp.Body, err } // CreateNetworkACL defines a new network ACL using the provided struct. 
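//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// and that api.NetworkACLsPost exposes the promoted Name and Description
// fields used below (error handling elided):
//
//	acl := api.NetworkACLsPost{}
//	acl.Name = "web-traffic"
//	acl.Description = "ACL for the web tier"
//	err := c.CreateNetworkACL(acl)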
func (r *ProtocolIncus) CreateNetworkACL(acl api.NetworkACLsPost) error { if !r.HasExtension("network_acl") { return fmt.Errorf(`The server is missing the required "network_acl" API extension`) } // Send the request. _, _, err := r.query("POST", "/network-acls", acl, "") if err != nil { return err } return nil } // UpdateNetworkACL updates the network ACL to match the provided struct. func (r *ProtocolIncus) UpdateNetworkACL(name string, acl api.NetworkACLPut, ETag string) error { if !r.HasExtension("network_acl") { return fmt.Errorf(`The server is missing the required "network_acl" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), acl, ETag) if err != nil { return err } return nil } // RenameNetworkACL renames an existing network ACL entry. func (r *ProtocolIncus) RenameNetworkACL(name string, acl api.NetworkACLPost) error { if !r.HasExtension("network_acl") { return fmt.Errorf(`The server is missing the required "network_acl" API extension`) } // Send the request. _, _, err := r.query("POST", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), acl, "") if err != nil { return err } return nil } // DeleteNetworkACL deletes an existing network ACL. func (r *ProtocolIncus) DeleteNetworkACL(name string) error { if !r.HasExtension("network_acl") { return fmt.Errorf(`The server is missing the required "network_acl" API extension`) } // Send the request. _, _, err := r.query("DELETE", fmt.Sprintf("/network-acls/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_network_allocations.go000066400000000000000000000020451477363751000217470ustar00rootroot00000000000000package incus import ( "github.com/lxc/incus/v6/shared/api" ) // GetNetworkAllocations returns a list of Network allocations for a specific project. func (r *ProtocolIncus) GetNetworkAllocations() ([]api.NetworkAllocations, error) { err := r.CheckExtension("network_allocations") if err != nil { return nil, err } // Fetch the raw value. netAllocations := []api.NetworkAllocations{} _, err = r.queryStruct("GET", "/network-allocations", nil, "", &netAllocations) if err != nil { return nil, err } return netAllocations, nil } // GetNetworkAllocationsAllProjects returns a list of Network allocations across all projects. func (r *ProtocolIncus) GetNetworkAllocationsAllProjects() ([]api.NetworkAllocations, error) { err := r.CheckExtension("network_allocations") if err != nil { return nil, err } // Fetch the raw value. netAllocations := []api.NetworkAllocations{} _, err = r.queryStruct("GET", "/network-allocations?all-projects=true", nil, "", &netAllocations) if err != nil { return nil, err } return netAllocations, nil } incus-6.0.4/client/incus_network_forwards.go000066400000000000000000000066201477363751000212710ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkForwardAddresses returns a list of network forward listen addresses. func (r *ProtocolIncus) GetNetworkForwardAddresses(networkName string) ([]string, error) { if !r.HasExtension("network_forward") { return nil, fmt.Errorf(`The server is missing the required "network_forward" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/networks/%s/forwards", url.PathEscape(networkName)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) 
} // GetNetworkForwards returns a list of Network forward structs. func (r *ProtocolIncus) GetNetworkForwards(networkName string) ([]api.NetworkForward, error) { if !r.HasExtension("network_forward") { return nil, fmt.Errorf(`The server is missing the required "network_forward" API extension`) } forwards := []api.NetworkForward{} // Fetch the raw value. _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/forwards?recursion=1", url.PathEscape(networkName)), nil, "", &forwards) if err != nil { return nil, err } return forwards, nil } // GetNetworkForward returns a Network forward entry for the provided network and listen address. func (r *ProtocolIncus) GetNetworkForward(networkName string, listenAddress string) (*api.NetworkForward, string, error) { if !r.HasExtension("network_forward") { return nil, "", fmt.Errorf(`The server is missing the required "network_forward" API extension`) } forward := api.NetworkForward{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), nil, "", &forward) if err != nil { return nil, "", err } return &forward, etag, nil } // CreateNetworkForward defines a new network forward using the provided struct. func (r *ProtocolIncus) CreateNetworkForward(networkName string, forward api.NetworkForwardsPost) error { if !r.HasExtension("network_forward") { return fmt.Errorf(`The server is missing the required "network_forward" API extension`) } // Send the request. _, _, err := r.query("POST", fmt.Sprintf("/networks/%s/forwards", url.PathEscape(networkName)), forward, "") if err != nil { return err } return nil } // UpdateNetworkForward updates the network forward to match the provided struct. func (r *ProtocolIncus) UpdateNetworkForward(networkName string, listenAddress string, forward api.NetworkForwardPut, ETag string) error { if !r.HasExtension("network_forward") { return fmt.Errorf(`The server is missing the required "network_forward" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), forward, ETag) if err != nil { return err } return nil } // DeleteNetworkForward deletes an existing network forward. func (r *ProtocolIncus) DeleteNetworkForward(networkName string, listenAddress string) error { if !r.HasExtension("network_forward") { return fmt.Errorf(`The server is missing the required "network_forward" API extension`) } // Send the request. _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s/forwards/%s", url.PathEscape(networkName), url.PathEscape(listenAddress)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_network_integrations.go000066400000000000000000000074201477363751000221470ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkIntegrationNames returns a list of network integration names. func (r *ProtocolIncus) GetNetworkIntegrationNames() ([]string, error) { if !r.HasExtension("network_integrations") { return nil, fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := "/network-integrations" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworkIntegrations returns a list of network integration structs. 
func (r *ProtocolIncus) GetNetworkIntegrations() ([]api.NetworkIntegration, error) { if !r.HasExtension("network_integrations") { return nil, fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } integrations := []api.NetworkIntegration{} // Fetch the raw value. _, err := r.queryStruct("GET", "/network-integrations?recursion=1", nil, "", &integrations) if err != nil { return nil, err } return integrations, nil } // GetNetworkIntegration returns a network integration entry. func (r *ProtocolIncus) GetNetworkIntegration(name string) (*api.NetworkIntegration, string, error) { if !r.HasExtension("network_integrations") { return nil, "", fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } integration := api.NetworkIntegration{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/network-integrations/%s", url.PathEscape(name)), nil, "", &integration) if err != nil { return nil, "", err } return &integration, etag, nil } // CreateNetworkIntegration defines a new network integration using the provided struct. // Returns true if the integration connection has been mutually created. Returns false if integrationing has been only initiated. func (r *ProtocolIncus) CreateNetworkIntegration(integration api.NetworkIntegrationsPost) error { if !r.HasExtension("network_integrations") { return fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } // Send the request. _, _, err := r.query("POST", "/network-integrations", integration, "") if err != nil { return err } return nil } // UpdateNetworkIntegration updates the network integration to match the provided struct. func (r *ProtocolIncus) UpdateNetworkIntegration(name string, integration api.NetworkIntegrationPut, ETag string) error { if !r.HasExtension("network_integrations") { return fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/network-integrations/%s", url.PathEscape(name)), integration, ETag) if err != nil { return err } return nil } // RenameNetworkIntegration renames an existing network integration entry. func (r *ProtocolIncus) RenameNetworkIntegration(name string, network api.NetworkIntegrationPost) error { if !r.HasExtension("network_integrations") { return fmt.Errorf("The server is missing the required \"network_integrations\" API extension") } // Send the request _, _, err := r.query("POST", fmt.Sprintf("/network-integrations/%s", url.PathEscape(name)), network, "") if err != nil { return err } return nil } // DeleteNetworkIntegration deletes an existing network integration. func (r *ProtocolIncus) DeleteNetworkIntegration(name string) error { if !r.HasExtension("network_integrations") { return fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } // Send the request. _, _, err := r.query("DELETE", fmt.Sprintf("/network-integrations/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_network_load_balancers.go000066400000000000000000000075721477363751000224020ustar00rootroot00000000000000package incus import ( "github.com/lxc/incus/v6/shared/api" ) // GetNetworkLoadBalancerAddresses returns a list of network load balancer listen addresses. 
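//
// A minimal usage sketch, assuming "c" is an already connected InstanceServer
// and a managed network named "incusbr0" exists (error handling elided):
//
//	addresses, err := c.GetNetworkLoadBalancerAddresses("incusbr0")
//	if err == nil {
//		for _, addr := range addresses {
//			fmt.Println("load balancer listen address:", addr)
//		}
//	}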
func (r *ProtocolIncus) GetNetworkLoadBalancerAddresses(networkName string) ([]string, error) { err := r.CheckExtension("network_load_balancer") if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} u := api.NewURL().Path("networks", networkName, "load-balancers") _, err = r.queryStruct("GET", u.String(), nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(u.String(), urls...) } // GetNetworkLoadBalancers returns a list of Network load balancer structs. func (r *ProtocolIncus) GetNetworkLoadBalancers(networkName string) ([]api.NetworkLoadBalancer, error) { err := r.CheckExtension("network_load_balancer") if err != nil { return nil, err } loadBalancers := []api.NetworkLoadBalancer{} // Fetch the raw value. u := api.NewURL().Path("networks", networkName, "load-balancers").WithQuery("recursion", "1") _, err = r.queryStruct("GET", u.String(), nil, "", &loadBalancers) if err != nil { return nil, err } return loadBalancers, nil } // GetNetworkLoadBalancer returns a Network load balancer entry for the provided network and listen address. func (r *ProtocolIncus) GetNetworkLoadBalancer(networkName string, listenAddress string) (*api.NetworkLoadBalancer, string, error) { err := r.CheckExtension("network_load_balancer") if err != nil { return nil, "", err } loadBalancer := api.NetworkLoadBalancer{} // Fetch the raw value. u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) etag, err := r.queryStruct("GET", u.String(), nil, "", &loadBalancer) if err != nil { return nil, "", err } return &loadBalancer, etag, nil } // CreateNetworkLoadBalancer defines a new network load balancer using the provided struct. func (r *ProtocolIncus) CreateNetworkLoadBalancer(networkName string, loadBalancer api.NetworkLoadBalancersPost) error { err := r.CheckExtension("network_load_balancer") if err != nil { return err } // Send the request. u := api.NewURL().Path("networks", networkName, "load-balancers") _, _, err = r.query("POST", u.String(), loadBalancer, "") if err != nil { return err } return nil } // UpdateNetworkLoadBalancer updates the network load balancer to match the provided struct. func (r *ProtocolIncus) UpdateNetworkLoadBalancer(networkName string, listenAddress string, loadBalancer api.NetworkLoadBalancerPut, ETag string) error { err := r.CheckExtension("network_load_balancer") if err != nil { return err } // Send the request. u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) _, _, err = r.query("PUT", u.String(), loadBalancer, ETag) if err != nil { return err } return nil } // DeleteNetworkLoadBalancer deletes an existing network load balancer. func (r *ProtocolIncus) DeleteNetworkLoadBalancer(networkName string, listenAddress string) error { err := r.CheckExtension("network_load_balancer") if err != nil { return err } // Send the request. u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress) _, _, err = r.query("DELETE", u.String(), nil, "") if err != nil { return err } return nil } // GetNetworkLoadBalancerState returns a Network load balancer state for the provided network and listen address. func (r *ProtocolIncus) GetNetworkLoadBalancerState(networkName string, listenAddress string) (*api.NetworkLoadBalancerState, error) { err := r.CheckExtension("network_load_balancer_state") if err != nil { return nil, err } lbState := api.NetworkLoadBalancerState{} // Fetch the raw value. 
u := api.NewURL().Path("networks", networkName, "load-balancers", listenAddress, "state") _, err = r.queryStruct("GET", u.String(), nil, "", &lbState) if err != nil { return nil, err } return &lbState, nil } incus-6.0.4/client/incus_network_peers.go000066400000000000000000000067561477363751000205660ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkPeerNames returns a list of network peer names. func (r *ProtocolIncus) GetNetworkPeerNames(networkName string) ([]string, error) { if !r.HasExtension("network_peer") { return nil, fmt.Errorf(`The server is missing the required "network_peer" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/networks/%s/peers", url.PathEscape(networkName)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworkPeers returns a list of network peer structs. func (r *ProtocolIncus) GetNetworkPeers(networkName string) ([]api.NetworkPeer, error) { if !r.HasExtension("network_peer") { return nil, fmt.Errorf(`The server is missing the required "network_peer" API extension`) } peers := []api.NetworkPeer{} // Fetch the raw value. _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/peers?recursion=1", url.PathEscape(networkName)), nil, "", &peers) if err != nil { return nil, err } return peers, nil } // GetNetworkPeer returns a network peer entry for the provided network and peer name. func (r *ProtocolIncus) GetNetworkPeer(networkName string, peerName string) (*api.NetworkPeer, string, error) { if !r.HasExtension("network_peer") { return nil, "", fmt.Errorf(`The server is missing the required "network_peer" API extension`) } peer := api.NetworkPeer{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), nil, "", &peer) if err != nil { return nil, "", err } return &peer, etag, nil } // CreateNetworkPeer defines a new network peer using the provided struct. // Peering with another local network is only fully established once the target network has created a matching peer entry. func (r *ProtocolIncus) CreateNetworkPeer(networkName string, peer api.NetworkPeersPost) error { if !r.HasExtension("network_peer") { return fmt.Errorf(`The server is missing the required "network_peer" API extension`) } if peer.Type != "" && peer.Type != "local" && !r.HasExtension("network_integrations") { return fmt.Errorf(`The server is missing the required "network_integrations" API extension`) } // Send the request. _, _, err := r.query("POST", fmt.Sprintf("/networks/%s/peers", url.PathEscape(networkName)), peer, "") if err != nil { return err } return nil } // UpdateNetworkPeer updates the network peer to match the provided struct. func (r *ProtocolIncus) UpdateNetworkPeer(networkName string, peerName string, peer api.NetworkPeerPut, ETag string) error { if !r.HasExtension("network_peer") { return fmt.Errorf(`The server is missing the required "network_peer" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), peer, ETag) if err != nil { return err } return nil } // DeleteNetworkPeer deletes an existing network peer.
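// Hedged usage sketch for the deletion call declared below. The client "c" is
// an assumed incus.InstanceServer; the network and peer names are examples:
//
//	err := c.DeleteNetworkPeer("ovn0", "peer-with-ovn1")
//	if err != nil {
//		return err
//	}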
func (r *ProtocolIncus) DeleteNetworkPeer(networkName string, peerName string) error { if !r.HasExtension("network_peer") { return fmt.Errorf(`The server is missing the required "network_peer" API extension`) } // Send the request. _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s/peers/%s", url.PathEscape(networkName), url.PathEscape(peerName)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_network_zones.go000066400000000000000000000151641477363751000206030ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkZoneNames returns a list of network zone names. func (r *ProtocolIncus) GetNetworkZoneNames() ([]string, error) { if !r.HasExtension("network_dns") { return nil, fmt.Errorf(`The server is missing the required "network_dns" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := "/network-zones" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworkZones returns a list of Network zone structs. func (r *ProtocolIncus) GetNetworkZones() ([]api.NetworkZone, error) { if !r.HasExtension("network_dns") { return nil, fmt.Errorf(`The server is missing the required "network_dns" API extension`) } zones := []api.NetworkZone{} // Fetch the raw value. _, err := r.queryStruct("GET", "/network-zones?recursion=1", nil, "", &zones) if err != nil { return nil, err } return zones, nil } // GetNetworkZonesAllProjects returns a list of network zones across all projects as NetworkZone structs. func (r *ProtocolIncus) GetNetworkZonesAllProjects() ([]api.NetworkZone, error) { err := r.CheckExtension("network_zones_all_projects") if err != nil { return nil, fmt.Errorf(`The server is missing the required "network_zones_all_projects" API extension`) } zones := []api.NetworkZone{} _, err = r.queryStruct("GET", "/network-zones?recursion=1&all-projects=true", nil, "", &zones) if err != nil { return nil, err } return zones, nil } // GetNetworkZone returns a Network zone entry for the provided name. func (r *ProtocolIncus) GetNetworkZone(name string) (*api.NetworkZone, string, error) { if !r.HasExtension("network_dns") { return nil, "", fmt.Errorf(`The server is missing the required "network_dns" API extension`) } zone := api.NetworkZone{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), nil, "", &zone) if err != nil { return nil, "", err } return &zone, etag, nil } // CreateNetworkZone defines a new Network zone using the provided struct. func (r *ProtocolIncus) CreateNetworkZone(zone api.NetworkZonesPost) error { if !r.HasExtension("network_dns") { return fmt.Errorf(`The server is missing the required "network_dns" API extension`) } // Send the request. _, _, err := r.query("POST", "/network-zones", zone, "") if err != nil { return err } return nil } // UpdateNetworkZone updates the network zone to match the provided struct. func (r *ProtocolIncus) UpdateNetworkZone(name string, zone api.NetworkZonePut, ETag string) error { if !r.HasExtension("network_dns") { return fmt.Errorf(`The server is missing the required "network_dns" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), zone, ETag) if err != nil { return err } return nil } // DeleteNetworkZone deletes an existing network zone. 
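// Hedged usage sketch for the deletion call declared below. The client "c" is
// an assumed incus.InstanceServer; the zone name is an example:
//
//	err := c.DeleteNetworkZone("example.net")
//	if err != nil {
//		return err
//	}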
func (r *ProtocolIncus) DeleteNetworkZone(name string) error { if !r.HasExtension("network_dns") { return fmt.Errorf(`The server is missing the required "network_dns" API extension`) } // Send the request. _, _, err := r.query("DELETE", fmt.Sprintf("/network-zones/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } // GetNetworkZoneRecordNames returns a list of network zone record names. func (r *ProtocolIncus) GetNetworkZoneRecordNames(zone string) ([]string, error) { if !r.HasExtension("network_dns_records") { return nil, fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/network-zones/%s/records", url.PathEscape(zone)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworkZoneRecords returns a list of Network zone record structs. func (r *ProtocolIncus) GetNetworkZoneRecords(zone string) ([]api.NetworkZoneRecord, error) { if !r.HasExtension("network_dns_records") { return nil, fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } records := []api.NetworkZoneRecord{} // Fetch the raw value. _, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s/records?recursion=1", url.PathEscape(zone)), nil, "", &records) if err != nil { return nil, err } return records, nil } // GetNetworkZoneRecord returns a Network zone record entry for the provided zone and name. func (r *ProtocolIncus) GetNetworkZoneRecord(zone string, name string) (*api.NetworkZoneRecord, string, error) { if !r.HasExtension("network_dns_records") { return nil, "", fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } record := api.NetworkZoneRecord{} // Fetch the raw value. etag, err := r.queryStruct("GET", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), nil, "", &record) if err != nil { return nil, "", err } return &record, etag, nil } // CreateNetworkZoneRecord defines a new Network zone record using the provided struct. func (r *ProtocolIncus) CreateNetworkZoneRecord(zone string, record api.NetworkZoneRecordsPost) error { if !r.HasExtension("network_dns_records") { return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } // Send the request. _, _, err := r.query("POST", fmt.Sprintf("/network-zones/%s/records", url.PathEscape(zone)), record, "") if err != nil { return err } return nil } // UpdateNetworkZoneRecord updates the network zone record to match the provided struct. func (r *ProtocolIncus) UpdateNetworkZoneRecord(zone string, name string, record api.NetworkZoneRecordPut, ETag string) error { if !r.HasExtension("network_dns_records") { return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } // Send the request. _, _, err := r.query("PUT", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), record, ETag) if err != nil { return err } return nil } // DeleteNetworkZoneRecord deletes an existing network zone record. func (r *ProtocolIncus) DeleteNetworkZoneRecord(zone string, name string) error { if !r.HasExtension("network_dns_records") { return fmt.Errorf(`The server is missing the required "network_dns_records" API extension`) } // Send the request. 
_, _, err := r.query("DELETE", fmt.Sprintf("/network-zones/%s/records/%s", url.PathEscape(zone), url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_networks.go000066400000000000000000000110461477363751000175430ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // GetNetworkNames returns a list of network names. func (r *ProtocolIncus) GetNetworkNames() ([]string, error) { if !r.HasExtension("network") { return nil, fmt.Errorf("The server is missing the required \"network\" API extension") } // Fetch the raw values. urls := []string{} baseURL := "/networks" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetNetworks returns a list of Network struct. func (r *ProtocolIncus) GetNetworks() ([]api.Network, error) { if !r.HasExtension("network") { return nil, fmt.Errorf("The server is missing the required \"network\" API extension") } networks := []api.Network{} // Fetch the raw value _, err := r.queryStruct("GET", "/networks?recursion=1", nil, "", &networks) if err != nil { return nil, err } return networks, nil } // GetNetworksAllProjects gets all networks across all projects. func (r *ProtocolIncus) GetNetworksAllProjects() ([]api.Network, error) { if !r.HasExtension("networks_all_projects") { return nil, fmt.Errorf(`The server is missing the required "networks_all_projects" API extension`) } networks := []api.Network{} _, err := r.queryStruct("GET", "/networks?recursion=1&all-projects=true", nil, "", &networks) if err != nil { return nil, err } return networks, nil } // GetNetwork returns a Network entry for the provided name. func (r *ProtocolIncus) GetNetwork(name string) (*api.Network, string, error) { if !r.HasExtension("network") { return nil, "", fmt.Errorf("The server is missing the required \"network\" API extension") } network := api.Network{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s", url.PathEscape(name)), nil, "", &network) if err != nil { return nil, "", err } return &network, etag, nil } // GetNetworkLeases returns a list of Network struct. func (r *ProtocolIncus) GetNetworkLeases(name string) ([]api.NetworkLease, error) { if !r.HasExtension("network_leases") { return nil, fmt.Errorf("The server is missing the required \"network_leases\" API extension") } leases := []api.NetworkLease{} // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/leases", url.PathEscape(name)), nil, "", &leases) if err != nil { return nil, err } return leases, nil } // GetNetworkState returns metrics and information on the running network. func (r *ProtocolIncus) GetNetworkState(name string) (*api.NetworkState, error) { if !r.HasExtension("network_state") { return nil, fmt.Errorf("The server is missing the required \"network_state\" API extension") } state := api.NetworkState{} // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/networks/%s/state", url.PathEscape(name)), nil, "", &state) if err != nil { return nil, err } return &state, nil } // CreateNetwork defines a new network using the provided Network struct. 
func (r *ProtocolIncus) CreateNetwork(network api.NetworksPost) error { if !r.HasExtension("network") { return fmt.Errorf("The server is missing the required \"network\" API extension") } // Send the request _, _, err := r.query("POST", "/networks", network, "") if err != nil { return err } return nil } // UpdateNetwork updates the network to match the provided Network struct. func (r *ProtocolIncus) UpdateNetwork(name string, network api.NetworkPut, ETag string) error { if !r.HasExtension("network") { return fmt.Errorf("The server is missing the required \"network\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/networks/%s", url.PathEscape(name)), network, ETag) if err != nil { return err } return nil } // RenameNetwork renames an existing network entry. func (r *ProtocolIncus) RenameNetwork(name string, network api.NetworkPost) error { if !r.HasExtension("network") { return fmt.Errorf("The server is missing the required \"network\" API extension") } // Send the request _, _, err := r.query("POST", fmt.Sprintf("/networks/%s", url.PathEscape(name)), network, "") if err != nil { return err } return nil } // DeleteNetwork deletes an existing network. func (r *ProtocolIncus) DeleteNetwork(name string) error { if !r.HasExtension("network") { return fmt.Errorf("The server is missing the required \"network\" API extension") } // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/networks/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_oidc.go000066400000000000000000000230161477363751000166050ustar00rootroot00000000000000package incus import ( "context" "crypto/rand" "fmt" "io" "net/http" "net/url" "os" "os/signal" "strings" "syscall" "time" "github.com/gorilla/websocket" "github.com/zitadel/oidc/v3/pkg/client/rp" httphelper "github.com/zitadel/oidc/v3/pkg/http" "github.com/zitadel/oidc/v3/pkg/oidc" "golang.org/x/oauth2" "github.com/lxc/incus/v6/shared/util" ) // ErrOIDCExpired is returned when the token is expired and we can't retry the request ourselves. var ErrOIDCExpired = fmt.Errorf("OIDC token expired, please re-try the request") // setupOIDCClient initializes the OIDC (OpenID Connect) client with given tokens if it hasn't been set up already. // It also assigns the protocol's http client to the oidcClient's httpClient. func (r *ProtocolIncus) setupOIDCClient(token *oidc.Tokens[*oidc.IDTokenClaims]) { if r.oidcClient != nil { return } r.oidcClient = newOIDCClient(token) r.oidcClient.httpClient = r.http } // GetOIDCTokens returns the current OIDC tokens (if any) from the OIDC client. // // This should only be used by internal Incus tools when it's not possible to get the tokens from a Config struct. func (r *ProtocolIncus) GetOIDCTokens() *oidc.Tokens[*oidc.IDTokenClaims] { if r.oidcClient == nil { return nil } return r.oidcClient.tokens } // Custom transport that modifies requests to inject the audience field. type oidcTransport struct { deviceAuthorizationEndpoint string audience string } // oidcTransport is a custom HTTP transport that injects the audience field into requests directed at the device authorization endpoint. // RoundTrip is a method of oidcTransport that modifies the request, adds the audience parameter if appropriate, and sends it along. func (o *oidcTransport) RoundTrip(r *http.Request) (*http.Response, error) { // Don't modify the request if it's not to the device authorization endpoint, or there are no // URL parameters which need to be set. 
if r.URL.String() != o.deviceAuthorizationEndpoint || len(o.audience) == 0 { return http.DefaultTransport.RoundTrip(r) } err := r.ParseForm() if err != nil { return nil, err } if o.audience != "" { r.Form.Add("audience", o.audience) } // Update the body with the new URL parameters. body := r.Form.Encode() r.Body = io.NopCloser(strings.NewReader(body)) r.ContentLength = int64(len(body)) return http.DefaultTransport.RoundTrip(r) } var ( errRefreshAccessToken = fmt.Errorf("Failed refreshing access token") oidcScopes = []string{oidc.ScopeOpenID, oidc.ScopeOfflineAccess, oidc.ScopeEmail} ) type oidcClient struct { httpClient *http.Client oidcTransport *oidcTransport tokens *oidc.Tokens[*oidc.IDTokenClaims] } // oidcClient is a structure encapsulating an HTTP client, OIDC transport, and a token for OpenID Connect (OIDC) operations. // newOIDCClient constructs a new oidcClient, ensuring the token field is non-nil to prevent panics during authentication. func newOIDCClient(tokens *oidc.Tokens[*oidc.IDTokenClaims]) *oidcClient { client := oidcClient{ tokens: tokens, httpClient: &http.Client{}, oidcTransport: &oidcTransport{}, } // Ensure client.tokens is never nil otherwise authenticate() will panic. if client.tokens == nil { client.tokens = &oidc.Tokens[*oidc.IDTokenClaims]{} } return &client } // getAccessToken returns the Access Token from the oidcClient's tokens, or an empty string if no tokens are present. func (o *oidcClient) getAccessToken() string { if o.tokens == nil || o.tokens.Token == nil { return "" } return o.tokens.AccessToken } // do function executes an HTTP request using the oidcClient's http client, and manages authorization by refreshing or authenticating as needed. // If the request fails with an HTTP Unauthorized status, it attempts to refresh the access token, or perform an OIDC authentication if refresh fails. func (o *oidcClient) do(req *http.Request) (*http.Response, error) { resp, err := o.httpClient.Do(req) if err != nil { return nil, err } // Return immediately if the error is not HTTP status unauthorized. if resp.StatusCode != http.StatusUnauthorized { return resp, nil } issuer := resp.Header.Get("X-Incus-OIDC-issuer") clientID := resp.Header.Get("X-Incus-OIDC-clientid") audience := resp.Header.Get("X-Incus-OIDC-audience") if issuer == "" || clientID == "" { return resp, nil } // Refresh the token. err = o.refresh(issuer, clientID) if err != nil { err = o.authenticate(issuer, clientID, audience) if err != nil { return nil, err } } // If not dealing with something we can retry, return a clear error. if req.Method != "GET" && req.GetBody == nil { return resp, ErrOIDCExpired } // Set the new access token in the header. req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", o.tokens.AccessToken)) // Reset the request body. if req.GetBody != nil { body, err := req.GetBody() if err != nil { return nil, err } req.Body = body } resp, err = o.httpClient.Do(req) if err != nil { return nil, err } return resp, nil } // dial function executes a websocket request and handles OIDC authentication and refresh. func (o *oidcClient) dial(dialer websocket.Dialer, uri string, req *http.Request) (*websocket.Conn, *http.Response, error) { conn, resp, err := dialer.Dial(uri, req.Header) if err != nil && resp == nil { return nil, nil, err } // Return immediately if the error is not HTTP status unauthorized. 
if conn != nil && resp.StatusCode != http.StatusUnauthorized { return conn, resp, nil } issuer := resp.Header.Get("X-Incus-OIDC-issuer") clientID := resp.Header.Get("X-Incus-OIDC-clientid") audience := resp.Header.Get("X-Incus-OIDC-audience") if issuer == "" || clientID == "" { return nil, resp, err } err = o.refresh(issuer, clientID) if err != nil { err = o.authenticate(issuer, clientID, audience) if err != nil { return nil, resp, err } } // Set the new access token in the header. req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", o.tokens.AccessToken)) return dialer.Dial(uri, req.Header) } // getProvider initializes a new OpenID Connect Relying Party for a given issuer and clientID. // The function also creates a secure CookieHandler with random encryption and hash keys, and applies a series of configurations on the Relying Party. func (o *oidcClient) getProvider(issuer string, clientID string) (rp.RelyingParty, error) { hashKey := make([]byte, 16) encryptKey := make([]byte, 16) _, err := rand.Read(hashKey) if err != nil { return nil, err } _, err = rand.Read(encryptKey) if err != nil { return nil, err } cookieHandler := httphelper.NewCookieHandler(hashKey, encryptKey, httphelper.WithUnsecure()) options := []rp.Option{ rp.WithCookieHandler(cookieHandler), rp.WithVerifierOpts(rp.WithIssuedAtOffset(5 * time.Second)), rp.WithPKCE(cookieHandler), rp.WithHTTPClient(o.httpClient), } provider, err := rp.NewRelyingPartyOIDC(context.TODO(), issuer, clientID, "", "", oidcScopes, options...) if err != nil { return nil, err } return provider, nil } // refresh attempts to refresh the OpenID Connect access token for the client using the refresh token. // If no token is present or the refresh token is empty, it returns an error. If successful, it updates the access token and other relevant token fields. func (o *oidcClient) refresh(issuer string, clientID string) error { if o.tokens.Token == nil || o.tokens.RefreshToken == "" { return errRefreshAccessToken } provider, err := o.getProvider(issuer, clientID) if err != nil { return errRefreshAccessToken } oauthTokens, err := rp.RefreshTokens[*oidc.IDTokenClaims](context.TODO(), provider, o.tokens.RefreshToken, "", "") if err != nil { return errRefreshAccessToken } o.tokens.Token.AccessToken = oauthTokens.AccessToken o.tokens.TokenType = oauthTokens.TokenType o.tokens.Expiry = oauthTokens.Expiry if oauthTokens.RefreshToken != "" { o.tokens.Token.RefreshToken = oauthTokens.RefreshToken } return nil } // authenticate initiates the OpenID Connect device flow authentication process for the client. // It presents a user code for the end user to input in the device that has web access and waits for them to complete the authentication, // subsequently updating the client's tokens upon successful authentication. func (o *oidcClient) authenticate(issuer string, clientID string, audience string) error { // Store the old transport and restore it in the end. 
oldTransport := o.httpClient.Transport o.oidcTransport.audience = audience o.httpClient.Transport = o.oidcTransport defer func() { o.httpClient.Transport = oldTransport }() provider, err := o.getProvider(issuer, clientID) if err != nil { return err } o.oidcTransport.deviceAuthorizationEndpoint = provider.GetDeviceAuthorizationEndpoint() resp, err := rp.DeviceAuthorization(context.TODO(), oidcScopes, provider, nil) if err != nil { return err } u, _ := url.Parse(resp.VerificationURIComplete) fmt.Printf("URL: %s\n", u.String()) fmt.Printf("Code: %s\n\n", resp.UserCode) _ = util.OpenBrowser(u.String()) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT) defer stop() token, err := rp.DeviceAccessToken(ctx, resp.DeviceCode, time.Duration(resp.Interval)*time.Second, provider) if err != nil { return err } if o.tokens.Token == nil { o.tokens.Token = &oauth2.Token{} } o.tokens.Expiry = time.Now().Add(time.Duration(token.ExpiresIn)) o.tokens.IDToken = token.IDToken o.tokens.Token.AccessToken = token.AccessToken o.tokens.TokenType = token.TokenType if token.RefreshToken != "" { o.tokens.Token.RefreshToken = token.RefreshToken } return nil } incus-6.0.4/client/incus_operations.go000066400000000000000000000075221477363751000200560ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/shared/api" ) // GetOperationUUIDs returns a list of operation uuids. func (r *ProtocolIncus) GetOperationUUIDs() ([]string, error) { // Fetch the raw URL values. urls := []string{} baseURL := "/operations" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetOperations returns a list of Operation struct. func (r *ProtocolIncus) GetOperations() ([]api.Operation, error) { apiOperations := map[string][]api.Operation{} // Fetch the raw value. _, err := r.queryStruct("GET", "/operations?recursion=1", nil, "", &apiOperations) if err != nil { return nil, err } // Turn it into a list of operations. operations := []api.Operation{} for _, v := range apiOperations { operations = append(operations, v...) } return operations, nil } // GetOperationsAllProjects returns a list of operations from all projects. func (r *ProtocolIncus) GetOperationsAllProjects() ([]api.Operation, error) { err := r.CheckExtension("operations_get_query_all_projects") if err != nil { return nil, err } apiOperations := map[string][]api.Operation{} path := "/operations" v := url.Values{} v.Set("recursion", "1") v.Set("all-projects", "true") // Fetch the raw value. _, err = r.queryStruct("GET", fmt.Sprintf("%s?%s", path, v.Encode()), nil, "", &apiOperations) if err != nil { return nil, err } // Turn it into a list of operations. operations := []api.Operation{} for _, v := range apiOperations { operations = append(operations, v...) } return operations, nil } // GetOperation returns an Operation entry for the provided uuid. func (r *ProtocolIncus) GetOperation(uuid string) (*api.Operation, string, error) { op := api.Operation{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s", url.PathEscape(uuid)), nil, "", &op) if err != nil { return nil, "", err } return &op, etag, nil } // GetOperationWait returns an Operation entry for the provided uuid once it's complete or hits the timeout. 
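// Hedged usage sketch for the call declared below. The client "c" is an
// assumed incus.InstanceServer and "uuid" an operation UUID obtained
// elsewhere; the status handling relies on the shared/api StatusCode values:
//
//	op, _, err := c.GetOperationWait(uuid, 60)
//	if err != nil {
//		return err
//	}
//
//	if op.StatusCode == api.Failure {
//		return fmt.Errorf("Operation failed: %s", op.Err)
//	}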
func (r *ProtocolIncus) GetOperationWait(uuid string, timeout int) (*api.Operation, string, error) { op := api.Operation{} // Unset the response header timeout so that the request does not time out. transport, err := r.getUnderlyingHTTPTransport() if err != nil { return nil, "", err } transport.ResponseHeaderTimeout = 0 // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s/wait?timeout=%d", url.PathEscape(uuid), timeout), nil, "", &op) if err != nil { return nil, "", err } return &op, etag, nil } // GetOperationWaitSecret returns an Operation entry for the provided uuid and secret once it's complete or hits the timeout. func (r *ProtocolIncus) GetOperationWaitSecret(uuid string, secret string, timeout int) (*api.Operation, string, error) { op := api.Operation{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/operations/%s/wait?secret=%s&timeout=%d", url.PathEscape(uuid), url.PathEscape(secret), timeout), nil, "", &op) if err != nil { return nil, "", err } return &op, etag, nil } // GetOperationWebsocket returns a websocket connection for the provided operation. func (r *ProtocolIncus) GetOperationWebsocket(uuid string, secret string) (*websocket.Conn, error) { path := fmt.Sprintf("/operations/%s/websocket", url.PathEscape(uuid)) if secret != "" { path = fmt.Sprintf("%s?secret=%s", path, url.QueryEscape(secret)) } return r.websocket(path) } // DeleteOperation deletes (cancels) a running operation. func (r *ProtocolIncus) DeleteOperation(uuid string) error { // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/operations/%s", url.PathEscape(uuid)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_profiles.go000066400000000000000000000054121477363751000175120ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // Profile handling functions // GetProfileNames returns a list of available profile names. func (r *ProtocolIncus) GetProfileNames() ([]string, error) { // Fetch the raw URL values. urls := []string{} baseURL := "/profiles" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetProfiles returns a list of available Profile structs. func (r *ProtocolIncus) GetProfiles() ([]api.Profile, error) { profiles := []api.Profile{} // Fetch the raw value _, err := r.queryStruct("GET", "/profiles?recursion=1", nil, "", &profiles) if err != nil { return nil, err } return profiles, nil } // GetProfilesAllProjects returns a list of profiles across all projects as Profile structs. func (r *ProtocolIncus) GetProfilesAllProjects() ([]api.Profile, error) { err := r.CheckExtension("profiles_all_projects") if err != nil { return nil, fmt.Errorf(`The server is missing the required "profiles_all_projects" API extension`) } profiles := []api.Profile{} _, err = r.queryStruct("GET", "/profiles?recursion=1&all-projects=true", nil, "", &profiles) if err != nil { return nil, err } return profiles, nil } // GetProfile returns a Profile entry for the provided name. func (r *ProtocolIncus) GetProfile(name string) (*api.Profile, string, error) { profile := api.Profile{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), nil, "", &profile) if err != nil { return nil, "", err } return &profile, etag, nil } // CreateProfile defines a new instance profile. 
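// A hedged sketch of defining a profile with the call declared below. The
// client "c" is an assumed incus.InstanceServer; field names come from the
// shared/api package and the values are examples:
//
//	err := c.CreateProfile(api.ProfilesPost{
//		Name: "small",
//		ProfilePut: api.ProfilePut{
//			Config: map[string]string{"limits.memory": "1GiB"},
//		},
//	})
//	if err != nil {
//		return err
//	}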
func (r *ProtocolIncus) CreateProfile(profile api.ProfilesPost) error { // Send the request _, _, err := r.query("POST", "/profiles", profile, "") if err != nil { return err } return nil } // UpdateProfile updates the profile to match the provided Profile struct. func (r *ProtocolIncus) UpdateProfile(name string, profile api.ProfilePut, ETag string) error { // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), profile, ETag) if err != nil { return err } return nil } // RenameProfile renames an existing profile entry. func (r *ProtocolIncus) RenameProfile(name string, profile api.ProfilePost) error { // Send the request _, _, err := r.query("POST", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), profile, "") if err != nil { return err } return nil } // DeleteProfile deletes a profile. func (r *ProtocolIncus) DeleteProfile(name string) error { // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/profiles/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_projects.go000066400000000000000000000111271477363751000175200ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // Project handling functions // GetProjectNames returns a list of available project names. func (r *ProtocolIncus) GetProjectNames() ([]string, error) { if !r.HasExtension("projects") { return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := "/projects" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetProjects returns a list of available Project structs. func (r *ProtocolIncus) GetProjects() ([]api.Project, error) { if !r.HasExtension("projects") { return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") } projects := []api.Project{} // Fetch the raw value _, err := r.queryStruct("GET", "/projects?recursion=1", nil, "", &projects) if err != nil { return nil, err } return projects, nil } // GetProject returns a Project entry for the provided name. func (r *ProtocolIncus) GetProject(name string) (*api.Project, string, error) { if !r.HasExtension("projects") { return nil, "", fmt.Errorf("The server is missing the required \"projects\" API extension") } project := api.Project{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/projects/%s", url.PathEscape(name)), nil, "", &project) if err != nil { return nil, "", err } return &project, etag, nil } // GetProjectState returns a Project state for the provided name. func (r *ProtocolIncus) GetProjectState(name string) (*api.ProjectState, error) { if !r.HasExtension("project_usage") { return nil, fmt.Errorf("The server is missing the required \"project_usage\" API extension") } projectState := api.ProjectState{} // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/projects/%s/state", url.PathEscape(name)), nil, "", &projectState) if err != nil { return nil, err } return &projectState, nil } // GetProjectAccess returns an Access entry for the specified project. 
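// Hedged usage sketch for the call declared below. The client "c" is an
// assumed incus.InstanceServer, the project name is an example, and
// api.Access is assumed to be a list-like type of access entries:
//
//	access, err := c.GetProjectAccess("default")
//	if err != nil {
//		return err
//	}
//
//	fmt.Printf("%d entries may access the project\n", len(access))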
func (r *ProtocolIncus) GetProjectAccess(name string) (api.Access, error) { access := api.Access{} if !r.HasExtension("project_access") { return nil, fmt.Errorf("The server is missing the required \"project_access\" API extension") } // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/projects/%s/access", url.PathEscape(name)), nil, "", &access) if err != nil { return nil, err } return access, nil } // CreateProject defines a new project. func (r *ProtocolIncus) CreateProject(project api.ProjectsPost) error { if !r.HasExtension("projects") { return fmt.Errorf("The server is missing the required \"projects\" API extension") } // Send the request _, _, err := r.query("POST", "/projects", project, "") if err != nil { return err } return nil } // UpdateProject updates the project to match the provided Project struct. func (r *ProtocolIncus) UpdateProject(name string, project api.ProjectPut, ETag string) error { if !r.HasExtension("projects") { return fmt.Errorf("The server is missing the required \"projects\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/projects/%s", url.PathEscape(name)), project, ETag) if err != nil { return err } return nil } // RenameProject renames an existing project entry. func (r *ProtocolIncus) RenameProject(name string, project api.ProjectPost) (Operation, error) { if !r.HasExtension("projects") { return nil, fmt.Errorf("The server is missing the required \"projects\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/projects/%s", url.PathEscape(name)), project, "") if err != nil { return nil, err } return op, nil } // DeleteProject deletes a project. func (r *ProtocolIncus) DeleteProject(name string) error { if !r.HasExtension("projects") { return fmt.Errorf("The server is missing the required \"projects\" API extension") } // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/projects/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } // DeleteProjectForce deletes a project and everything inside of it. func (r *ProtocolIncus) DeleteProjectForce(name string) error { if !r.HasExtension("projects_force_delete") { return fmt.Errorf("The server is missing the required \"projects_force_delete\" API extension") } // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/projects/%s?force=1", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/incus_server.go000066400000000000000000000407411477363751000172010ustar00rootroot00000000000000package incus import ( "fmt" "io" "net/http" "slices" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/shared/api" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/util" ) // Server handling functions // GetServer returns the server status as a Server struct. 
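// Hedged usage sketch for the call declared below, with "c" assumed to be a
// connected incus.InstanceServer:
//
//	server, _, err := c.GetServer()
//	if err != nil {
//		return err
//	}
//
//	if server.Environment.ServerClustered {
//		fmt.Println("Talking to a cluster member")
//	}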
func (r *ProtocolIncus) GetServer() (*api.Server, string, error) { server := api.Server{} // Fetch the raw value etag, err := r.queryStruct("GET", "", nil, "", &server) if err != nil { return nil, "", err } // Fill in certificate fingerprint if not provided if server.Environment.CertificateFingerprint == "" && server.Environment.Certificate != "" { var err error server.Environment.CertificateFingerprint, err = localtls.CertFingerprintStr(server.Environment.Certificate) if err != nil { return nil, "", err } } if !server.Public && len(server.AuthMethods) == 0 { // TLS is always available for Incus servers server.AuthMethods = []string{api.AuthenticationMethodTLS} } // Add the value to the cache r.server = &server return &server, etag, nil } // UpdateServer updates the server status to match the provided Server struct. func (r *ProtocolIncus) UpdateServer(server api.ServerPut, ETag string) error { // Send the request _, _, err := r.query("PUT", "", server, ETag) if err != nil { return err } return nil } // HasExtension returns true if the server supports a given API extension. // Deprecated: Use CheckExtension instead. func (r *ProtocolIncus) HasExtension(extension string) bool { // If no cached API information, just assume we're good // This is needed for those rare cases where we must avoid a GetServer call if r.server == nil { return true } return slices.Contains(r.server.APIExtensions, extension) } // CheckExtension checks if the server has the specified extension. func (r *ProtocolIncus) CheckExtension(extensionName string) error { if !r.HasExtension(extensionName) { return fmt.Errorf("The server is missing the required %q API extension", extensionName) } return nil } // IsClustered returns true if the server is part of an Incus cluster. func (r *ProtocolIncus) IsClustered() bool { return r.server.Environment.ServerClustered } // GetServerResources returns the resources available to a given Incus server. func (r *ProtocolIncus) GetServerResources() (*api.Resources, error) { if !r.HasExtension("resources") { return nil, fmt.Errorf("The server is missing the required \"resources\" API extension") } resources := api.Resources{} // Fetch the raw value _, err := r.queryStruct("GET", "/resources", nil, "", &resources) if err != nil { return nil, err } return &resources, nil } // UseProject returns a client that will use a specific project. func (r *ProtocolIncus) UseProject(name string) InstanceServer { return &ProtocolIncus{ ctx: r.ctx, ctxConnected: r.ctxConnected, ctxConnectedCancel: r.ctxConnectedCancel, server: r.server, http: r.http, httpCertificate: r.httpCertificate, httpBaseURL: r.httpBaseURL, httpProtocol: r.httpProtocol, httpUserAgent: r.httpUserAgent, httpUnixPath: r.httpUnixPath, requireAuthenticated: r.requireAuthenticated, clusterTarget: r.clusterTarget, project: name, eventConns: make(map[string]*websocket.Conn), // New project specific listener conns. eventListeners: make(map[string][]*EventListener), // New project specific listeners. oidcClient: r.oidcClient, } } // UseTarget returns a client that will target a specific cluster member. // Use this for member-specific operations such as specific container // placement, preparing a new storage pool or network, ...
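// Hedged usage sketch for the call declared below. The client "c" is an
// assumed incus.InstanceServer; the member name and pool details are
// examples, with field names taken from the shared/api package:
//
//	err := c.UseTarget("server01").CreateStoragePool(api.StoragePoolsPost{
//		Name:   "local",
//		Driver: "zfs",
//	})
//	if err != nil {
//		return err
//	}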
func (r *ProtocolIncus) UseTarget(name string) InstanceServer { return &ProtocolIncus{ ctx: r.ctx, ctxConnected: r.ctxConnected, ctxConnectedCancel: r.ctxConnectedCancel, server: r.server, http: r.http, httpCertificate: r.httpCertificate, httpBaseURL: r.httpBaseURL, httpProtocol: r.httpProtocol, httpUserAgent: r.httpUserAgent, httpUnixPath: r.httpUnixPath, requireAuthenticated: r.requireAuthenticated, project: r.project, eventConns: make(map[string]*websocket.Conn), // New target specific listener conns. eventListeners: make(map[string][]*EventListener), // New target specific listeners. oidcClient: r.oidcClient, clusterTarget: name, } } // IsAgent returns true if the server is an Incus agent. func (r *ProtocolIncus) IsAgent() bool { return r.server != nil && r.server.Environment.Server == "incus-agent" } // GetMetrics returns the text OpenMetrics data. func (r *ProtocolIncus) GetMetrics() (string, error) { // Check that the server supports it. if !r.HasExtension("metrics") { return "", fmt.Errorf("The server is missing the required \"metrics\" API extension") } // Prepare the request. requestURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0/metrics", r.httpBaseURL.String())) if err != nil { return "", err } req, err := http.NewRequest("GET", requestURL, nil) if err != nil { return "", err } // Send the request. resp, err := r.DoHTTP(req) if err != nil { return "", err } defer func() { _ = resp.Body.Close() }() if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("Bad HTTP status: %d", resp.StatusCode) } // Get the content. content, err := io.ReadAll(resp.Body) if err != nil { return "", err } return string(content), nil } // ApplyServerPreseed configures a target Incus server with the provided server and cluster configuration. func (r *ProtocolIncus) ApplyServerPreseed(config api.InitPreseed) error { // Apply server configuration. if config.Server.Config != nil && len(config.Server.Config) > 0 { // Get current config. server, etag, err := r.GetServer() if err != nil { return fmt.Errorf("Failed to retrieve current server configuration: %w", err) } for k, v := range config.Server.Config { server.Config[k] = fmt.Sprintf("%v", v) } // Apply it. err = r.UpdateServer(server.Writable(), etag) if err != nil { return fmt.Errorf("Failed to update server configuration: %w", err) } } // Apply storage configuration. if config.Server.StoragePools != nil && len(config.Server.StoragePools) > 0 { // Get the list of storagePools. storagePoolNames, err := r.GetStoragePoolNames() if err != nil { return fmt.Errorf("Failed to retrieve list of storage pools: %w", err) } // StoragePool creator createStoragePool := func(storagePool api.StoragePoolsPost) error { // Create the storagePool if doesn't exist. err := r.CreateStoragePool(storagePool) if err != nil { return fmt.Errorf("Failed to create storage pool %q: %w", storagePool.Name, err) } return nil } // StoragePool updater. updateStoragePool := func(target api.StoragePoolsPost) error { // Get the current storagePool. storagePool, etag, err := r.GetStoragePool(target.Name) if err != nil { return fmt.Errorf("Failed to retrieve current storage pool %q: %w", target.Name, err) } // Quick check. if storagePool.Driver != target.Driver { return fmt.Errorf("Storage pool %q is of type %q instead of %q", storagePool.Name, storagePool.Driver, target.Driver) } // Description override. if target.Description != "" { storagePool.Description = target.Description } // Config overrides. 
for k, v := range target.Config { storagePool.Config[k] = fmt.Sprintf("%v", v) } // Apply it. err = r.UpdateStoragePool(target.Name, storagePool.Writable(), etag) if err != nil { return fmt.Errorf("Failed to update storage pool %q: %w", target.Name, err) } return nil } for _, storagePool := range config.Server.StoragePools { // New storagePool. if !slices.Contains(storagePoolNames, storagePool.Name) { err := createStoragePool(storagePool) if err != nil { return err } continue } // Existing storagePool. err := updateStoragePool(storagePool) if err != nil { return err } } } // Apply network configuration function. applyNetwork := func(target api.InitNetworksProjectPost) error { network, etag, err := r.UseProject(target.Project).GetNetwork(target.Name) if err != nil { // Create the network if doesn't exist. err := r.UseProject(target.Project).CreateNetwork(target.NetworksPost) if err != nil { return fmt.Errorf("Failed to create local member network %q in project %q: %w", target.Name, target.Project, err) } } else { // Description override. if target.Description != "" { network.Description = target.Description } // Config overrides. for k, v := range target.Config { network.Config[k] = fmt.Sprintf("%v", v) } // Apply it. err = r.UseProject(target.Project).UpdateNetwork(target.Name, network.Writable(), etag) if err != nil { return fmt.Errorf("Failed to update local member network %q in project %q: %w", target.Name, target.Project, err) } } return nil } // Apply networks in the default project before other projects config applied (so that if the projects // depend on a network in the default project they can have their config applied successfully). for i := range config.Server.Networks { // Populate default project if not specified for backwards compatibility with earlier // preseed dump files. if config.Server.Networks[i].Project == "" { config.Server.Networks[i].Project = api.ProjectDefaultName } if config.Server.Networks[i].Project != api.ProjectDefaultName { continue } err := applyNetwork(config.Server.Networks[i]) if err != nil { return err } } // Apply project configuration. if config.Server.Projects != nil && len(config.Server.Projects) > 0 { // Get the list of projects. projectNames, err := r.GetProjectNames() if err != nil { return fmt.Errorf("Failed to retrieve list of projects: %w", err) } // Project creator. createProject := func(project api.ProjectsPost) error { // Create the project if doesn't exist. err := r.CreateProject(project) if err != nil { return fmt.Errorf("Failed to create local member project %q: %w", project.Name, err) } return nil } // Project updater. updateProject := func(target api.ProjectsPost) error { // Get the current project. project, etag, err := r.GetProject(target.Name) if err != nil { return fmt.Errorf("Failed to retrieve current project %q: %w", target.Name, err) } // Description override. if target.Description != "" { project.Description = target.Description } // Config overrides. for k, v := range target.Config { project.Config[k] = fmt.Sprintf("%v", v) } // Apply it. err = r.UpdateProject(target.Name, project.Writable(), etag) if err != nil { return fmt.Errorf("Failed to update local member project %q: %w", target.Name, err) } return nil } for _, project := range config.Server.Projects { // New project. if !slices.Contains(projectNames, project.Name) { err := createProject(project) if err != nil { return err } continue } // Existing project. 
err := updateProject(project) if err != nil { return err } } } // Apply networks in non-default projects after project config applied (so that their projects exist). for i := range config.Server.Networks { if config.Server.Networks[i].Project == api.ProjectDefaultName { continue } err := applyNetwork(config.Server.Networks[i]) if err != nil { return err } } // Apply storage volumes configuration. applyStorageVolume := func(storageVolume api.InitStorageVolumesProjectPost) error { // Get the current storageVolume. currentStorageVolume, etag, err := r.UseProject(storageVolume.Project).GetStoragePoolVolume(storageVolume.Pool, storageVolume.Type, storageVolume.Name) if err != nil { // Create the storage volume if it doesn't exist. err := r.UseProject(storageVolume.Project).CreateStoragePoolVolume(storageVolume.Pool, storageVolume.StorageVolumesPost) if err != nil { return fmt.Errorf("Failed to create storage volume %q in project %q on pool %q: %w", storageVolume.Name, storageVolume.Project, storageVolume.Pool, err) } } else { // Quick check. if currentStorageVolume.Type != storageVolume.Type { return fmt.Errorf("Storage volume %q in project %q is of type %q instead of %q", currentStorageVolume.Name, storageVolume.Project, currentStorageVolume.Type, storageVolume.Type) } // Prepare the update. newStorageVolume := api.StorageVolumePut{} err = util.DeepCopy(currentStorageVolume.Writable(), &newStorageVolume) if err != nil { return fmt.Errorf("Failed to copy configuration of storage volume %q in project %q: %w", storageVolume.Name, storageVolume.Project, err) } // Description override. if storageVolume.Description != "" { newStorageVolume.Description = storageVolume.Description } // Config overrides. for k, v := range storageVolume.Config { newStorageVolume.Config[k] = fmt.Sprintf("%v", v) } // Apply it. err = r.UseProject(storageVolume.Project).UpdateStoragePoolVolume(storageVolume.Pool, storageVolume.Type, currentStorageVolume.Name, newStorageVolume, etag) if err != nil { return fmt.Errorf("Failed to update storage volume %q in project %q: %w", storageVolume.Name, storageVolume.Project, err) } } return nil } // Apply storage volumes in the default project before other projects config. for i := range config.Server.StorageVolumes { // Populate default project if not specified. if config.Server.StorageVolumes[i].Project == "" { config.Server.StorageVolumes[i].Project = api.ProjectDefaultName } // Populate default type if not specified. if config.Server.StorageVolumes[i].Type == "" { config.Server.StorageVolumes[i].Type = "custom" } err := applyStorageVolume(config.Server.StorageVolumes[i]) if err != nil { return err } } // Apply profile configuration. if config.Server.Profiles != nil && len(config.Server.Profiles) > 0 { // Apply profile configuration. applyProfile := func(profile api.InitProfileProjectPost) error { // Get the current profile. currentProfile, etag, err := r.UseProject(profile.Project).GetProfile(profile.Name) if err != nil { // // Create the profile if it doesn't exist. err := r.UseProject(profile.Project).CreateProfile(profile.ProfilesPost) if err != nil { return fmt.Errorf("Failed to create profile %q in project %q: %w", profile.Name, profile.Project, err) } } else { // Prepare the update. updatedProfile := api.ProfilePut{} err = util.DeepCopy(currentProfile.Writable(), &updatedProfile) if err != nil { return fmt.Errorf("Failed to copy configuration of profile %q in project %q: %w", profile.Name, profile.Project, err) } // Description override. 
if profile.Description != "" { updatedProfile.Description = profile.Description } // Config overrides. for k, v := range profile.Config { updatedProfile.Config[k] = fmt.Sprintf("%v", v) } // Device overrides. for k, v := range profile.Devices { // New device. _, ok := updatedProfile.Devices[k] if !ok { updatedProfile.Devices[k] = v continue } // Existing device. for configKey, configValue := range v { updatedProfile.Devices[k][configKey] = fmt.Sprintf("%v", configValue) } } // Apply it. err = r.UseProject(profile.Project).UpdateProfile(profile.Name, updatedProfile, etag) if err != nil { return fmt.Errorf("Failed to update profile %q in project %q: %w", profile.Name, profile.Project, err) } } return nil } for _, profile := range config.Server.Profiles { if profile.Project == "" { profile.Project = api.ProjectDefaultName } err := applyProfile(profile) if err != nil { return err } } } // Cluster configuration. if config.Cluster != nil && config.Cluster.Enabled { // Get the current cluster configuration currentCluster, etag, err := r.GetCluster() if err != nil { return fmt.Errorf("Failed to retrieve current cluster config: %w", err) } // Check if already enabled if !currentCluster.Enabled { // Configure the cluster op, err := r.UpdateCluster(config.Cluster.ClusterPut, etag) if err != nil { return fmt.Errorf("Failed to configure cluster: %w", err) } err = op.Wait() if err != nil { return fmt.Errorf("Failed to configure cluster: %w", err) } } } return nil } incus-6.0.4/client/incus_storage_buckets.go000066400000000000000000000270441477363751000210600ustar00rootroot00000000000000package incus import ( "fmt" "io" "net/http" "net/url" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/cancel" "github.com/lxc/incus/v6/shared/ioprogress" "github.com/lxc/incus/v6/shared/units" ) // GetStoragePoolBucketNames returns a list of storage bucket names. func (r *ProtocolIncus) GetStoragePoolBucketNames(poolName string) ([]string, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} u := api.NewURL().Path("storage-pools", poolName, "buckets") _, err = r.queryStruct("GET", u.String(), nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(u.String(), urls...) } // GetStoragePoolBuckets returns a list of storage buckets for the provided pool. func (r *ProtocolIncus) GetStoragePoolBuckets(poolName string) ([]api.StorageBucket, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } buckets := []api.StorageBucket{} // Fetch the raw value. u := api.NewURL().Path("storage-pools", poolName, "buckets").WithQuery("recursion", "1") _, err = r.queryStruct("GET", u.String(), nil, "", &buckets) if err != nil { return nil, err } return buckets, nil } // GetStoragePoolBucketsAllProjects gets all storage pool buckets across all projects. 
func (r *ProtocolIncus) GetStoragePoolBucketsAllProjects(poolName string) ([]api.StorageBucket, error) { err := r.CheckExtension("storage_buckets_all_projects") if err != nil { return nil, fmt.Errorf(`The server is missing the required "storage_buckets_all_projects" API extension`) } buckets := []api.StorageBucket{} u := api.NewURL().Path("storage-pools", poolName, "buckets").WithQuery("recursion", "1").WithQuery("all-projects", "true") _, err = r.queryStruct("GET", u.String(), nil, "", &buckets) if err != nil { return nil, err } return buckets, nil } // GetStoragePoolBucket returns a storage bucket entry for the provided pool and bucket name. func (r *ProtocolIncus) GetStoragePoolBucket(poolName string, bucketName string) (*api.StorageBucket, string, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, "", err } bucket := api.StorageBucket{} // Fetch the raw value. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) etag, err := r.queryStruct("GET", u.String(), nil, "", &bucket) if err != nil { return nil, "", err } return &bucket, etag, nil } // CreateStoragePoolBucket defines a new storage bucket using the provided struct. // If the server supports storage_buckets_create_credentials API extension, then this function will return the // initial admin credentials. Otherwise it will be nil. func (r *ProtocolIncus) CreateStoragePoolBucket(poolName string, bucket api.StorageBucketsPost) (*api.StorageBucketKey, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } u := api.NewURL().Path("storage-pools", poolName, "buckets") // Send the request and get the resulting key info (including generated keys). if r.HasExtension("storage_buckets_create_credentials") { var newKey api.StorageBucketKey _, err = r.queryStruct("POST", u.String(), bucket, "", &newKey) if err != nil { return nil, err } return &newKey, nil } _, _, err = r.query("POST", u.String(), bucket, "") if err != nil { return nil, err } return nil, nil } // UpdateStoragePoolBucket updates the storage bucket to match the provided struct. func (r *ProtocolIncus) UpdateStoragePoolBucket(poolName string, bucketName string, bucket api.StorageBucketPut, ETag string) error { err := r.CheckExtension("storage_buckets") if err != nil { return err } // Send the request. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) _, _, err = r.query("PUT", u.String(), bucket, ETag) if err != nil { return err } return nil } // DeleteStoragePoolBucket deletes an existing storage bucket. func (r *ProtocolIncus) DeleteStoragePoolBucket(poolName string, bucketName string) error { err := r.CheckExtension("storage_buckets") if err != nil { return err } // Send the request. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName) _, _, err = r.query("DELETE", u.String(), nil, "") if err != nil { return err } return nil } // GetStoragePoolBucketKeyNames returns a list of storage bucket key names. func (r *ProtocolIncus) GetStoragePoolBucketKeyNames(poolName string, bucketName string) ([]string, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys") _, err = r.queryStruct("GET", u.String(), nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(u.String(), urls...) 
} // GetStoragePoolBucketKeys returns a list of storage bucket keys for the provided pool and bucket. func (r *ProtocolIncus) GetStoragePoolBucketKeys(poolName string, bucketName string) ([]api.StorageBucketKey, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } bucketKeys := []api.StorageBucketKey{} // Fetch the raw value. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys").WithQuery("recursion", "1") _, err = r.queryStruct("GET", u.String(), nil, "", &bucketKeys) if err != nil { return nil, err } return bucketKeys, nil } // GetStoragePoolBucketKey returns a storage bucket key entry for the provided pool, bucket and key name. func (r *ProtocolIncus) GetStoragePoolBucketKey(poolName string, bucketName string, keyName string) (*api.StorageBucketKey, string, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, "", err } bucketKey := api.StorageBucketKey{} // Fetch the raw value. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) etag, err := r.queryStruct("GET", u.String(), nil, "", &bucketKey) if err != nil { return nil, "", err } return &bucketKey, etag, nil } // CreateStoragePoolBucketKey adds a key to a storage bucket. func (r *ProtocolIncus) CreateStoragePoolBucketKey(poolName string, bucketName string, key api.StorageBucketKeysPost) (*api.StorageBucketKey, error) { err := r.CheckExtension("storage_buckets") if err != nil { return nil, err } // Send the request and get the resulting key info (including generated keys). var newKey api.StorageBucketKey u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys") _, err = r.queryStruct("POST", u.String(), key, "", &newKey) if err != nil { return nil, err } return &newKey, err } // UpdateStoragePoolBucketKey updates an existing storage bucket key. func (r *ProtocolIncus) UpdateStoragePoolBucketKey(poolName string, bucketName string, keyName string, key api.StorageBucketKeyPut, ETag string) error { err := r.CheckExtension("storage_buckets") if err != nil { return err } // Send the request. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) _, _, err = r.query("PUT", u.String(), key, ETag) if err != nil { return err } return nil } // DeleteStoragePoolBucketKey removes a key from a storage bucket. func (r *ProtocolIncus) DeleteStoragePoolBucketKey(poolName string, bucketName string, keyName string) error { err := r.CheckExtension("storage_buckets") if err != nil { return err } // Send the request. u := api.NewURL().Path("storage-pools", poolName, "buckets", bucketName, "keys", keyName) _, _, err = r.query("DELETE", u.String(), nil, "") if err != nil { return err } return nil } // CreateStoragePoolBucketBackup creates a new storage bucket backup. func (r *ProtocolIncus) CreateStoragePoolBucketBackup(poolName string, bucketName string, backup api.StorageBucketBackupsPost) (Operation, error) { err := r.CheckExtension("storage_bucket_backup") if err != nil { return nil, err } op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/buckets/%s/backups", url.PathEscape(poolName), url.PathEscape(bucketName)), backup, "") if err != nil { return nil, err } return op, nil } // DeleteStoragePoolBucketBackup deletes an existing storage bucket backup. 
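// Hedged usage sketch for the call declared below. The client "c" is an
// assumed incus.InstanceServer; pool, bucket and backup names are examples.
// The returned Operation is waited on before treating the deletion as done:
//
//	op, err := c.DeleteStoragePoolBucketBackup("default", "bucket1", "backup0")
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()
//	if err != nil {
//		return err
//	}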
func (r *ProtocolIncus) DeleteStoragePoolBucketBackup(pool string, bucketName string, name string) (Operation, error) {
	err := r.CheckExtension("storage_bucket_backup")
	if err != nil {
		return nil, err
	}

	op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/storage-pools/%s/buckets/%s/backups/%s", url.PathEscape(pool), url.PathEscape(bucketName), url.PathEscape(name)), nil, "")
	if err != nil {
		return nil, err
	}

	return op, nil
}

// GetStoragePoolBucketBackupFile returns the storage bucket backup file.
func (r *ProtocolIncus) GetStoragePoolBucketBackupFile(pool string, bucketName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) {
	err := r.CheckExtension("storage_bucket_backup")
	if err != nil {
		return nil, err
	}

	// Build the URL
	uri := fmt.Sprintf("%s/1.0/storage-pools/%s/buckets/%s/backups/%s/export", r.httpBaseURL.String(), url.PathEscape(pool), url.PathEscape(bucketName), url.PathEscape(name))

	if r.project != "" {
		uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project))
	}

	// Prepare the download request
	request, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}

	if r.httpUserAgent != "" {
		request.Header.Set("User-Agent", r.httpUserAgent)
	}

	// Start the request
	response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.DoHTTP, request)
	if err != nil {
		return nil, err
	}

	defer func() { _ = response.Body.Close() }()
	defer close(doneCh)

	if response.StatusCode != http.StatusOK {
		_, _, err := incusParseResponse(response)
		if err != nil {
			return nil, err
		}
	}

	// Handle the data
	body := response.Body
	if req.ProgressHandler != nil {
		body = &ioprogress.ProgressReader{
			ReadCloser: response.Body,
			Tracker: &ioprogress.ProgressTracker{
				Length: response.ContentLength,
				Handler: func(percent int64, speed int64) {
					req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))})
				},
			},
		}
	}

	size, err := io.Copy(req.BackupFile, body)
	if err != nil {
		return nil, err
	}

	resp := BackupFileResponse{}
	resp.Size = size

	return &resp, nil
}

// CreateStoragePoolBucketFromBackup creates a new storage bucket from a backup.
func (r *ProtocolIncus) CreateStoragePoolBucketFromBackup(pool string, args StoragePoolBucketBackupArgs) (Operation, error) {
	if !r.HasExtension("storage_bucket_backup") {
		return nil, fmt.Errorf(`The server is missing the required "storage_bucket_backup" API extension`)
	}

	path := fmt.Sprintf("/storage-pools/%s/buckets", url.PathEscape(pool))

	// Prepare the HTTP request.
	reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path))
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest("POST", reqURL, args.BackupFile)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Type", "application/octet-stream")

	if args.Name != "" {
		req.Header.Set("X-Incus-name", args.Name)
	}

	// Send the request.
	resp, err := r.DoHTTP(req)
	if err != nil {
		return nil, err
	}

	defer func() { _ = resp.Body.Close() }()

	// Handle errors.
	response, _, err := incusParseResponse(resp)
	if err != nil {
		return nil, err
	}

	respOperation, err := response.MetadataAsOperation()
	if err != nil {
		return nil, err
	}

	op := operation{
		Operation: *respOperation,
		r:         r,
		chActive:  make(chan bool),
	}

	return &op, nil
}
incus-6.0.4/client/incus_storage_pools.go000066400000000000000000000065141477363751000205530ustar00rootroot00000000000000package incus

import (
	"fmt"
	"net/url"

	"github.com/lxc/incus/v6/shared/api"
)

// Storage pool handling functions

// GetStoragePoolNames returns the names of all storage pools.
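//
// Illustrative sketch (not part of the upstream sources), assuming "c" is an
// already connected InstanceServer:
//
//	names, err := c.GetStoragePoolNames()
//	if err != nil {
//		return err
//	}
//
//	for _, name := range names {
//		fmt.Println(name)
//	}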
func (r *ProtocolIncus) GetStoragePoolNames() ([]string, error) { if !r.HasExtension("storage") { return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := "/storage-pools" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetStoragePools returns a list of StoragePool entries. func (r *ProtocolIncus) GetStoragePools() ([]api.StoragePool, error) { if !r.HasExtension("storage") { return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") } pools := []api.StoragePool{} // Fetch the raw value _, err := r.queryStruct("GET", "/storage-pools?recursion=1", nil, "", &pools) if err != nil { return nil, err } return pools, nil } // GetStoragePool returns a StoragePool entry for the provided pool name. func (r *ProtocolIncus) GetStoragePool(name string) (*api.StoragePool, string, error) { if !r.HasExtension("storage") { return nil, "", fmt.Errorf("The server is missing the required \"storage\" API extension") } pool := api.StoragePool{} // Fetch the raw value etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), nil, "", &pool) if err != nil { return nil, "", err } return &pool, etag, nil } // CreateStoragePool defines a new storage pool using the provided StoragePool struct. func (r *ProtocolIncus) CreateStoragePool(pool api.StoragePoolsPost) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } // Send the request _, _, err := r.query("POST", "/storage-pools", pool, "") if err != nil { return err } return nil } // UpdateStoragePool updates the pool to match the provided StoragePool struct. func (r *ProtocolIncus) UpdateStoragePool(name string, pool api.StoragePoolPut, ETag string) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), pool, ETag) if err != nil { return err } return nil } // DeleteStoragePool deletes a storage pool. func (r *ProtocolIncus) DeleteStoragePool(name string) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/storage-pools/%s", url.PathEscape(name)), nil, "") if err != nil { return err } return nil } // GetStoragePoolResources gets the resources available to a given storage pool. 
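//
// Illustrative sketch (not part of the upstream sources); the client "c" and
// the pool name "default" are assumptions made for the example.
//
//	res, err := c.GetStoragePoolResources("default")
//	if err != nil {
//		return err
//	}
//
//	fmt.Printf("%+v\n", res)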
func (r *ProtocolIncus) GetStoragePoolResources(name string) (*api.ResourcesStoragePool, error) { if !r.HasExtension("resources") { return nil, fmt.Errorf("The server is missing the required \"resources\" API extension") } res := api.ResourcesStoragePool{} // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/resources", url.PathEscape(name)), nil, "", &res) if err != nil { return nil, err } return &res, nil } incus-6.0.4/client/incus_storage_volumes.go000066400000000000000000001015701477363751000211070ustar00rootroot00000000000000package incus import ( "fmt" "io" "net/http" "net/url" "strings" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/cancel" "github.com/lxc/incus/v6/shared/ioprogress" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/units" ) // Storage volumes handling function // GetStoragePoolVolumeNames returns the names of all volumes in a pool. func (r *ProtocolIncus) GetStoragePoolVolumeNames(pool string) ([]string, error) { if !r.HasExtension("storage") { return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/storage-pools/%s/volumes", url.PathEscape(pool)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetStoragePoolVolumeNamesAllProjects returns the names of all volumes in a pool for all projects. func (r *ProtocolIncus) GetStoragePoolVolumeNamesAllProjects(pool string) (map[string][]string, error) { err := r.CheckExtension("storage") if err != nil { return nil, err } err = r.CheckExtension("storage_volumes_all_projects") if err != nil { return nil, err } // Fetch the raw URL values. urls := []string{} u := api.NewURL().Path("storage-pools", pool, "volumes").WithQuery("all-projects", "true") _, err = r.queryStruct("GET", u.String(), nil, "", &urls) if err != nil { return nil, err } names := make(map[string][]string) for _, urlString := range urls { resourceURL, err := url.Parse(urlString) if err != nil { return nil, fmt.Errorf("Could not parse unexpected URL %q: %w", urlString, err) } project := resourceURL.Query().Get("project") if project == "" { project = api.ProjectDefaultName } _, after, found := strings.Cut(resourceURL.Path, fmt.Sprintf("%s/", u.URL.Path)) if !found { return nil, fmt.Errorf("Unexpected URL path %q", resourceURL) } names[project] = append(names[project], after) } return names, nil } // GetStoragePoolVolumes returns a list of StorageVolume entries for the provided pool. func (r *ProtocolIncus) GetStoragePoolVolumes(pool string) ([]api.StorageVolume, error) { if !r.HasExtension("storage") { return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") } volumes := []api.StorageVolume{} // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes?recursion=1", url.PathEscape(pool)), nil, "", &volumes) if err != nil { return nil, err } return volumes, nil } // GetStoragePoolVolumesAllProjects returns a list of StorageVolume entries for the provided pool for all projects. 
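//
// Illustrative sketch (not part of the upstream sources); the client "c" and
// the pool name are assumptions made for the example.
//
//	volumes, err := c.GetStoragePoolVolumesAllProjects("default")
//	if err != nil {
//		return err
//	}
//
//	for _, vol := range volumes {
//		fmt.Println(vol.Type, vol.Name)
//	}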
func (r *ProtocolIncus) GetStoragePoolVolumesAllProjects(pool string) ([]api.StorageVolume, error) { err := r.CheckExtension("storage") if err != nil { return nil, err } err = r.CheckExtension("storage_volumes_all_projects") if err != nil { return nil, err } volumes := []api.StorageVolume{} url := api.NewURL().Path("storage-pools", pool, "volumes"). WithQuery("recursion", "1"). WithQuery("all-projects", "true") // Fetch the raw value. _, err = r.queryStruct("GET", url.String(), nil, "", &volumes) if err != nil { return nil, err } return volumes, nil } // GetStoragePoolVolumesWithFilter returns a filtered list of StorageVolume entries for the provided pool. func (r *ProtocolIncus) GetStoragePoolVolumesWithFilter(pool string, filters []string) ([]api.StorageVolume, error) { if !r.HasExtension("storage") { return nil, fmt.Errorf("The server is missing the required \"storage\" API extension") } volumes := []api.StorageVolume{} v := url.Values{} v.Set("recursion", "1") v.Set("filter", parseFilters(filters)) // Fetch the raw value _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes?%s", url.PathEscape(pool), v.Encode()), nil, "", &volumes) if err != nil { return nil, err } return volumes, nil } // GetStoragePoolVolumesWithFilterAllProjects returns a filtered list of StorageVolume entries for the provided pool for all projects. func (r *ProtocolIncus) GetStoragePoolVolumesWithFilterAllProjects(pool string, filters []string) ([]api.StorageVolume, error) { err := r.CheckExtension("storage") if err != nil { return nil, err } err = r.CheckExtension("storage_volumes_all_projects") if err != nil { return nil, err } volumes := []api.StorageVolume{} url := api.NewURL().Path("storage-pools", pool, "volumes"). WithQuery("recursion", "1"). WithQuery("filter", parseFilters(filters)). WithQuery("all-projects", "true") // Fetch the raw value. _, err = r.queryStruct("GET", url.String(), nil, "", &volumes) if err != nil { return nil, err } return volumes, nil } // GetStoragePoolVolume returns a StorageVolume entry for the provided pool and volume name. func (r *ProtocolIncus) GetStoragePoolVolume(pool string, volType string, name string) (*api.StorageVolume, string, error) { if !r.HasExtension("storage") { return nil, "", fmt.Errorf("The server is missing the required \"storage\" API extension") } volume := api.StorageVolume{} // Fetch the raw value path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) etag, err := r.queryStruct("GET", path, nil, "", &volume) if err != nil { return nil, "", err } return &volume, etag, nil } // GetStoragePoolVolumeState returns a StorageVolumeState entry for the provided pool and volume name. func (r *ProtocolIncus) GetStoragePoolVolumeState(pool string, volType string, name string) (*api.StorageVolumeState, error) { if !r.HasExtension("storage_volume_state") { return nil, fmt.Errorf("The server is missing the required \"storage_volume_state\" API extension") } // Fetch the raw value state := api.StorageVolumeState{} path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/state", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) _, err := r.queryStruct("GET", path, nil, "", &state) if err != nil { return nil, err } return &state, nil } // CreateStoragePoolVolume defines a new storage volume. 
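//
// Illustrative sketch (not part of the upstream sources): creating a custom
// volume. The client "c", pool name, volume name and size are assumptions
// made for the example.
//
//	vol := api.StorageVolumesPost{
//		Name: "vol1",
//		Type: "custom",
//	}
//	vol.Config = map[string]string{"size": "10GiB"}
//
//	err := c.CreateStoragePoolVolume("default", vol)
//	if err != nil {
//		return err
//	}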
func (r *ProtocolIncus) CreateStoragePoolVolume(pool string, volume api.StorageVolumesPost) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) _, _, err := r.query("POST", path, volume, "") if err != nil { return err } return nil } // CreateStoragePoolVolumeSnapshot defines a new storage volume. func (r *ProtocolIncus) CreateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshot api.StorageVolumeSnapshotsPost) (Operation, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName)) op, _, err := r.queryOperation("POST", path, snapshot, "") if err != nil { return nil, err } return op, nil } // GetStoragePoolVolumeSnapshotNames returns a list of snapshot names for the // storage volume. func (r *ProtocolIncus) GetStoragePoolVolumeSnapshotNames(pool string, volumeType string, volumeName string) ([]string, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetStoragePoolVolumeSnapshots returns a list of snapshots for the storage // volume. func (r *ProtocolIncus) GetStoragePoolVolumeSnapshots(pool string, volumeType string, volumeName string) ([]api.StorageVolumeSnapshot, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } snapshots := []api.StorageVolumeSnapshot{} path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots?recursion=1", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName)) _, err := r.queryStruct("GET", path, nil, "", &snapshots) if err != nil { return nil, err } return snapshots, nil } // GetStoragePoolVolumeSnapshot returns a snapshots for the storage volume. func (r *ProtocolIncus) GetStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (*api.StorageVolumeSnapshot, string, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, "", fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } snapshot := api.StorageVolumeSnapshot{} path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) etag, err := r.queryStruct("GET", path, nil, "", &snapshot) if err != nil { return nil, "", err } return &snapshot, etag, nil } // RenameStoragePoolVolumeSnapshot renames a storage volume snapshot. 
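//
// Illustrative sketch (not part of the upstream sources); all names are
// assumptions made for the example.
//
//	op, err := c.RenameStoragePoolVolumeSnapshot("default", "custom", "vol1", "snap0", api.StorageVolumeSnapshotPost{Name: "snap0-old"})
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()
//	if err != nil {
//		return err
//	}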
func (r *ProtocolIncus) RenameStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, snapshot api.StorageVolumeSnapshotPost) (Operation, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) // Send the request op, _, err := r.queryOperation("POST", path, snapshot, "") if err != nil { return nil, err } return op, nil } // DeleteStoragePoolVolumeSnapshot deletes a storage volume snapshot. func (r *ProtocolIncus) DeleteStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (Operation, error) { if !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } // Send the request path := fmt.Sprintf( "/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) op, _, err := r.queryOperation("DELETE", path, nil, "") if err != nil { return nil, err } return op, nil } // UpdateStoragePoolVolumeSnapshot updates the volume to match the provided StoragePoolVolume struct. func (r *ProtocolIncus) UpdateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, volume api.StorageVolumeSnapshotPut, ETag string) error { if !r.HasExtension("storage_api_volume_snapshots") { return fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s/snapshots/%s", url.PathEscape(pool), url.PathEscape(volumeType), url.PathEscape(volumeName), url.PathEscape(snapshotName)) _, _, err := r.queryOperation("PUT", path, volume, ETag) if err != nil { return err } return nil } // MigrateStoragePoolVolume requests that Incus prepares for a storage volume migration. func (r *ProtocolIncus) MigrateStoragePoolVolume(pool string, volume api.StorageVolumePost) (Operation, error) { if !r.HasExtension("storage_api_remote_volume_handling") { return nil, fmt.Errorf("The server is missing the required \"storage_api_remote_volume_handling\" API extension") } // Quick check. if !volume.Migration { return nil, fmt.Errorf("Can't ask for a rename through MigrateStoragePoolVolume") } var req any var path string srcVolParentName, srcVolSnapName, srcIsSnapshot := api.GetParentAndSnapshotName(volume.Name) if srcIsSnapshot { err := r.CheckExtension("storage_api_remote_volume_snapshot_copy") if err != nil { return nil, err } // Set the actual name of the snapshot without delimiter. 
req = api.StorageVolumeSnapshotPost{ Name: srcVolSnapName, Migration: volume.Migration, Target: volume.Target, } path = api.NewURL().Path("storage-pools", pool, "volumes", "custom", srcVolParentName, "snapshots", srcVolSnapName).String() } else { req = volume path = api.NewURL().Path("storage-pools", pool, "volumes", "custom", volume.Name).String() } // Send the request op, _, err := r.queryOperation("POST", path, req, "") if err != nil { return nil, err } return op, nil } func (r *ProtocolIncus) tryMigrateStoragePoolVolume(source InstanceServer, pool string, req api.StorageVolumePost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Target.Operation // Forward targetOp to remote op go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { req.Target.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) // Send the request top, err := source.MigrateStoragePoolVolume(pool, req) if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop := remoteOperation{ targetOp: top, chDone: make(chan bool), } for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if !success { rop.err = remoteOperationError("Failed storage volume creation", errors) } close(rop.chDone) }() return &rop, nil } // tryCreateStoragePoolVolume attempts to create a storage volume in the specified storage pool. // It will try to do this on every server in the provided list of urls, and waits for the creation to be complete. func (r *ProtocolIncus) tryCreateStoragePoolVolume(pool string, req api.StorageVolumesPost, urls []string) (RemoteOperation, error) { if len(urls) == 0 { return nil, fmt.Errorf("The source server isn't listening on the network") } rop := remoteOperation{ chDone: make(chan bool), } operation := req.Source.Operation // Forward targetOp to remote op go func() { success := false var errors []remoteOperationResult for _, serverURL := range urls { req.Source.Operation = fmt.Sprintf("%s/1.0/operations/%s", serverURL, url.PathEscape(operation)) // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(req.Type)) top, _, err := r.queryOperation("POST", path, req, "") if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) continue } rop := remoteOperation{ targetOp: top, chDone: make(chan bool), } for _, handler := range rop.handlers { _, _ = rop.targetOp.AddHandler(handler) } err = rop.targetOp.Wait() if err != nil { errors = append(errors, remoteOperationResult{URL: serverURL, Error: err}) if localtls.IsConnectionError(err) { continue } break } success = true break } if !success { rop.err = remoteOperationError("Failed storage volume creation", errors) } close(rop.chDone) }() return &rop, nil } // CopyStoragePoolVolume copies an existing storage volume. 
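//
// Illustrative sketch (not part of the upstream sources): copying a custom
// volume between two pools on the same server. The client "c", pool and
// volume names are assumptions made for the example.
//
//	vol, _, err := c.GetStoragePoolVolume("pool1", "custom", "vol1")
//	if err != nil {
//		return err
//	}
//
//	rop, err := c.CopyStoragePoolVolume("pool2", c, "pool1", *vol, &StoragePoolVolumeCopyArgs{Name: "vol1-copy"})
//	if err != nil {
//		return err
//	}
//
//	err = rop.Wait()
//	if err != nil {
//		return err
//	}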
func (r *ProtocolIncus) CopyStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeCopyArgs) (RemoteOperation, error) { if !r.HasExtension("storage_api_local_volume_handling") { return nil, fmt.Errorf("The server is missing the required \"storage_api_local_volume_handling\" API extension") } if args != nil && args.VolumeOnly && !r.HasExtension("storage_api_volume_snapshots") { return nil, fmt.Errorf("The target server is missing the required \"storage_api_volume_snapshots\" API extension") } if args != nil && args.Refresh && !r.HasExtension("custom_volume_refresh") { return nil, fmt.Errorf("The target server is missing the required \"custom_volume_refresh\" API extension") } if args != nil && args.RefreshExcludeOlder && !r.HasExtension("custom_volume_refresh_exclude_older_snapshots") { return nil, fmt.Errorf("The target server is missing the required \"custom_volume_refresh_exclude_older_snapshots\" API extension") } req := api.StorageVolumesPost{ Name: args.Name, Type: volume.Type, Source: api.StorageVolumeSource{ Name: volume.Name, Type: "copy", Pool: sourcePool, VolumeOnly: args.VolumeOnly, Refresh: args.Refresh, RefreshExcludeOlder: args.RefreshExcludeOlder, }, } req.Config = volume.Config req.Description = volume.Description req.ContentType = volume.ContentType sourceInfo, err := source.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get source connection info: %w", err) } destInfo, err := r.GetConnectionInfo() if err != nil { return nil, fmt.Errorf("Failed to get destination connection info: %w", err) } clusterInternalVolumeCopy := r.CheckExtension("cluster_internal_custom_volume_copy") == nil // Copy the storage pool volume locally. if destInfo.URL == sourceInfo.URL && destInfo.SocketPath == sourceInfo.SocketPath && (volume.Location == r.clusterTarget || (volume.Location == "none" && r.clusterTarget == "") || clusterInternalVolumeCopy) { // Project handling if destInfo.Project != sourceInfo.Project { if !r.HasExtension("storage_api_project") { return nil, fmt.Errorf("The server is missing the required \"storage_api_project\" API extension") } req.Source.Project = sourceInfo.Project } if clusterInternalVolumeCopy { req.Source.Location = sourceInfo.Target } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)), req, "") if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } if !r.HasExtension("storage_api_remote_volume_handling") { return nil, fmt.Errorf("The server is missing the required \"storage_api_remote_volume_handling\" API extension") } sourceReq := api.StorageVolumePost{ Migration: true, Name: volume.Name, Pool: sourcePool, } if args != nil { sourceReq.VolumeOnly = args.VolumeOnly } // Push mode migration if args != nil && args.Mode == "push" { // Get target server connection information info, err := r.GetConnectionInfo() if err != nil { return nil, err } // Set the source type and direction req.Source.Type = "migration" req.Source.Mode = "push" // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) // Send the request op, _, err := r.queryOperation("POST", path, req, "") if err != nil { return nil, err } opAPI := op.Get() targetSecrets := map[string]string{} for k, 
v := range opAPI.Metadata { targetSecrets[k] = v.(string) } // Prepare the source request target := api.StorageVolumePostTarget{} target.Operation = opAPI.ID target.Websockets = targetSecrets target.Certificate = info.Certificate sourceReq.Target = &target return r.tryMigrateStoragePoolVolume(source, sourcePool, sourceReq, info.Addresses) } // Get source server connection information info, err := source.GetConnectionInfo() if err != nil { return nil, err } // Get secrets from source server op, err := source.MigrateStoragePoolVolume(sourcePool, sourceReq) if err != nil { return nil, err } opAPI := op.Get() // Prepare source server secrets for remote sourceSecrets := map[string]string{} for k, v := range opAPI.Metadata { sourceSecrets[k] = v.(string) } // Relay mode migration if args != nil && args.Mode == "relay" { // Push copy source fields req.Source.Type = "migration" req.Source.Mode = "push" // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s", url.PathEscape(pool), url.PathEscape(volume.Type)) // Send the request targetOp, _, err := r.queryOperation("POST", path, req, "") if err != nil { return nil, err } targetOpAPI := targetOp.Get() // Extract the websockets targetSecrets := map[string]string{} for k, v := range targetOpAPI.Metadata { targetSecrets[k] = v.(string) } // Launch the relay err = r.proxyMigration(targetOp.(*operation), targetSecrets, source, op.(*operation), sourceSecrets) if err != nil { return nil, err } // Prepare a tracking operation rop := remoteOperation{ targetOp: targetOp, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // Pull mode migration req.Source.Type = "migration" req.Source.Mode = "pull" req.Source.Operation = opAPI.ID req.Source.Websockets = sourceSecrets req.Source.Certificate = info.Certificate return r.tryCreateStoragePoolVolume(pool, req, info.Addresses) } // MoveStoragePoolVolume renames or moves an existing storage volume. func (r *ProtocolIncus) MoveStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeMoveArgs) (RemoteOperation, error) { if !r.HasExtension("storage_api_local_volume_handling") { return nil, fmt.Errorf("The server is missing the required \"storage_api_local_volume_handling\" API extension") } if r != source { return nil, fmt.Errorf("Moving storage volumes between remotes is not implemented") } req := api.StorageVolumePost{ Name: args.Name, Pool: pool, } if args.Project != "" { if !r.HasExtension("storage_volume_project_move") { return nil, fmt.Errorf("The server is missing the required \"storage_volume_project_move\" API extension") } req.Project = args.Project } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(sourcePool), url.PathEscape(volume.Type), volume.Name), req, "") if err != nil { return nil, err } rop := remoteOperation{ targetOp: op, chDone: make(chan bool), } // Forward targetOp to remote op go func() { rop.err = rop.targetOp.Wait() close(rop.chDone) }() return &rop, nil } // UpdateStoragePoolVolume updates the volume to match the provided StoragePoolVolume struct. 
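//
// Illustrative sketch (not part of the upstream sources): resizing a custom
// volume while passing back the ETag from GetStoragePoolVolume. The client
// "c", the names and the exact writable fields used are assumptions made for
// the example.
//
//	_, etag, err := c.GetStoragePoolVolume("default", "custom", "vol1")
//	if err != nil {
//		return err
//	}
//
//	put := api.StorageVolumePut{
//		Config: map[string]string{"size": "20GiB"},
//	}
//
//	err = c.UpdateStoragePoolVolume("default", "custom", "vol1", put, etag)
//	if err != nil {
//		return err
//	}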
func (r *ProtocolIncus) UpdateStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePut, ETag string) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } if volume.Restore != "" && !r.HasExtension("storage_api_volume_snapshots") { return fmt.Errorf("The server is missing the required \"storage_api_volume_snapshots\" API extension") } // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) _, _, err := r.query("PUT", path, volume, ETag) if err != nil { return err } return nil } // DeleteStoragePoolVolume deletes a storage pool. func (r *ProtocolIncus) DeleteStoragePoolVolume(pool string, volType string, name string) error { if !r.HasExtension("storage") { return fmt.Errorf("The server is missing the required \"storage\" API extension") } // Send the request path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) _, _, err := r.query("DELETE", path, nil, "") if err != nil { return err } return nil } // RenameStoragePoolVolume renames a storage volume. func (r *ProtocolIncus) RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) error { if !r.HasExtension("storage_api_volume_rename") { return fmt.Errorf("The server is missing the required \"storage_api_volume_rename\" API extension") } path := fmt.Sprintf("/storage-pools/%s/volumes/%s/%s", url.PathEscape(pool), url.PathEscape(volType), url.PathEscape(name)) // Send the request _, _, err := r.query("POST", path, volume, "") if err != nil { return err } return nil } // GetStorageVolumeBackupNames returns a list of volume backup names. func (r *ProtocolIncus) GetStorageVolumeBackupNames(pool string, volName string) ([]string, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Fetch the raw URL values. urls := []string{} baseURL := fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups", url.PathEscape(pool), url.PathEscape(volName)) _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetStorageVolumeBackups returns a list of custom volume backups. func (r *ProtocolIncus) GetStorageVolumeBackups(pool string, volName string) ([]api.StorageVolumeBackup, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Fetch the raw value backups := []api.StorageVolumeBackup{} _, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups?recursion=1", url.PathEscape(pool), url.PathEscape(volName)), nil, "", &backups) if err != nil { return nil, err } return backups, nil } // GetStorageVolumeBackup returns a custom volume backup. 
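//
// Illustrative sketch (not part of the upstream sources); the client "c" and
// all names are assumptions made for the example.
//
//	backup, _, err := c.GetStorageVolumeBackup("default", "vol1", "backup0")
//	if err != nil {
//		return err
//	}
//
//	fmt.Println(backup.Name)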
func (r *ProtocolIncus) GetStorageVolumeBackup(pool string, volName string, name string) (*api.StorageVolumeBackup, string, error) { if !r.HasExtension("custom_volume_backup") { return nil, "", fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Fetch the raw value backup := api.StorageVolumeBackup{} etag, err := r.queryStruct("GET", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), nil, "", &backup) if err != nil { return nil, "", err } return &backup, etag, nil } // CreateStorageVolumeBackup creates new custom volume backup. func (r *ProtocolIncus) CreateStorageVolumeBackup(pool string, volName string, backup api.StorageVolumeBackupsPost) (Operation, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups", url.PathEscape(pool), url.PathEscape(volName)), backup, "") if err != nil { return nil, err } return op, nil } // RenameStorageVolumeBackup renames a custom volume backup. func (r *ProtocolIncus) RenameStorageVolumeBackup(pool string, volName string, name string, backup api.StorageVolumeBackupPost) (Operation, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Send the request op, _, err := r.queryOperation("POST", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), backup, "") if err != nil { return nil, err } return op, nil } // DeleteStorageVolumeBackup deletes a custom volume backup. func (r *ProtocolIncus) DeleteStorageVolumeBackup(pool string, volName string, name string) (Operation, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Send the request op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/storage-pools/%s/volumes/custom/%s/backups/%s", url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)), nil, "") if err != nil { return nil, err } return op, nil } // GetStorageVolumeBackupFile requests the custom volume backup content. 
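//
// Illustrative sketch (not part of the upstream sources): streaming a custom
// volume backup into a local file. The client "c", the names and the target
// path are assumptions made for the example.
//
//	target, err := os.Create("/tmp/vol1-backup0.tar.gz")
//	if err != nil {
//		return err
//	}
//
//	defer func() { _ = target.Close() }()
//
//	_, err = c.GetStorageVolumeBackupFile("default", "vol1", "backup0", &BackupFileRequest{BackupFile: target})
//	if err != nil {
//		return err
//	}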
func (r *ProtocolIncus) GetStorageVolumeBackupFile(pool string, volName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf("The server is missing the required \"custom_volume_backup\" API extension") } // Build the URL uri := fmt.Sprintf("%s/1.0/storage-pools/%s/volumes/custom/%s/backups/%s/export", r.httpBaseURL.String(), url.PathEscape(pool), url.PathEscape(volName), url.PathEscape(name)) // Add project/target uri, err := r.setQueryAttributes(uri) if err != nil { return nil, err } // Prepare the download request request, err := http.NewRequest("GET", uri, nil) if err != nil { return nil, err } if r.httpUserAgent != "" { request.Header.Set("User-Agent", r.httpUserAgent) } // Start the request response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.DoHTTP, request) if err != nil { return nil, err } defer func() { _ = response.Body.Close() }() defer close(doneCh) if response.StatusCode != http.StatusOK { _, _, err := incusParseResponse(response) if err != nil { return nil, err } } // Handle the data body := response.Body if req.ProgressHandler != nil { body = &ioprogress.ProgressReader{ ReadCloser: response.Body, Tracker: &ioprogress.ProgressTracker{ Length: response.ContentLength, Handler: func(percent int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))}) }, }, } } size, err := io.Copy(req.BackupFile, body) if err != nil { return nil, err } resp := BackupFileResponse{} resp.Size = size return &resp, nil } // CreateStoragePoolVolumeFromISO creates a custom volume from an ISO file. func (r *ProtocolIncus) CreateStoragePoolVolumeFromISO(pool string, args StorageVolumeBackupArgs) (Operation, error) { err := r.CheckExtension("custom_volume_iso") if err != nil { return nil, err } if args.Name == "" { return nil, fmt.Errorf("Missing volume name") } path := fmt.Sprintf("/storage-pools/%s/volumes/custom", url.PathEscape(pool)) // Prepare the HTTP request. reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) if err != nil { return nil, err } req, err := http.NewRequest("POST", reqURL, args.BackupFile) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("X-Incus-name", args.Name) req.Header.Set("X-Incus-type", "iso") // Send the request. resp, err := r.DoHTTP(req) if err != nil { return nil, err } defer func() { _ = resp.Body.Close() }() // Handle errors. response, _, err := incusParseResponse(resp) if err != nil { return nil, err } // Get to the operation. respOperation, err := response.MetadataAsOperation() if err != nil { return nil, err } // Setup an Operation wrapper. op := operation{ Operation: *respOperation, r: r, chActive: make(chan bool), } return &op, nil } // CreateStoragePoolVolumeFromBackup creates a custom volume from a backup file. func (r *ProtocolIncus) CreateStoragePoolVolumeFromBackup(pool string, args StorageVolumeBackupArgs) (Operation, error) { if !r.HasExtension("custom_volume_backup") { return nil, fmt.Errorf(`The server is missing the required "custom_volume_backup" API extension`) } if args.Name != "" && !r.HasExtension("backup_override_name") { return nil, fmt.Errorf(`The server is missing the required "backup_override_name" API extension`) } path := fmt.Sprintf("/storage-pools/%s/volumes/custom", url.PathEscape(pool)) // Prepare the HTTP request. 
reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path)) if err != nil { return nil, err } req, err := http.NewRequest("POST", reqURL, args.BackupFile) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/octet-stream") if args.Name != "" { req.Header.Set("X-Incus-name", args.Name) } // Send the request. resp, err := r.DoHTTP(req) if err != nil { return nil, err } defer func() { _ = resp.Body.Close() }() // Handle errors. response, _, err := incusParseResponse(resp) if err != nil { return nil, err } // Get to the operation. respOperation, err := response.MetadataAsOperation() if err != nil { return nil, err } // Setup an Operation wrapper. op := operation{ Operation: *respOperation, r: r, chActive: make(chan bool), } return &op, nil } incus-6.0.4/client/incus_warnings.go000066400000000000000000000043721477363751000175230ustar00rootroot00000000000000package incus import ( "fmt" "net/url" "github.com/lxc/incus/v6/shared/api" ) // Warning handling functions // GetWarningUUIDs returns a list of operation uuids. func (r *ProtocolIncus) GetWarningUUIDs() ([]string, error) { if !r.HasExtension("warnings") { return nil, fmt.Errorf("The server is missing the required \"warnings\" API extension") } // Fetch the raw values. urls := []string{} baseURL := "/warnings" _, err := r.queryStruct("GET", baseURL, nil, "", &urls) if err != nil { return nil, err } // Parse it. return urlsToResourceNames(baseURL, urls...) } // GetWarnings returns a list of warnings. func (r *ProtocolIncus) GetWarnings() ([]api.Warning, error) { if !r.HasExtension("warnings") { return nil, fmt.Errorf("The server is missing the required \"warnings\" API extension") } warnings := []api.Warning{} _, err := r.queryStruct("GET", "/warnings?recursion=1", nil, "", &warnings) if err != nil { return nil, err } return warnings, nil } // GetWarning returns the warning with the given UUID. func (r *ProtocolIncus) GetWarning(UUID string) (*api.Warning, string, error) { if !r.HasExtension("warnings") { return nil, "", fmt.Errorf("The server is missing the required \"warnings\" API extension") } warning := api.Warning{} etag, err := r.queryStruct("GET", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), nil, "", &warning) if err != nil { return nil, "", err } return &warning, etag, nil } // UpdateWarning updates the warning with the given UUID. func (r *ProtocolIncus) UpdateWarning(UUID string, warning api.WarningPut, ETag string) error { if !r.HasExtension("warnings") { return fmt.Errorf("The server is missing the required \"warnings\" API extension") } // Send the request _, _, err := r.query("PUT", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), warning, ETag) if err != nil { return err } return nil } // DeleteWarning deletes the provided warning. func (r *ProtocolIncus) DeleteWarning(UUID string) error { if !r.HasExtension("warnings") { return fmt.Errorf("The server is missing the required \"warnings\" API extension") } // Send the request _, _, err := r.query("DELETE", fmt.Sprintf("/warnings/%s", url.PathEscape(UUID)), nil, "") if err != nil { return err } return nil } incus-6.0.4/client/interfaces.go000066400000000000000000001002511477363751000166060ustar00rootroot00000000000000package incus import ( "context" "io" "net" "net/http" "github.com/gorilla/websocket" "github.com/pkg/sftp" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/cancel" "github.com/lxc/incus/v6/shared/ioprogress" ) // The Operation type represents a currently running operation. 
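//
// Illustrative sketch (not part of the upstream sources): most asynchronous
// client calls return an Operation which the caller then waits on. The client
// "c" and the snapshot names are assumptions made for the example.
//
//	op, err := c.DeleteStoragePoolVolumeSnapshot("default", "custom", "vol1", "snap0")
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()
//	if err != nil {
//		return err
//	}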
type Operation interface { AddHandler(function func(api.Operation)) (target *EventTarget, err error) Cancel() (err error) Get() (op api.Operation) GetWebsocket(secret string) (conn *websocket.Conn, err error) RemoveHandler(target *EventTarget) (err error) Refresh() (err error) Wait() (err error) WaitContext(ctx context.Context) error } // The RemoteOperation type represents an Operation that may be using multiple servers. type RemoteOperation interface { AddHandler(function func(api.Operation)) (target *EventTarget, err error) CancelTarget() (err error) GetTarget() (op *api.Operation, err error) Wait() (err error) } // The Server type represents a generic read-only server. type Server interface { GetConnectionInfo() (info *ConnectionInfo, err error) GetHTTPClient() (client *http.Client, err error) DoHTTP(req *http.Request) (resp *http.Response, err error) Disconnect() } // The ImageServer type represents a read-only image server. type ImageServer interface { Server // Image handling functions GetImages() (images []api.Image, err error) GetImagesAllProjects() (images []api.Image, err error) GetImageFingerprints() (fingerprints []string, err error) GetImagesWithFilter(filters []string) (images []api.Image, err error) GetImage(fingerprint string) (image *api.Image, ETag string, err error) GetImageFile(fingerprint string, req ImageFileRequest) (resp *ImageFileResponse, err error) GetImageSecret(fingerprint string) (secret string, err error) GetPrivateImage(fingerprint string, secret string) (image *api.Image, ETag string, err error) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (resp *ImageFileResponse, err error) GetImageAliases() (aliases []api.ImageAliasesEntry, err error) GetImageAliasNames() (names []string, err error) GetImageAlias(name string) (alias *api.ImageAliasesEntry, ETag string, err error) GetImageAliasType(imageType string, name string) (alias *api.ImageAliasesEntry, ETag string, err error) GetImageAliasArchitectures(imageType string, name string) (entries map[string]*api.ImageAliasesEntry, err error) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) } // The InstanceServer type represents a full featured Incus server. type InstanceServer interface { ImageServer // Server functions GetMetrics() (metrics string, err error) GetServer() (server *api.Server, ETag string, err error) GetServerResources() (resources *api.Resources, err error) UpdateServer(server api.ServerPut, ETag string) (err error) ApplyServerPreseed(config api.InitPreseed) error HasExtension(extension string) (exists bool) RequireAuthenticated(authenticated bool) IsClustered() (clustered bool) UseTarget(name string) (client InstanceServer) UseProject(name string) (client InstanceServer) // Certificate functions GetCertificateFingerprints() (fingerprints []string, err error) GetCertificates() (certificates []api.Certificate, err error) GetCertificate(fingerprint string) (certificate *api.Certificate, ETag string, err error) CreateCertificate(certificate api.CertificatesPost) (err error) UpdateCertificate(fingerprint string, certificate api.CertificatePut, ETag string) (err error) DeleteCertificate(fingerprint string) (err error) CreateCertificateToken(certificate api.CertificatesPost) (op Operation, err error) // Instance functions. 
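	//
	// Illustrative sketch (not part of the upstream sources): listing every
	// instance visible to a connected InstanceServer "c". The api.InstanceTypeAny
	// constant is an assumption made for the example.
	//
	//	instances, err := c.GetInstances(api.InstanceTypeAny)
	//	if err != nil {
	//		return err
	//	}
	//
	//	for _, inst := range instances {
	//		fmt.Println(inst.Name)
	//	}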
GetInstanceNames(instanceType api.InstanceType) (names []string, err error) GetInstanceNamesAllProjects(instanceType api.InstanceType) (names map[string][]string, err error) GetInstances(instanceType api.InstanceType) (instances []api.Instance, err error) GetInstancesFull(instanceType api.InstanceType) (instances []api.InstanceFull, err error) GetInstancesAllProjects(instanceType api.InstanceType) (instances []api.Instance, err error) GetInstancesFullAllProjects(instanceType api.InstanceType) (instances []api.InstanceFull, err error) GetInstancesWithFilter(instanceType api.InstanceType, filters []string) (instances []api.Instance, err error) GetInstancesFullWithFilter(instanceType api.InstanceType, filters []string) (instances []api.InstanceFull, err error) GetInstancesAllProjectsWithFilter(instanceType api.InstanceType, filters []string) (instances []api.Instance, err error) GetInstancesFullAllProjectsWithFilter(instanceType api.InstanceType, filters []string) (instances []api.InstanceFull, err error) GetInstance(name string) (instance *api.Instance, ETag string, err error) GetInstanceFull(name string) (instance *api.InstanceFull, ETag string, err error) CreateInstance(instance api.InstancesPost) (op Operation, err error) CreateInstanceFromImage(source ImageServer, image api.Image, req api.InstancesPost) (op RemoteOperation, err error) CopyInstance(source InstanceServer, instance api.Instance, args *InstanceCopyArgs) (op RemoteOperation, err error) UpdateInstance(name string, instance api.InstancePut, ETag string) (op Operation, err error) RenameInstance(name string, instance api.InstancePost) (op Operation, err error) MigrateInstance(name string, instance api.InstancePost) (op Operation, err error) DeleteInstance(name string) (op Operation, err error) UpdateInstances(state api.InstancesPut, ETag string) (op Operation, err error) RebuildInstance(instanceName string, req api.InstanceRebuildPost) (op Operation, err error) RebuildInstanceFromImage(source ImageServer, image api.Image, instanceName string, req api.InstanceRebuildPost) (op RemoteOperation, err error) ExecInstance(instanceName string, exec api.InstanceExecPost, args *InstanceExecArgs) (op Operation, err error) ConsoleInstance(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (op Operation, err error) ConsoleInstanceDynamic(instanceName string, console api.InstanceConsolePost, args *InstanceConsoleArgs) (Operation, func(io.ReadWriteCloser) error, error) GetInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (content io.ReadCloser, err error) DeleteInstanceConsoleLog(instanceName string, args *InstanceConsoleLogArgs) (err error) GetInstanceFile(instanceName string, path string) (content io.ReadCloser, resp *InstanceFileResponse, err error) CreateInstanceFile(instanceName string, path string, args InstanceFileArgs) (err error) DeleteInstanceFile(instanceName string, path string) (err error) GetInstanceFileSFTPConn(instanceName string) (net.Conn, error) GetInstanceFileSFTP(instanceName string) (*sftp.Client, error) GetInstanceSnapshotNames(instanceName string) (names []string, err error) GetInstanceSnapshots(instanceName string) (snapshots []api.InstanceSnapshot, err error) GetInstanceSnapshot(instanceName string, name string) (snapshot *api.InstanceSnapshot, ETag string, err error) CreateInstanceSnapshot(instanceName string, snapshot api.InstanceSnapshotsPost) (op Operation, err error) CopyInstanceSnapshot(source InstanceServer, instanceName string, snapshot api.InstanceSnapshot, 
args *InstanceSnapshotCopyArgs) (op RemoteOperation, err error) RenameInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (op Operation, err error) MigrateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPost) (op Operation, err error) DeleteInstanceSnapshot(instanceName string, name string) (op Operation, err error) UpdateInstanceSnapshot(instanceName string, name string, instance api.InstanceSnapshotPut, ETag string) (op Operation, err error) GetInstanceBackupNames(instanceName string) (names []string, err error) GetInstanceBackups(instanceName string) (backups []api.InstanceBackup, err error) GetInstanceBackup(instanceName string, name string) (backup *api.InstanceBackup, ETag string, err error) CreateInstanceBackup(instanceName string, backup api.InstanceBackupsPost) (op Operation, err error) RenameInstanceBackup(instanceName string, name string, backup api.InstanceBackupPost) (op Operation, err error) DeleteInstanceBackup(instanceName string, name string) (op Operation, err error) GetInstanceBackupFile(instanceName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) CreateInstanceFromBackup(args InstanceBackupArgs) (op Operation, err error) GetInstanceState(name string) (state *api.InstanceState, ETag string, err error) UpdateInstanceState(name string, state api.InstanceStatePut, ETag string) (op Operation, err error) GetInstanceAccess(name string) (access api.Access, err error) GetInstanceLogfiles(name string) (logfiles []string, err error) GetInstanceLogfile(name string, filename string) (content io.ReadCloser, err error) DeleteInstanceLogfile(name string, filename string) (err error) GetInstanceMetadata(name string) (metadata *api.ImageMetadata, ETag string, err error) UpdateInstanceMetadata(name string, metadata api.ImageMetadata, ETag string) (err error) GetInstanceTemplateFiles(instanceName string) (templates []string, err error) GetInstanceTemplateFile(instanceName string, templateName string) (content io.ReadCloser, err error) CreateInstanceTemplateFile(instanceName string, templateName string, content io.ReadSeeker) (err error) DeleteInstanceTemplateFile(name string, templateName string) (err error) GetInstanceDebugMemory(name string, format string) (rc io.ReadCloser, err error) // Event handling functions GetEvents() (listener *EventListener, err error) GetEventsAllProjects() (listener *EventListener, err error) SendEvent(event api.Event) error // Image functions CreateImage(image api.ImagesPost, args *ImageCreateArgs) (op Operation, err error) CopyImage(source ImageServer, image api.Image, args *ImageCopyArgs) (op RemoteOperation, err error) UpdateImage(fingerprint string, image api.ImagePut, ETag string) (err error) DeleteImage(fingerprint string) (op Operation, err error) RefreshImage(fingerprint string) (op Operation, err error) CreateImageSecret(fingerprint string) (op Operation, err error) CreateImageAlias(alias api.ImageAliasesPost) (err error) UpdateImageAlias(name string, alias api.ImageAliasesEntryPut, ETag string) (err error) RenameImageAlias(name string, alias api.ImageAliasesEntryPost) (err error) DeleteImageAlias(name string) (err error) // Configuration metadata functions GetMetadataConfiguration() (meta *api.MetadataConfiguration, err error) // Network functions ("network" API extension) GetNetworkNames() (names []string, err error) GetNetworks() (networks []api.Network, err error) GetNetworksAllProjects() (networks []api.Network, err error) GetNetwork(name string) 
(network *api.Network, ETag string, err error) GetNetworkLeases(name string) (leases []api.NetworkLease, err error) GetNetworkState(name string) (state *api.NetworkState, err error) CreateNetwork(network api.NetworksPost) (err error) UpdateNetwork(name string, network api.NetworkPut, ETag string) (err error) RenameNetwork(name string, network api.NetworkPost) (err error) DeleteNetwork(name string) (err error) // Network forward functions ("network_forward" API extension) GetNetworkForwardAddresses(networkName string) ([]string, error) GetNetworkForwards(networkName string) ([]api.NetworkForward, error) GetNetworkForward(networkName string, listenAddress string) (forward *api.NetworkForward, ETag string, err error) CreateNetworkForward(networkName string, forward api.NetworkForwardsPost) error UpdateNetworkForward(networkName string, listenAddress string, forward api.NetworkForwardPut, ETag string) (err error) DeleteNetworkForward(networkName string, listenAddress string) (err error) // Network load balancer functions ("network_load_balancer" API extension) GetNetworkLoadBalancerAddresses(networkName string) ([]string, error) GetNetworkLoadBalancers(networkName string) ([]api.NetworkLoadBalancer, error) GetNetworkLoadBalancer(networkName string, listenAddress string) (forward *api.NetworkLoadBalancer, ETag string, err error) CreateNetworkLoadBalancer(networkName string, forward api.NetworkLoadBalancersPost) error UpdateNetworkLoadBalancer(networkName string, listenAddress string, forward api.NetworkLoadBalancerPut, ETag string) (err error) DeleteNetworkLoadBalancer(networkName string, listenAddress string) (err error) GetNetworkLoadBalancerState(networkName string, listenAddress string) (lbState *api.NetworkLoadBalancerState, err error) // Network peer functions ("network_peer" API extension) GetNetworkPeerNames(networkName string) ([]string, error) GetNetworkPeers(networkName string) ([]api.NetworkPeer, error) GetNetworkPeer(networkName string, peerName string) (peer *api.NetworkPeer, ETag string, err error) CreateNetworkPeer(networkName string, peer api.NetworkPeersPost) error UpdateNetworkPeer(networkName string, peerName string, peer api.NetworkPeerPut, ETag string) (err error) DeleteNetworkPeer(networkName string, peerName string) (err error) // Network ACL functions ("network_acl" API extension) GetNetworkACLNames() (names []string, err error) GetNetworkACLs() (acls []api.NetworkACL, err error) GetNetworkACLsAllProjects() (acls []api.NetworkACL, err error) GetNetworkACL(name string) (acl *api.NetworkACL, ETag string, err error) GetNetworkACLLogfile(name string) (log io.ReadCloser, err error) CreateNetworkACL(acl api.NetworkACLsPost) (err error) UpdateNetworkACL(name string, acl api.NetworkACLPut, ETag string) (err error) RenameNetworkACL(name string, acl api.NetworkACLPost) (err error) DeleteNetworkACL(name string) (err error) // Network allocations functions ("network_allocations" API extension) GetNetworkAllocations() (allocations []api.NetworkAllocations, err error) GetNetworkAllocationsAllProjects() (allocations []api.NetworkAllocations, err error) // Network zone functions ("network_dns" API extension) GetNetworkZonesAllProjects() (zones []api.NetworkZone, err error) GetNetworkZoneNames() (names []string, err error) GetNetworkZones() (zones []api.NetworkZone, err error) GetNetworkZone(name string) (zone *api.NetworkZone, ETag string, err error) CreateNetworkZone(zone api.NetworkZonesPost) (err error) UpdateNetworkZone(name string, zone api.NetworkZonePut, ETag string) (err error) 
DeleteNetworkZone(name string) (err error) GetNetworkZoneRecordNames(zone string) (names []string, err error) GetNetworkZoneRecords(zone string) (records []api.NetworkZoneRecord, err error) GetNetworkZoneRecord(zone string, name string) (record *api.NetworkZoneRecord, ETag string, err error) CreateNetworkZoneRecord(zone string, record api.NetworkZoneRecordsPost) (err error) UpdateNetworkZoneRecord(zone string, name string, record api.NetworkZoneRecordPut, ETag string) (err error) DeleteNetworkZoneRecord(zone string, name string) (err error) // Network integrations functions ("network_integrations" API extension) GetNetworkIntegrationNames() (names []string, err error) GetNetworkIntegrations() (integrations []api.NetworkIntegration, err error) GetNetworkIntegration(name string) (integration *api.NetworkIntegration, ETag string, err error) CreateNetworkIntegration(integration api.NetworkIntegrationsPost) (err error) UpdateNetworkIntegration(name string, integration api.NetworkIntegrationPut, ETag string) (err error) RenameNetworkIntegration(name string, integration api.NetworkIntegrationPost) (err error) DeleteNetworkIntegration(name string) (err error) // Operation functions GetOperationUUIDs() (uuids []string, err error) GetOperations() (operations []api.Operation, err error) GetOperationsAllProjects() (operations []api.Operation, err error) GetOperation(uuid string) (op *api.Operation, ETag string, err error) GetOperationWait(uuid string, timeout int) (op *api.Operation, ETag string, err error) GetOperationWaitSecret(uuid string, secret string, timeout int) (op *api.Operation, ETag string, err error) GetOperationWebsocket(uuid string, secret string) (conn *websocket.Conn, err error) DeleteOperation(uuid string) (err error) // Profile functions GetProfilesAllProjects() (profiles []api.Profile, err error) GetProfileNames() (names []string, err error) GetProfiles() (profiles []api.Profile, err error) GetProfile(name string) (profile *api.Profile, ETag string, err error) CreateProfile(profile api.ProfilesPost) (err error) UpdateProfile(name string, profile api.ProfilePut, ETag string) (err error) RenameProfile(name string, profile api.ProfilePost) (err error) DeleteProfile(name string) (err error) // Project functions GetProjectNames() (names []string, err error) GetProjects() (projects []api.Project, err error) GetProject(name string) (project *api.Project, ETag string, err error) GetProjectState(name string) (project *api.ProjectState, err error) GetProjectAccess(name string) (access api.Access, err error) CreateProject(project api.ProjectsPost) (err error) UpdateProject(name string, project api.ProjectPut, ETag string) (err error) RenameProject(name string, project api.ProjectPost) (op Operation, err error) DeleteProject(name string) (err error) DeleteProjectForce(name string) (err error) // Storage pool functions ("storage" API extension) GetStoragePoolNames() (names []string, err error) GetStoragePools() (pools []api.StoragePool, err error) GetStoragePool(name string) (pool *api.StoragePool, ETag string, err error) GetStoragePoolResources(name string) (resources *api.ResourcesStoragePool, err error) CreateStoragePool(pool api.StoragePoolsPost) (err error) UpdateStoragePool(name string, pool api.StoragePoolPut, ETag string) (err error) DeleteStoragePool(name string) (err error) // Storage bucket functions ("storage_buckets" API extension) GetStoragePoolBucketNames(poolName string) ([]string, error) GetStoragePoolBucketsAllProjects(poolName string) ([]api.StorageBucket, error) 
GetStoragePoolBuckets(poolName string) ([]api.StorageBucket, error) GetStoragePoolBucket(poolName string, bucketName string) (bucket *api.StorageBucket, ETag string, err error) CreateStoragePoolBucket(poolName string, bucket api.StorageBucketsPost) (*api.StorageBucketKey, error) UpdateStoragePoolBucket(poolName string, bucketName string, bucket api.StorageBucketPut, ETag string) (err error) DeleteStoragePoolBucket(poolName string, bucketName string) (err error) GetStoragePoolBucketKeyNames(poolName string, bucketName string) ([]string, error) GetStoragePoolBucketKeys(poolName string, bucketName string) ([]api.StorageBucketKey, error) GetStoragePoolBucketKey(poolName string, bucketName string, keyName string) (key *api.StorageBucketKey, ETag string, err error) CreateStoragePoolBucketKey(poolName string, bucketName string, key api.StorageBucketKeysPost) (newKey *api.StorageBucketKey, err error) UpdateStoragePoolBucketKey(poolName string, bucketName string, keyName string, key api.StorageBucketKeyPut, ETag string) (err error) DeleteStoragePoolBucketKey(poolName string, bucketName string, keyName string) (err error) // Storage bucket backup functions ("storage_bucket_backup" API extension) CreateStoragePoolBucketBackup(poolName string, bucketName string, backup api.StorageBucketBackupsPost) (op Operation, err error) DeleteStoragePoolBucketBackup(pool string, bucketName string, name string) (op Operation, err error) GetStoragePoolBucketBackupFile(pool string, bucketName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) CreateStoragePoolBucketFromBackup(pool string, args StoragePoolBucketBackupArgs) (op Operation, err error) // Storage volume functions ("storage" API extension) GetStoragePoolVolumeNames(pool string) (names []string, err error) GetStoragePoolVolumeNamesAllProjects(pool string) (names map[string][]string, err error) GetStoragePoolVolumes(pool string) (volumes []api.StorageVolume, err error) GetStoragePoolVolumesAllProjects(pool string) (volumes []api.StorageVolume, err error) GetStoragePoolVolumesWithFilter(pool string, filters []string) (volumes []api.StorageVolume, err error) GetStoragePoolVolumesWithFilterAllProjects(pool string, filters []string) (volumes []api.StorageVolume, err error) GetStoragePoolVolume(pool string, volType string, name string) (volume *api.StorageVolume, ETag string, err error) GetStoragePoolVolumeState(pool string, volType string, name string) (state *api.StorageVolumeState, err error) CreateStoragePoolVolume(pool string, volume api.StorageVolumesPost) (err error) UpdateStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePut, ETag string) (err error) DeleteStoragePoolVolume(pool string, volType string, name string) (err error) RenameStoragePoolVolume(pool string, volType string, name string, volume api.StorageVolumePost) (err error) CopyStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeCopyArgs) (op RemoteOperation, err error) MoveStoragePoolVolume(pool string, source InstanceServer, sourcePool string, volume api.StorageVolume, args *StoragePoolVolumeMoveArgs) (op RemoteOperation, err error) MigrateStoragePoolVolume(pool string, volume api.StorageVolumePost) (op Operation, err error) // Storage volume snapshot functions ("storage_api_volume_snapshots" API extension) CreateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshot api.StorageVolumeSnapshotsPost) (op Operation, err error) 
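	// Illustrative sketch only (not part of the interface): snapshotting a
	// custom volume and removing the snapshot again. The client variable "c",
	// pool, volume and snapshot names are assumptions for this example:
	//
	//	op, err := c.CreateStoragePoolVolumeSnapshot("default", "custom", "vol1", api.StorageVolumeSnapshotsPost{Name: "snap0"})
	//	if err != nil {
	//		return err
	//	}
	//
	//	err = op.Wait()
	//	if err != nil {
	//		return err
	//	}
	//
	//	op, err = c.DeleteStoragePoolVolumeSnapshot("default", "custom", "vol1", "snap0")
	//	if err != nil {
	//		return err
	//	}
	//
	//	err = op.Wait()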
DeleteStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (op Operation, err error) GetStoragePoolVolumeSnapshotNames(pool string, volumeType string, volumeName string) (names []string, err error) GetStoragePoolVolumeSnapshots(pool string, volumeType string, volumeName string) (snapshots []api.StorageVolumeSnapshot, err error) GetStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string) (snapshot *api.StorageVolumeSnapshot, ETag string, err error) RenameStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, snapshot api.StorageVolumeSnapshotPost) (op Operation, err error) UpdateStoragePoolVolumeSnapshot(pool string, volumeType string, volumeName string, snapshotName string, volume api.StorageVolumeSnapshotPut, ETag string) (err error) // Storage volume backup functions ("custom_volume_backup" API extension) GetStorageVolumeBackupNames(pool string, volName string) (names []string, err error) GetStorageVolumeBackups(pool string, volName string) (backups []api.StorageVolumeBackup, err error) GetStorageVolumeBackup(pool string, volName string, name string) (backup *api.StorageVolumeBackup, ETag string, err error) CreateStorageVolumeBackup(pool string, volName string, backup api.StorageVolumeBackupsPost) (op Operation, err error) RenameStorageVolumeBackup(pool string, volName string, name string, backup api.StorageVolumeBackupPost) (op Operation, err error) DeleteStorageVolumeBackup(pool string, volName string, name string) (op Operation, err error) GetStorageVolumeBackupFile(pool string, volName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error) CreateStoragePoolVolumeFromBackup(pool string, args StorageVolumeBackupArgs) (op Operation, err error) // Storage volume ISO import function ("custom_volume_iso" API extension) CreateStoragePoolVolumeFromISO(pool string, args StorageVolumeBackupArgs) (op Operation, err error) // Cluster functions ("cluster" API extensions) GetCluster() (cluster *api.Cluster, ETag string, err error) UpdateCluster(cluster api.ClusterPut, ETag string) (op Operation, err error) DeleteClusterMember(name string, force bool) (err error) GetClusterMemberNames() (names []string, err error) GetClusterMembers() (members []api.ClusterMember, err error) GetClusterMember(name string) (member *api.ClusterMember, ETag string, err error) UpdateClusterMember(name string, member api.ClusterMemberPut, ETag string) (err error) RenameClusterMember(name string, member api.ClusterMemberPost) (err error) CreateClusterMember(member api.ClusterMembersPost) (op Operation, err error) UpdateClusterCertificate(certs api.ClusterCertificatePut, ETag string) (err error) GetClusterMemberState(name string) (*api.ClusterMemberState, string, error) UpdateClusterMemberState(name string, state api.ClusterMemberStatePost) (op Operation, err error) GetClusterGroups() ([]api.ClusterGroup, error) GetClusterGroupNames() ([]string, error) RenameClusterGroup(name string, group api.ClusterGroupPost) error CreateClusterGroup(group api.ClusterGroupsPost) error DeleteClusterGroup(name string) error UpdateClusterGroup(name string, group api.ClusterGroupPut, ETag string) error GetClusterGroup(name string) (*api.ClusterGroup, string, error) // Warning functions GetWarningUUIDs() (uuids []string, err error) GetWarnings() (warnings []api.Warning, err error) GetWarning(UUID string) (warning *api.Warning, ETag string, err error) UpdateWarning(UUID string, warning 
api.WarningPut, ETag string) (err error) DeleteWarning(UUID string) (err error) // Internal functions (for internal use) RawQuery(method string, path string, data any, queryETag string) (resp *api.Response, ETag string, err error) RawWebsocket(path string) (conn *websocket.Conn, err error) RawOperation(method string, path string, data any, queryETag string) (op Operation, ETag string, err error) } // The ConnectionInfo struct represents general information for a connection. type ConnectionInfo struct { Addresses []string Certificate string Protocol string URL string SocketPath string Project string Target string } // The BackupFileRequest struct is used for a backup download request. type BackupFileRequest struct { // Writer for the backup file BackupFile io.WriteSeeker // Progress handler (called whenever some progress is made) ProgressHandler func(progress ioprogress.ProgressData) // A canceler that can be used to interrupt some part of the image download request Canceler *cancel.HTTPRequestCanceller } // The BackupFileResponse struct is used as the response for backup downloads. type BackupFileResponse struct { // Size of backup file Size int64 } // The ImageCreateArgs struct is used for direct image upload. type ImageCreateArgs struct { // Reader for the meta file MetaFile io.Reader // Filename for the meta file MetaName string // Reader for the rootfs file RootfsFile io.Reader // Filename for the rootfs file RootfsName string // Progress handler (called with upload progress) ProgressHandler func(progress ioprogress.ProgressData) // Type of the image (container or virtual-machine) Type string } // The ImageFileRequest struct is used for an image download request. type ImageFileRequest struct { // Writer for the metadata file MetaFile io.WriteSeeker // Writer for the rootfs file RootfsFile io.WriteSeeker // Progress handler (called whenever some progress is made) ProgressHandler func(progress ioprogress.ProgressData) // A canceler that can be used to interrupt some part of the image download request Canceler *cancel.HTTPRequestCanceller // Path retriever for image delta downloads // If set, it must return the path to the image file or an empty string if not available DeltaSourceRetriever func(fingerprint string, file string) string } // The ImageFileResponse struct is used as the response for image downloads. type ImageFileResponse struct { // Filename for the metadata file MetaName string // Size of the metadata file MetaSize int64 // Filename for the rootfs file RootfsName string // Size of the rootfs file RootfsSize int64 } // The ImageCopyArgs struct is used to pass additional options during image copy. type ImageCopyArgs struct { // Aliases to add to the copied image. Aliases []api.ImageAlias // Whether to have Incus keep this image up to date AutoUpdate bool // Whether to copy the source image aliases to the target CopyAliases bool // Whether this image is to be made available to unauthenticated users Public bool // The image type to use for resolution Type string // The transfer mode, can be "pull" (default), "push" or "relay" Mode string // List of profiles to apply on the target. Profiles []string } // The StoragePoolVolumeCopyArgs struct is used to pass additional options // during storage volume copy. 
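//
// A rough usage sketch (the "src" and "dst" clients, the pool names and the
// volume name are assumptions for this example):
//
//	vol, _, err := src.GetStoragePoolVolume("default", "custom", "vol1")
//	if err != nil {
//		return err
//	}
//
//	op, err := dst.CopyStoragePoolVolume("default", src, "default", *vol, &StoragePoolVolumeCopyArgs{Name: "vol1-copy"})
//	if err != nil {
//		return err
//	}
//
//	err = op.Wait()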
type StoragePoolVolumeCopyArgs struct {
	// New name for the target
	Name string

	// The transfer mode, can be "pull" (default), "push" or "relay"
	Mode string

	// API extension: storage_api_volume_snapshots
	VolumeOnly bool

	// API extension: custom_volume_refresh
	Refresh bool

	// API extension: custom_volume_refresh_exclude_older_snapshots
	RefreshExcludeOlder bool
}

// The StoragePoolVolumeMoveArgs struct is used to pass additional options
// during storage volume move.
type StoragePoolVolumeMoveArgs struct {
	StoragePoolVolumeCopyArgs

	// API extension: storage_volume_project_move
	Project string
}

// The StorageVolumeBackupArgs struct is used when creating a storage volume from a backup.
// API extension: custom_volume_backup.
type StorageVolumeBackupArgs struct {
	// The backup file
	BackupFile io.Reader

	// Name to import backup as
	Name string
}

// The InstanceBackupArgs struct is used when creating an instance from a backup.
type InstanceBackupArgs struct {
	// The backup file
	BackupFile io.Reader

	// Storage pool to use
	PoolName string

	// Name to import backup as
	Name string
}

// The InstanceCopyArgs struct is used to pass additional options during instance copy.
type InstanceCopyArgs struct {
	// If set, the instance will be renamed on copy
	Name string

	// If set, the instance running state will be transferred (live migration)
	Live bool

	// If set, only the instance will be copied, its snapshots won't
	InstanceOnly bool

	// The transfer mode, can be "pull" (default), "push" or "relay"
	Mode string

	// API extension: container_incremental_copy
	// Perform an incremental copy
	Refresh bool

	// API extension: custom_volume_refresh_exclude_older_snapshots
	RefreshExcludeOlder bool

	// API extension: instance_allow_inconsistent_copy
	AllowInconsistent bool
}

// The InstanceSnapshotCopyArgs struct is used to pass additional options during instance copy.
type InstanceSnapshotCopyArgs struct {
	// If set, the instance will be renamed on copy
	Name string

	// The transfer mode, can be "pull" (default), "push" or "relay"
	Mode string

	// API extension: container_snapshot_stateful_migration
	// If set, the instance running state will be transferred (live migration)
	Live bool
}

// The InstanceConsoleArgs struct is used to pass additional options during an
// instance console session.
type InstanceConsoleArgs struct {
	// Bidirectional fd to pass to the instance
	Terminal io.ReadWriteCloser

	// Control message handler (window resize)
	Control func(conn *websocket.Conn)

	// Closing this Channel causes a disconnect from the instance's console
	ConsoleDisconnect chan bool
}

// The InstanceConsoleLogArgs struct is used to pass additional options during an
// instance console log request.
type InstanceConsoleLogArgs struct{}

// The InstanceExecArgs struct is used to pass additional options during instance exec.
type InstanceExecArgs struct {
	// Standard input
	Stdin io.Reader

	// Standard output
	Stdout io.Writer

	// Standard error
	Stderr io.Writer

	// Control message handler (window resize, signals, ...)
	Control func(conn *websocket.Conn)

	// Channel that will be closed when all data operations are done
	DataDone chan bool
}

// The InstanceFileArgs struct is used to pass the various options for an instance file upload.
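//
// A rough usage sketch (the client variable "c", the instance name, path and
// content are assumptions for this example; the struct is typically consumed
// by the client's CreateInstanceFile call):
//
//	args := InstanceFileArgs{
//		Content: strings.NewReader("hello\n"),
//		UID:     0,
//		GID:     0,
//		Mode:    0644,
//		Type:    "file",
//	}
//
//	err := c.CreateInstanceFile("c1", "/root/hello.txt", args)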
type InstanceFileArgs struct { // File content Content io.ReadSeeker // User id that owns the file UID int64 // Group id that owns the file GID int64 // File permissions Mode int // File type (file or directory) Type string // File write mode (overwrite or append) WriteMode string } // The InstanceFileResponse struct is used as part of the response for a instance file download. type InstanceFileResponse struct { // User id that owns the file UID int64 // Group id that owns the file GID int64 // File permissions Mode int // File type (file or directory) Type string // If a directory, the list of files inside it Entries []string } // The StoragePoolBucketBackupArgs struct is used when creating a storage volume from a backup. // API extension: storage_bucket_backup. type StoragePoolBucketBackupArgs struct { // The backup file BackupFile io.Reader // Name to import backup as Name string } incus-6.0.4/client/oci.go000066400000000000000000000022721477363751000152410ustar00rootroot00000000000000package incus import ( "fmt" "net/http" ) // ProtocolOCI implements an OCI registry API client. type ProtocolOCI struct { http *http.Client httpHost string httpUserAgent string httpCertificate string // Cache for images. cache map[string]ociInfo } // Disconnect is a no-op for OCI. func (r *ProtocolOCI) Disconnect() { } // GetConnectionInfo returns the basic connection information used to interact with the server. func (r *ProtocolOCI) GetConnectionInfo() (*ConnectionInfo, error) { info := ConnectionInfo{} info.Addresses = []string{r.httpHost} info.Certificate = r.httpCertificate info.Protocol = "oci" info.URL = r.httpHost return &info, nil } // GetHTTPClient returns the http client used for the connection. This can be used to set custom http options. func (r *ProtocolOCI) GetHTTPClient() (*http.Client, error) { if r.http == nil { return nil, fmt.Errorf("HTTP client isn't set, bad connection") } return r.http, nil } // DoHTTP performs a Request. func (r *ProtocolOCI) DoHTTP(req *http.Request) (*http.Response, error) { // Set the user agent. if r.httpUserAgent != "" { req.Header.Set("User-Agent", r.httpUserAgent) } return r.http.Do(req) } incus-6.0.4/client/oci_images.go000066400000000000000000000273201477363751000165670ustar00rootroot00000000000000package incus import ( "compress/gzip" "context" "encoding/json" "fmt" "io" "net/http" "net/url" "os" "os/exec" "path/filepath" "strings" "time" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/ioprogress" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/osarch" "github.com/lxc/incus/v6/shared/subprocess" "github.com/lxc/incus/v6/shared/units" ) type ociInfo struct { Alias string Name string `json:"Name"` Digest string `json:"Digest"` Created time.Time `json:"Created"` Architecture string `json:"Architecture"` LayersData []struct { Size int64 `json:"Size"` } `json:"LayersData"` } // Get the proxy host value. func (r *ProtocolOCI) getProxyHost() (*url.URL, error) { req, err := http.NewRequest("GET", r.httpHost, nil) if err != nil { return nil, err } proxy, err := r.http.Transport.(*http.Transport).Proxy(req) if err != nil { return nil, err } return proxy, nil } // Image handling functions // GetImages returns a list of available images as Image structs. func (r *ProtocolOCI) GetImages() ([]api.Image, error) { return nil, fmt.Errorf("Can't list images from OCI registry") } // GetImagesAllProjects returns a list of available images as Image structs. 
func (r *ProtocolOCI) GetImagesAllProjects() ([]api.Image, error) { return nil, fmt.Errorf("Can't list images from OCI registry") } // GetImageFingerprints returns a list of available image fingerprints. func (r *ProtocolOCI) GetImageFingerprints() ([]string, error) { return nil, fmt.Errorf("Can't list images from OCI registry") } // GetImagesWithFilter returns a filtered list of available images as Image structs. func (r *ProtocolOCI) GetImagesWithFilter(filters []string) ([]api.Image, error) { return nil, fmt.Errorf("Can't list images from OCI registry") } // GetImage returns an Image struct for the provided fingerprint. func (r *ProtocolOCI) GetImage(fingerprint string) (*api.Image, string, error) { info, ok := r.cache[fingerprint] if !ok { _, err := exec.LookPath("skopeo") if err != nil { return nil, "", fmt.Errorf("OCI container handling requires \"skopeo\" be present on the system") } return nil, "", fmt.Errorf("Image not found") } img := api.Image{ ImagePut: api.ImagePut{ Public: true, Properties: map[string]string{ "architecture": info.Architecture, "type": "oci", "description": fmt.Sprintf("%s (OCI)", info.Name), "id": info.Alias, }, }, Aliases: []api.ImageAlias{{ Name: info.Alias, }}, Architecture: info.Architecture, Fingerprint: fingerprint, Type: string(api.InstanceTypeContainer), CreatedAt: info.Created, UploadedAt: info.Created, } var size int64 for _, layer := range info.LayersData { size += layer.Size } img.Size = size return &img, "", nil } // GetImageFile downloads an image from the server, returning an ImageFileResponse struct. func (r *ProtocolOCI) GetImageFile(fingerprint string, req ImageFileRequest) (*ImageFileResponse, error) { ctx := context.Background() // Get proxy details. proxy, err := r.getProxyHost() if err != nil { return nil, err } var env []string if proxy != nil { env = []string{ fmt.Sprintf("HTTPS_PROXY=%s", proxy), fmt.Sprintf("HTTP_PROXY=%s", proxy), } } // Get the cached entry. info, ok := r.cache[fingerprint] if !ok { _, err := exec.LookPath("skopeo") if err != nil { return nil, fmt.Errorf("OCI container handling requires \"skopeo\" be present on the system") } return nil, fmt.Errorf("Image not found") } // Quick checks. if req.MetaFile == nil && req.RootfsFile == nil { return nil, fmt.Errorf("No file requested") } if os.Geteuid() != 0 { return nil, fmt.Errorf("OCI image export currently requires root access") } _, err = exec.LookPath("umoci") if err != nil { return nil, fmt.Errorf("OCI container handling requires \"umoci\" be present on the system") } // Get some temporary storage. ociPath, err := os.MkdirTemp("", "incus-oci-") if err != nil { return nil, err } defer func() { _ = os.RemoveAll(ociPath) }() err = os.Mkdir(filepath.Join(ociPath, "oci"), 0o700) if err != nil { return nil, err } err = os.Mkdir(filepath.Join(ociPath, "image"), 0o700) if err != nil { return nil, err } // Copy the image. if req.ProgressHandler != nil { req.ProgressHandler(ioprogress.ProgressData{Text: "Retrieving OCI image from registry"}) } stdout, _, err := subprocess.RunCommandSplit( ctx, env, nil, "skopeo", "--insecure-policy", "copy", "--remove-signatures", fmt.Sprintf("%s/%s", strings.Replace(r.httpHost, "https://", "docker://", 1), info.Alias), fmt.Sprintf("oci:%s:latest", filepath.Join(ociPath, "oci"))) if err != nil { logger.Debug("Error copying remote image to local", logger.Ctx{"image": info.Alias, "stdout": stdout, "stderr": err}) return nil, err } // Convert to something usable. 
if req.ProgressHandler != nil { req.ProgressHandler(ioprogress.ProgressData{Text: "Unpacking the OCI image"}) } stdout, err = subprocess.RunCommand( "umoci", "unpack", "--keep-dirlinks", "--image", filepath.Join(ociPath, "oci"), filepath.Join(ociPath, "image")) if err != nil { logger.Debug("Error unpacking OCI image", logger.Ctx{"image": filepath.Join(ociPath, "oci"), "stdout": stdout, "stderr": err}) return nil, err } // Generate a metadata.yaml. if req.ProgressHandler != nil { req.ProgressHandler(ioprogress.ProgressData{Text: "Generating image metadata"}) } metadata := api.ImageMetadata{ Architecture: info.Architecture, CreationDate: info.Created.Unix(), } data, err := json.Marshal(metadata) if err != nil { return nil, err } err = os.WriteFile(filepath.Join(ociPath, "image", "metadata.yaml"), data, 0o644) if err != nil { return nil, err } // Prepare response. resp := &ImageFileResponse{ MetaName: "metadata.tar.gz", RootfsName: "rootfs.tar.gz", } // Prepare to push the tarballs. var pipeRead io.ReadCloser var pipeWrite io.WriteCloser // Push the metadata tarball. pipeRead, pipeWrite = io.Pipe() defer pipeRead.Close() defer pipeWrite.Close() if req.ProgressHandler != nil { pipeRead = &ioprogress.ProgressReader{ ReadCloser: pipeRead, Tracker: &ioprogress.ProgressTracker{ Handler: func(received int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("Generating metadata tarball: %s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) }, }, } } compressWrite := gzip.NewWriter(pipeWrite) metadataProcess := subprocess.NewProcessWithFds("tar", []string{"-cf", "-", "-C", filepath.Join(ociPath, "image"), "config.json", "metadata.yaml"}, nil, compressWrite, os.Stderr) err = metadataProcess.Start(ctx) if err != nil { return nil, err } go func() { _, _ = metadataProcess.Wait(ctx) compressWrite.Close() pipeWrite.Close() }() size, err := io.Copy(req.MetaFile, pipeRead) if err != nil { return nil, err } resp.MetaSize = size // Push the rootfs tarball. pipeRead, pipeWrite = io.Pipe() defer pipeRead.Close() defer pipeWrite.Close() if req.ProgressHandler != nil { pipeRead = &ioprogress.ProgressReader{ ReadCloser: pipeRead, Tracker: &ioprogress.ProgressTracker{ Handler: func(received int64, speed int64) { req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("Generating rootfs tarball: %s (%s/s)", units.GetByteSizeString(received, 2), units.GetByteSizeString(speed, 2))}) }, }, } } compressWrite = gzip.NewWriter(pipeWrite) rootfsProcess := subprocess.NewProcessWithFds("tar", []string{"-cf", "-", "-C", filepath.Join(ociPath, "image", "rootfs"), "."}, nil, compressWrite, nil) err = rootfsProcess.Start(ctx) if err != nil { return nil, err } go func() { _, _ = rootfsProcess.Wait(ctx) compressWrite.Close() pipeWrite.Close() }() size, err = io.Copy(req.RootfsFile, pipeRead) if err != nil { return nil, err } resp.RootfsSize = size return resp, nil } // GetImageSecret isn't relevant for the simplestreams protocol. func (r *ProtocolOCI) GetImageSecret(fingerprint string) (string, error) { return "", fmt.Errorf("Private images aren't supported with OCI registry") } // GetPrivateImage isn't relevant for the simplestreams protocol. func (r *ProtocolOCI) GetPrivateImage(fingerprint string, secret string) (*api.Image, string, error) { return nil, "", fmt.Errorf("Private images aren't supported with OCI registry") } // GetPrivateImageFile isn't relevant for the simplestreams protocol. 
func (r *ProtocolOCI) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (*ImageFileResponse, error) { return nil, fmt.Errorf("Private images aren't supported with OCI registry") } // GetImageAliases returns the list of available aliases as ImageAliasesEntry structs. func (r *ProtocolOCI) GetImageAliases() ([]api.ImageAliasesEntry, error) { return nil, fmt.Errorf("Can't list image aliases from OCI registry") } // GetImageAliasNames returns the list of available alias names. func (r *ProtocolOCI) GetImageAliasNames() ([]string, error) { return nil, fmt.Errorf("Can't list image aliases from OCI registry") } // GetImageAlias returns an existing alias as an ImageAliasesEntry struct. func (r *ProtocolOCI) GetImageAlias(name string) (*api.ImageAliasesEntry, string, error) { // Get proxy details. proxy, err := r.getProxyHost() if err != nil { return nil, "", err } var env []string if proxy != nil { env = []string{ fmt.Sprintf("HTTPS_PROXY=%s", proxy), fmt.Sprintf("HTTP_PROXY=%s", proxy), } } // Get the image information from skopeo. stdout, _, err := subprocess.RunCommandSplit( context.TODO(), env, nil, "skopeo", "inspect", fmt.Sprintf("%s/%s", strings.Replace(r.httpHost, "https://", "docker://", 1), name)) if err != nil { logger.Debug("Error getting image alias", logger.Ctx{"name": name, "stdout": stdout, "stderr": err}) return nil, "", err } // Parse the image info. var info ociInfo err = json.Unmarshal([]byte(stdout), &info) if err != nil { return nil, "", err } info.Alias = name info.Digest = strings.Replace(info.Digest, "sha256:", "", 1) archID, err := osarch.ArchitectureId(info.Architecture) if err != nil { return nil, "", err } archName, err := osarch.ArchitectureName(archID) if err != nil { return nil, "", err } info.Architecture = archName // Store it in the cache. r.cache[info.Digest] = info // Prepare the alias entry. alias := api.ImageAliasesEntry{ ImageAliasesEntryPut: api.ImageAliasesEntryPut{ Target: info.Digest, }, Name: name, Type: string(api.InstanceTypeContainer), } return &alias, "", nil } // GetImageAliasType returns an existing alias as an ImageAliasesEntry struct. func (r *ProtocolOCI) GetImageAliasType(imageType string, name string) (*api.ImageAliasesEntry, string, error) { if api.InstanceType(imageType) == api.InstanceTypeVM { return nil, "", fmt.Errorf("OCI images are only supported for containers") } return r.GetImageAlias(name) } // GetImageAliasArchitectures returns a map of architectures / targets. func (r *ProtocolOCI) GetImageAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { if api.InstanceType(imageType) == api.InstanceTypeVM { return nil, fmt.Errorf("OCI images are only supported for containers") } alias, _, err := r.GetImageAlias(name) if err != nil { return nil, err } localArch, err := osarch.ArchitectureGetLocal() if err != nil { return nil, err } return map[string]*api.ImageAliasesEntry{localArch: alias}, nil } // ExportImage exports (copies) an image to a remote server. func (r *ProtocolOCI) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) { return nil, fmt.Errorf("Exporting images is not supported with OCI registry") } incus-6.0.4/client/operations.go000066400000000000000000000176561477363751000166660ustar00rootroot00000000000000package incus import ( "context" "encoding/json" "errors" "fmt" "sync" "time" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/shared/api" ) // The Operation type represents an ongoing Incus operation (asynchronous processing). 
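//
// A rough usage sketch (the operation "op" would come from any asynchronous
// client call; the handler body is an assumption for this example):
//
//	_, _ = op.AddHandler(func(o api.Operation) {
//		fmt.Println("status:", o.Status)
//	})
//
//	err := op.Wait()
//	if err != nil {
//		return err
//	}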
type operation struct { api.Operation r *ProtocolIncus listener *EventListener handlerReady bool handlerLock sync.Mutex skipListener bool chActive chan bool } // AddHandler adds a function to be called whenever an event is received. func (op *operation) AddHandler(function func(api.Operation)) (*EventTarget, error) { if op.skipListener { return nil, fmt.Errorf("Cannot add handler, client operation does not support event listeners") } // Make sure we have a listener setup err := op.setupListener() if err != nil { return nil, err } // Make sure we're not racing with ourselves op.handlerLock.Lock() defer op.handlerLock.Unlock() // If we're done already, just return if op.StatusCode.IsFinal() { return nil, nil } // Wrap the function to filter unwanted messages wrapped := func(event api.Event) { op.handlerLock.Lock() newOp := api.Operation{} err := json.Unmarshal(event.Metadata, &newOp) if err != nil || newOp.ID != op.ID { op.handlerLock.Unlock() return } op.handlerLock.Unlock() function(newOp) } return op.listener.AddHandler([]string{"operation"}, wrapped) } // Cancel will request that Incus cancels the operation (if supported). func (op *operation) Cancel() error { return op.r.DeleteOperation(op.ID) } // Get returns the API operation struct. func (op *operation) Get() api.Operation { return op.Operation } // GetWebsocket returns a raw websocket connection from the operation. func (op *operation) GetWebsocket(secret string) (*websocket.Conn, error) { return op.r.GetOperationWebsocket(op.ID, secret) } // RemoveHandler removes a function to be called whenever an event is received. func (op *operation) RemoveHandler(target *EventTarget) error { if op.skipListener { return fmt.Errorf("Cannot remove handler, client operation does not support event listeners") } // Make sure we're not racing with ourselves op.handlerLock.Lock() defer op.handlerLock.Unlock() // If the listener is gone, just return if op.listener == nil { return nil } return op.listener.RemoveHandler(target) } // Refresh pulls the current version of the operation and updates the struct. func (op *operation) Refresh() error { // Get the current version of the operation newOp, _, err := op.r.GetOperation(op.ID) if err != nil { return err } // Update the operation struct op.Operation = *newOp return nil } // Wait lets you wait until the operation reaches a final state. func (op *operation) Wait() error { return op.WaitContext(context.Background()) } // WaitContext lets you wait until the operation reaches a final state with context.Context. func (op *operation) WaitContext(ctx context.Context) error { if op.skipListener { timeout := -1 deadline, ok := ctx.Deadline() if ok { timeout = int(time.Until(deadline).Seconds()) } opAPI, _, err := op.r.GetOperationWait(op.ID, timeout) if err != nil { return err } op.Operation = *opAPI if opAPI.Err != "" { return errors.New(opAPI.Err) } return nil } op.handlerLock.Lock() // Check if not done already if op.StatusCode.IsFinal() { if op.Err != "" { op.handlerLock.Unlock() return errors.New(op.Err) } op.handlerLock.Unlock() return nil } op.handlerLock.Unlock() // Make sure we have a listener setup err := op.setupListener() if err != nil { return err } select { case <-ctx.Done(): return ctx.Err() case <-op.chActive: } // We're done, parse the result if op.Err != "" { return errors.New(op.Err) } return nil } // setupListener initiates an event listener for an operation and manages updates to the operation's state. 
// It adds handlers to process events, monitors the listener for completion or errors, // and triggers a manual refresh of the operation's state to prevent race conditions. func (op *operation) setupListener() error { if op.skipListener { return fmt.Errorf("Cannot set up event listener, client operation does not support event listeners") } // Make sure we're not racing with ourselves op.handlerLock.Lock() defer op.handlerLock.Unlock() // We already have a listener setup if op.handlerReady { return nil } op.handlerReady = true // Get a new listener if op.listener == nil { listener, err := op.r.GetEvents() if err != nil { return err } op.listener = listener } // Setup the handler chReady := make(chan bool) _, err := op.listener.AddHandler([]string{"operation"}, func(event api.Event) { <-chReady // We don't want concurrency while processing events op.handlerLock.Lock() defer op.handlerLock.Unlock() // Check if we're done already (because of another event) if op.listener == nil { return } // Get an operation struct out of this data newOp := api.Operation{} err := json.Unmarshal(event.Metadata, &newOp) if err != nil || newOp.ID != op.ID { return } // Update the struct op.Operation = newOp // And check if we're done if op.StatusCode.IsFinal() { op.listener.Disconnect() op.listener = nil close(op.chActive) return } }) if err != nil { op.listener.Disconnect() op.listener = nil close(op.chActive) close(chReady) return err } // Monitor event listener go func() { <-chReady // We don't want concurrency while accessing the listener op.handlerLock.Lock() // Check if we're done already (because of another event) listener := op.listener if listener == nil { op.handlerLock.Unlock() return } op.handlerLock.Unlock() // Wait for the listener or operation to be done select { case <-listener.ctx.Done(): op.handlerLock.Lock() if op.listener != nil { op.Err = listener.err.Error() close(op.chActive) } op.handlerLock.Unlock() case <-op.chActive: return } }() // And do a manual refresh to avoid races err = op.Refresh() if err != nil { op.listener.Disconnect() op.listener = nil close(op.chActive) close(chReady) return err } // Check if not done already if op.StatusCode.IsFinal() { op.listener.Disconnect() op.listener = nil close(op.chActive) close(chReady) if op.Err != "" { return errors.New(op.Err) } return nil } // Start processing background updates close(chReady) return nil } // The remoteOperation type represents an ongoing Incus operation between two servers. type remoteOperation struct { targetOp Operation handlers []func(api.Operation) handlerLock sync.Mutex chDone chan bool chPost chan bool err error } // AddHandler adds a function to be called whenever an event is received. func (op *remoteOperation) AddHandler(function func(api.Operation)) (*EventTarget, error) { var err error var target *EventTarget op.handlerLock.Lock() defer op.handlerLock.Unlock() // Attach to the existing target operation if op.targetOp != nil { target, err = op.targetOp.AddHandler(function) if err != nil { return nil, err } } else { // Generate a mock EventTarget target = &EventTarget{ function: func(api.Event) { function(api.Operation{}) }, types: []string{"operation"}, } } // Add the handler to our list op.handlers = append(op.handlers, function) return target, nil } // CancelTarget attempts to cancel the target operation. func (op *remoteOperation) CancelTarget() error { if op.targetOp == nil { return fmt.Errorf("No associated target operation") } return op.targetOp.Cancel() } // GetTarget returns the target operation. 
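//
// A rough usage sketch (the remote operation "rop" is an assumption for this
// example):
//
//	err := rop.Wait()
//	if err != nil {
//		return err
//	}
//
//	target, err := rop.GetTarget()
//	if err == nil {
//		fmt.Println(target.StatusCode)
//	}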
func (op *remoteOperation) GetTarget() (*api.Operation, error) { if op.targetOp == nil { return nil, fmt.Errorf("No associated target operation") } opAPI := op.targetOp.Get() return &opAPI, nil } // Wait lets you wait until the operation reaches a final state. func (op *remoteOperation) Wait() error { <-op.chDone if op.chPost != nil { <-op.chPost } return op.err } incus-6.0.4/client/simplestreams.go000066400000000000000000000024611477363751000173570ustar00rootroot00000000000000package incus import ( "fmt" "net/http" "github.com/lxc/incus/v6/shared/simplestreams" ) // ProtocolSimpleStreams implements a SimpleStreams API client. type ProtocolSimpleStreams struct { ssClient *simplestreams.SimpleStreams http *http.Client httpHost string httpUserAgent string httpCertificate string } // Disconnect is a no-op for simplestreams. func (r *ProtocolSimpleStreams) Disconnect() { } // GetConnectionInfo returns the basic connection information used to interact with the server. func (r *ProtocolSimpleStreams) GetConnectionInfo() (*ConnectionInfo, error) { info := ConnectionInfo{} info.Addresses = []string{r.httpHost} info.Certificate = r.httpCertificate info.Protocol = "simplestreams" info.URL = r.httpHost return &info, nil } // GetHTTPClient returns the http client used for the connection. This can be used to set custom http options. func (r *ProtocolSimpleStreams) GetHTTPClient() (*http.Client, error) { if r.http == nil { return nil, fmt.Errorf("HTTP client isn't set, bad connection") } return r.http, nil } // DoHTTP performs a Request. func (r *ProtocolSimpleStreams) DoHTTP(req *http.Request) (*http.Response, error) { // Set the user agent if r.httpUserAgent != "" { req.Header.Set("User-Agent", r.httpUserAgent) } return r.http.Do(req) } incus-6.0.4/client/simplestreams_images.go000066400000000000000000000224011477363751000207000ustar00rootroot00000000000000package incus import ( "context" "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "os/exec" "strings" "time" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/subprocess" "github.com/lxc/incus/v6/shared/util" ) // Image handling functions // GetImages returns a list of available images as Image structs. func (r *ProtocolSimpleStreams) GetImages() ([]api.Image, error) { return r.ssClient.ListImages() } // GetImagesAllProjects returns a list of available images as Image structs. func (r *ProtocolSimpleStreams) GetImagesAllProjects() ([]api.Image, error) { return r.GetImages() } // GetImageFingerprints returns a list of available image fingerprints. func (r *ProtocolSimpleStreams) GetImageFingerprints() ([]string, error) { // Get all the images from simplestreams images, err := r.ssClient.ListImages() if err != nil { return nil, err } // And now extract just the fingerprints fingerprints := []string{} for _, img := range images { fingerprints = append(fingerprints, img.Fingerprint) } return fingerprints, nil } // GetImagesWithFilter returns a filtered list of available images as Image structs. func (r *ProtocolSimpleStreams) GetImagesWithFilter(filters []string) ([]api.Image, error) { return nil, fmt.Errorf("GetImagesWithFilter is not supported by the simplestreams protocol") } // GetImage returns an Image struct for the provided fingerprint. 
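//
// A rough usage sketch (the simplestreams remote "ss" and the alias name are
// assumptions for this example); the fingerprint usually comes from a prior
// alias lookup:
//
//	alias, _, err := ss.GetImageAlias("debian/12")
//	if err != nil {
//		return err
//	}
//
//	img, _, err := ss.GetImage(alias.Target)
//	if err != nil {
//		return err
//	}
//
//	fmt.Println(img.Properties["description"])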
func (r *ProtocolSimpleStreams) GetImage(fingerprint string) (*api.Image, string, error) { image, err := r.ssClient.GetImage(fingerprint) if err != nil { return nil, "", fmt.Errorf("Failed getting image: %w", err) } return image, "", err } // GetImageFile downloads an image from the server, returning an ImageFileResponse struct. func (r *ProtocolSimpleStreams) GetImageFile(fingerprint string, req ImageFileRequest) (*ImageFileResponse, error) { // Quick checks. if req.MetaFile == nil && req.RootfsFile == nil { return nil, fmt.Errorf("No file requested") } // Attempt to download from host if util.PathExists("/dev/incus/sock") && os.Geteuid() == 0 { unixURI := fmt.Sprintf("http://unix.socket/1.0/images/%s/export", url.PathEscape(fingerprint)) // Setup the HTTP client devIncusHTTP, err := unixHTTPClient(nil, "/dev/incus/sock") if err == nil { resp, err := incusDownloadImage(fingerprint, unixURI, r.httpUserAgent, devIncusHTTP.Do, req) if err == nil { return resp, nil } } } // Use relatively short response header timeout so as not to hold the image lock open too long. // Deference client and transport in order to clone them so as to not modify timeout of base client. httpClient := *r.http httpTransport := httpClient.Transport.(*http.Transport).Clone() httpTransport.ResponseHeaderTimeout = 30 * time.Second httpClient.Transport = httpTransport // Get the file list files, err := r.ssClient.GetFiles(fingerprint) if err != nil { return nil, err } // Prepare the response resp := ImageFileResponse{} // Download function download := func(path string, filename string, hash string, target io.WriteSeeker) (int64, error) { // Try over http uri, err := url.JoinPath(fmt.Sprintf("http://%s", strings.TrimPrefix(r.httpHost, "https://")), path) if err != nil { return -1, err } size, err := util.DownloadFileHash(context.TODO(), &httpClient, r.httpUserAgent, req.ProgressHandler, req.Canceler, filename, uri, hash, sha256.New(), target) if err != nil { // Handle cancellation if err.Error() == "net/http: request canceled" { return -1, err } // Try over https uri, err := url.JoinPath(r.httpHost, path) if err != nil { return -1, err } size, err = util.DownloadFileHash(context.TODO(), &httpClient, r.httpUserAgent, req.ProgressHandler, req.Canceler, filename, uri, hash, sha256.New(), target) if err != nil { if errors.Is(err, util.ErrNotFound) { logger.Info("Unable to download file by hash, invalidate potentially outdated cache", logger.Ctx{"filename": filename, "uri": uri, "hash": hash}) r.ssClient.InvalidateCache() } return -1, err } } return size, nil } // Download the Incus image file meta, ok := files["meta"] if ok && req.MetaFile != nil { size, err := download(meta.Path, "metadata", meta.Sha256, req.MetaFile) if err != nil { return nil, err } parts := strings.Split(meta.Path, "/") resp.MetaName = parts[len(parts)-1] resp.MetaSize = size } // Download the rootfs rootfs, ok := files["root"] if ok && req.RootfsFile != nil { // Look for deltas (requires xdelta3) downloaded := false _, err := exec.LookPath("xdelta3") if err == nil && req.DeltaSourceRetriever != nil { for filename, file := range files { _, srcFingerprint, prefixFound := strings.Cut(filename, "root.delta-") if !prefixFound { continue } // Check if we have the source file for the delta srcPath := req.DeltaSourceRetriever(srcFingerprint, "rootfs") if srcPath == "" { continue } // Create temporary file for the delta deltaFile, err := os.CreateTemp("", "incus_image_") if err != nil { return nil, err } defer func() { _ = deltaFile.Close() }() defer func() { _ = 
os.Remove(deltaFile.Name()) }() // Download the delta _, err = download(file.Path, "rootfs delta", file.Sha256, deltaFile) if err != nil { return nil, err } // Create temporary file for the delta patchedFile, err := os.CreateTemp("", "incus_image_") if err != nil { return nil, err } defer func() { _ = patchedFile.Close() }() defer func() { _ = os.Remove(patchedFile.Name()) }() // Apply it _, err = subprocess.RunCommand("xdelta3", "-f", "-d", "-s", srcPath, deltaFile.Name(), patchedFile.Name()) if err != nil { return nil, err } // Copy to the target size, err := io.Copy(req.RootfsFile, patchedFile) if err != nil { return nil, err } parts := strings.Split(rootfs.Path, "/") resp.RootfsName = parts[len(parts)-1] resp.RootfsSize = size downloaded = true } } // Download the whole file if !downloaded { size, err := download(rootfs.Path, "rootfs", rootfs.Sha256, req.RootfsFile) if err != nil { return nil, err } parts := strings.Split(rootfs.Path, "/") resp.RootfsName = parts[len(parts)-1] resp.RootfsSize = size } } return &resp, nil } // GetImageSecret isn't relevant for the simplestreams protocol. func (r *ProtocolSimpleStreams) GetImageSecret(fingerprint string) (string, error) { return "", fmt.Errorf("Private images aren't supported by the simplestreams protocol") } // GetPrivateImage isn't relevant for the simplestreams protocol. func (r *ProtocolSimpleStreams) GetPrivateImage(fingerprint string, secret string) (*api.Image, string, error) { return nil, "", fmt.Errorf("Private images aren't supported by the simplestreams protocol") } // GetPrivateImageFile isn't relevant for the simplestreams protocol. func (r *ProtocolSimpleStreams) GetPrivateImageFile(fingerprint string, secret string, req ImageFileRequest) (*ImageFileResponse, error) { return nil, fmt.Errorf("Private images aren't supported by the simplestreams protocol") } // GetImageAliases returns the list of available aliases as ImageAliasesEntry structs. func (r *ProtocolSimpleStreams) GetImageAliases() ([]api.ImageAliasesEntry, error) { return r.ssClient.ListAliases() } // GetImageAliasNames returns the list of available alias names. func (r *ProtocolSimpleStreams) GetImageAliasNames() ([]string, error) { // Get all the images from simplestreams aliases, err := r.ssClient.ListAliases() if err != nil { return nil, err } // And now extract just the names names := []string{} for _, alias := range aliases { names = append(names, alias.Name) } return names, nil } // GetImageAlias returns an existing alias as an ImageAliasesEntry struct. func (r *ProtocolSimpleStreams) GetImageAlias(name string) (*api.ImageAliasesEntry, string, error) { alias, err := r.ssClient.GetAlias("container", name) if err != nil { alias, err = r.ssClient.GetAlias("virtual-machine", name) if err != nil { return nil, "", err } } return alias, "", err } // GetImageAliasType returns an existing alias as an ImageAliasesEntry struct. func (r *ProtocolSimpleStreams) GetImageAliasType(imageType string, name string) (*api.ImageAliasesEntry, string, error) { if imageType == "" { return r.GetImageAlias(name) } alias, err := r.ssClient.GetAlias(imageType, name) if err != nil { return nil, "", err } return alias, "", err } // GetImageAliasArchitectures returns a map of architectures / targets. 
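//
// A rough usage sketch (the simplestreams remote "ss" and the alias name are
// assumptions for this example):
//
//	archs, err := ss.GetImageAliasArchitectures("container", "debian/12")
//	if err != nil {
//		return err
//	}
//
//	for arch, entry := range archs {
//		fmt.Println(arch, entry.Target)
//	}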
func (r *ProtocolSimpleStreams) GetImageAliasArchitectures(imageType string, name string) (map[string]*api.ImageAliasesEntry, error) { if imageType == "" { aliases, err := r.ssClient.GetAliasArchitectures("container", name) if err != nil { aliases, err = r.ssClient.GetAliasArchitectures("virtual-machine", name) if err != nil { return nil, err } } return aliases, nil } return r.ssClient.GetAliasArchitectures(imageType, name) } // ExportImage exports (copies) an image to a remote server. func (r *ProtocolSimpleStreams) ExportImage(fingerprint string, image api.ImageExportPost) (Operation, error) { return nil, fmt.Errorf("Exporting images is not supported by the simplestreams protocol") } incus-6.0.4/client/util.go000066400000000000000000000154301477363751000154440ustar00rootroot00000000000000package incus import ( "context" "crypto/tls" "fmt" "net" "net/http" "net/url" "strings" "time" "github.com/lxc/incus/v6/shared/proxy" localtls "github.com/lxc/incus/v6/shared/tls" ) // tlsHTTPClient creates an HTTP client with a specified Transport Layer Security (TLS) configuration. // It takes in parameters for client certificates, keys, Certificate Authority, server certificates, // a boolean for skipping verification, a proxy function, and a transport wrapper function. // It returns the HTTP client with the provided configurations and handles any errors that might occur during the setup process. func tlsHTTPClient(client *http.Client, tlsClientCert string, tlsClientKey string, tlsCA string, tlsServerCert string, insecureSkipVerify bool, proxyFunc func(req *http.Request) (*url.URL, error), transportWrapper func(t *http.Transport) HTTPTransporter) (*http.Client, error) { // Get the TLS configuration tlsConfig, err := localtls.GetTLSConfigMem(tlsClientCert, tlsClientKey, tlsCA, tlsServerCert, insecureSkipVerify) if err != nil { return nil, err } // Define the http transport transport := &http.Transport{ TLSClientConfig: tlsConfig, Proxy: proxy.FromEnvironment, DisableKeepAlives: true, ExpectContinueTimeout: time.Second * 30, ResponseHeaderTimeout: time.Second * 3600, TLSHandshakeTimeout: time.Second * 5, } // Allow overriding the proxy if proxyFunc != nil { transport.Proxy = proxyFunc } // Special TLS handling transport.DialTLSContext = func(ctx context.Context, network string, addr string) (net.Conn, error) { tlsDial := func(network string, addr string, config *tls.Config, resetName bool) (net.Conn, error) { conn, err := localtls.RFC3493Dialer(ctx, network, addr) if err != nil { return nil, err } // Setup TLS if resetName { hostName, _, err := net.SplitHostPort(addr) if err != nil { hostName = addr } config = config.Clone() config.ServerName = hostName } tlsConn := tls.Client(conn, config) // Validate the connection err = tlsConn.Handshake() if err != nil { _ = conn.Close() return nil, err } if !config.InsecureSkipVerify { err := tlsConn.VerifyHostname(config.ServerName) if err != nil { _ = conn.Close() return nil, err } } return tlsConn, nil } conn, err := tlsDial(network, addr, transport.TLSClientConfig, false) if err != nil { // We may have gotten redirected to a non-Incus machine return tlsDial(network, addr, transport.TLSClientConfig, true) } return conn, nil } // Define the http client if client == nil { client = &http.Client{} } if transportWrapper != nil { client.Transport = transportWrapper(transport) } else { client.Transport = transport } // Setup redirect policy client.CheckRedirect = func(req *http.Request, via []*http.Request) error { // Replicate the headers req.Header = 
via[len(via)-1].Header return nil } return client, nil } // unixHTTPClient creates an HTTP client that communicates over a Unix socket. // It takes in the connection arguments and the Unix socket path as parameters. // The function sets up a Unix socket dialer, configures the HTTP transport, and returns the HTTP client with the specified configurations. // Any errors encountered during the setup process are also handled by the function. func unixHTTPClient(args *ConnectionArgs, path string) (*http.Client, error) { // Setup a Unix socket dialer unixDial := func(_ context.Context, network, addr string) (net.Conn, error) { raddr, err := net.ResolveUnixAddr("unix", path) if err != nil { return nil, err } return net.DialUnix("unix", nil, raddr) } if args == nil { args = &ConnectionArgs{} } // Define the http transport transport := &http.Transport{ DialContext: unixDial, DisableKeepAlives: true, Proxy: args.Proxy, ExpectContinueTimeout: time.Second * 30, ResponseHeaderTimeout: time.Second * 3600, TLSHandshakeTimeout: time.Second * 5, } // Define the http client client := args.HTTPClient if client == nil { client = &http.Client{} } client.Transport = transport // Setup redirect policy client.CheckRedirect = func(req *http.Request, via []*http.Request) error { // Replicate the headers req.Header = via[len(via)-1].Header return nil } return client, nil } // remoteOperationResult used for storing the error that occurred for a particular remote URL. type remoteOperationResult struct { URL string Error error } func remoteOperationError(msg string, errors []remoteOperationResult) error { // Check if empty if len(errors) == 0 { return nil } // Check if all identical var err error for _, entry := range errors { if err != nil && entry.Error.Error() != err.Error() { errorStrs := make([]string, 0, len(errors)) for _, error := range errors { errorStrs = append(errorStrs, fmt.Sprintf("%s: %v", error.URL, error.Error)) } return fmt.Errorf("%s:\n - %s", msg, strings.Join(errorStrs, "\n - ")) } err = entry.Error } // Check if successful if err != nil { return fmt.Errorf("%s: %w", msg, err) } return nil } // Set the value of a query parameter in the given URI. func setQueryParam(uri, param, value string) (string, error) { fields, err := url.Parse(uri) if err != nil { return "", err } values := fields.Query() values.Set(param, url.QueryEscape(value)) fields.RawQuery = values.Encode() return fields.String(), nil } // urlsToResourceNames returns a list of resource names extracted from one or more URLs of the same resource type. // The resource type path prefix to match is provided by the matchPathPrefix argument. func urlsToResourceNames(matchPathPrefix string, urls ...string) ([]string, error) { resourceNames := make([]string, 0, len(urls)) for _, urlRaw := range urls { u, err := url.Parse(urlRaw) if err != nil { return nil, fmt.Errorf("Failed parsing URL %q: %w", urlRaw, err) } _, after, found := strings.Cut(u.Path, fmt.Sprintf("%s/", matchPathPrefix)) if !found { return nil, fmt.Errorf("Unexpected URL path %q", u) } resourceNames = append(resourceNames, after) } return resourceNames, nil } // parseFilters translates filters passed at client side to form acceptable by server-side API. 
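//
// For example (illustrative input only), the call below would return
// "status eq running and type eq container":
//
//	parseFilters([]string{"status=running", "type=container"})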
func parseFilters(filters []string) string { var result []string for _, filter := range filters { if strings.Contains(filter, "=") { membs := strings.SplitN(filter, "=", 2) result = append(result, fmt.Sprintf("%s eq %s", membs[0], membs[1])) } } return strings.Join(result, " and ") } // HTTPTransporter represents a wrapper around *http.Transport. // It is used to add some pre and postprocessing logic to http requests / responses. type HTTPTransporter interface { http.RoundTripper // Transport what this struct wraps Transport() *http.Transport } incus-6.0.4/cmd/000077500000000000000000000000001477363751000134225ustar00rootroot00000000000000incus-6.0.4/cmd/fuidshift/000077500000000000000000000000001477363751000154075ustar00rootroot00000000000000incus-6.0.4/cmd/fuidshift/main.go000066400000000000000000000014521477363751000166640ustar00rootroot00000000000000package main import ( "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/version" ) type cmdGlobal struct { flagVersion bool flagHelp bool } func main() { // shift command (main) shiftCmd := cmdShift{} app := shiftCmd.Command() app.SilenceUsage = true app.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true} // Global flags globalCmd := cmdGlobal{} shiftCmd.global = &globalCmd app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // Run the main command and handle errors err := app.Execute() if err != nil { os.Exit(1) } } incus-6.0.4/cmd/fuidshift/main_shift.go000066400000000000000000000044351477363751000200650ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/shared/idmap" ) type cmdShift struct { global *cmdGlobal flagReverse bool flagTestMode bool } func (c *cmdShift) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "fuidshift [...]" cmd.Short = "UID/GID shifter" cmd.Long = `Description: UID/GID shifter This tool lets you remap a filesystem tree, switching it from one set of UID/GID ranges to another. This is mostly useful when retrieving a wrongly shifted filesystem tree from a backup or broken system and having to remap everything either to the host UID/GID range (uid/gid 0 is root) or to an existing container's range. A range is represented as :::. Where "u" means shift uid, "g" means shift gid and "b" means shift uid and gid. ` cmd.Example = ` fuidshift my-dir/ b:0:100000:65536 u:10000:1000:1` cmd.RunE = c.Run cmd.Flags().BoolVarP(&c.flagTestMode, "test", "t", false, "Test mode (no change to files)") cmd.Flags().BoolVarP(&c.flagReverse, "reverse", "r", false, "Perform a reverse mapping") return cmd } func (c *cmdShift) Run(cmd *cobra.Command, args []string) error { // Help and usage if len(args) == 0 { return cmd.Help() } // Quick checks. 
if !c.flagTestMode && os.Geteuid() != 0 { return fmt.Errorf("This tool must be run as root") } // Handle mandatory arguments if len(args) < 2 { _ = cmd.Help() return fmt.Errorf("Missing required arguments") } directory := args[0] var skipper func(dir string, absPath string, fi os.FileInfo, newuid int64, newgid int64) error if c.flagTestMode { skipper = func(dir string, absPath string, fi os.FileInfo, newuid int64, newgid int64) error { fmt.Printf("I would shift %q to %d %d\n", absPath, newuid, newgid) return fmt.Errorf("dry run") } } // Parse the maps idmapSet := &idmap.Set{} for _, arg := range args[1:] { var err error idmapSet, err = idmapSet.Append(arg) if err != nil { return err } } // Reverse shifting if c.flagReverse { err := idmapSet.UnshiftPath(directory, skipper) if err != nil { return err } return nil } // Normal shifting err := idmapSet.ShiftPath(directory, skipper) if err != nil { return err } return nil } incus-6.0.4/cmd/generate-config/000077500000000000000000000000001477363751000164575ustar00rootroot00000000000000incus-6.0.4/cmd/generate-config/README.md000066400000000000000000000161771477363751000177520ustar00rootroot00000000000000# generate-config A small CLI to parse comments in a Golang codebase meant to be used for a documentation tool (like Sphinx for example). It parses the comments from the AST and extracts their documentation. ## Disclaimer `generate-config` is intended for internal use within the [Incus](https://github.com/lxc/incus) code base. There are no guarantees regarding backwards compatibility, API stability, or long-term availability. It may change or be removed at any time without prior notice. Use at your own discretion. ## Usage ```shell $ generate-config -h Usage of generate-config: -e value Path that will be excluded from the process ``` ## Formatting A comment is formatted this way: ```go // gendoc:generate(entity=cluster, group=cluster, key=scheduler.instance) // // // --- // shortdesc: Possible values are all, manual and group. See Automatic placement of instances for more information. // condition: container // defaultdesc: `all` // type: integer // liveupdate: `yes` // : clusterConfigKeys := map[string]func(value string) error{ "scheduler.instance": validate.Optional(validate.IsOneOf("all", "group", "manual")), } for k, v := range config { // gendoc:generate(entity=cluster, group=cluster, key=user.*) // // This is the real long desc. // // With two paragraphs. // // And a list: // // - Item // - Item // - Item // // example of a table: // // Key | Type | Scope | Default | Description // :-- | :--- | :---- | :------ | :---------- // `acme.agree_tos` | bool | global | `false` | Agree to ACME terms of service // `acme.ca_url` | string | global | `https://acme-v02.api.letsencrypt.org/directory` | URL to the directory resource of the ACME service // `acme.domain` | string | global | - | Domain for which the certificate is issued // `acme.email` | string | global | - | Email address used for the account registration // // --- // shortdesc: Free form user key/value storage (can be used in search). 
// condition: container // default: - // type: string // liveupdate: `yes` if strings.HasPrefix(k, "user.") { continue } validator, ok := clusterConfigKeys[k] if !ok { return fmt.Errorf("Invalid cluster configuration key %q", k) } err := validator(v) if err != nil { return fmt.Errorf("Invalid cluster configuration key %q value", k) } } return nil ``` The go-swagger spec from source generator can only handles `swagger:meta` (global file/package level documentation), `swagger:route` (API endpoints), `swagger:params` (function parameters), `swagger:operation` (method documentation), `swagger:response` (API response content documentation), `swagger:model` (struct documentation) generation. In our use case, we would want a config variable spec generator that can bundle any key-value data pairs alongside metadata to build a sense of hierarchy and identity (we want to associate a unique key to each gendoc comment group that will also be displayed in the generated documentation) In a swagger fashion, `generate-config` can associate metadata key-value pairs (here for example, `group` and `key`) to data key-value pairs. As a result, it can generate a YAML tree out of the code documentation and also a Markdown document. ### Output Here is the JSON output of the example shown above: ```json { "configs": { "cluster": [ { "scheduler.instance": { "condition": "container", "defaultdesc": "`all`", "liveupdate": "`yes`", "longdesc": "", "shortdesc": " Possible values are all, manual and group. See Automatic placement of instances for more", "type": "integer" } }, { "user.*": { "condition": "container", "defaultdesc": "-", "liveupdate": "`yes`", "longdesc": " This is the real long desc. With two paragraphs. And a list: - Item - Item - Item And a table: Key | Type | Scope | Default | Description :-- | :--- | :---- | :------ | :---------- `acme.agree_tos` | bool | global | `false` | Agree to ACME terms of service `acme.ca_url` | string | global | `https://acme-v02.api.letsencrypt.org/directory` | URL to the directory resource of the ACME service `acme.domain` | string | global | - | Domain for which the certificate is issued `acme.email` | string | global | - | Email address used for the account registration ", "shortdesc": "Free form user key/value storage (can be used in search).", "type": "string" } } ], } } ``` Here is the `.txt` output of the example shown above: ```plain \`\`\`{config:option} user.* cluster :type: string :liveupdate: `yes` :shortdesc: Free form user key/value storage (can be used in search). :condition: container :default: - This is the real long desc. With two paragraphs. And a list: - Item - Item - Item example of a table: Key | Type | Scope | Default | Description :-- | :--- | :---- | :------ | :---------- `acme.agree_tos` | bool | global | `false` | Agree to ACME terms of service `acme.ca_url` | string | global | `https://acme-v02.api.letsencrypt.org/directory` | URL to the directory resource of the ACME service `acme.domain` | string | global | - | Domain for which the certificate is issued `acme.email` | string | global | - | Email address used for the account registration \`\`\` \`\`\`{config:option} scheduler.instance cluster :liveupdate: `yes` :shortdesc: Possible values are all, manual and group. See Automatic placement of instances for more information. 
:condition: container :default: `all` :type: integer \`\`\` ``` incus-6.0.4/cmd/generate-config/incus_doc.go000066400000000000000000000262351477363751000207640ustar00rootroot00000000000000package main import ( "bytes" "encoding/json" "fmt" "go/ast" "go/parser" "go/token" "log" "os" "path/filepath" "regexp" "slices" "sort" "strings" "time" ) var ( globalGenDocRegex = regexp.MustCompile(`(?m)gendoc:generate\((.*)\)([\S\s]+)\s+---\n([\S\s]+)`) genDocMetadataRegex = regexp.MustCompile(`(?m)([^,\s]+)=([^,\s]+)`) genDocDataRegex = regexp.MustCompile(`(?m)([\S]+):[\s]+([\S \"\']+)`) ) var mdKeys []string = []string{"entity", "group", "key"} // IterableAny is a generic type that represents a type or an iterable container. type IterableAny interface { any | []any } // doc is the structure of the JSON file that contains the generated configuration metadata. type doc struct { Configs map[string]any `json:"configs"` } // sortConfigKeys alphabetically sorts the entries by key (config option key) within each config group in an entity. func sortConfigKeys(projectEntries map[string]any) { for _, entityValue := range projectEntries { for _, groupValue := range entityValue.(map[string]any) { configEntries := groupValue.(map[string]any)["keys"].([]any) sort.Slice(configEntries, func(i, j int) bool { // Get the only key for each map element in the slice var keyI, keyJ string confI, confJ := configEntries[i].(map[string]any), configEntries[j].(map[string]any) for k := range confI { keyI = k break // There is only one key-value pair in each map } for k := range confJ { keyJ = k break // There is only one key-value pair in each map } // Compare the keys return keyI < keyJ }) } } } // getSortedKeysFromMap returns the keys of a map sorted alphabetically. func getSortedKeysFromMap[K string, V IterableAny](m map[K]V) []K { keys := make([]K, 0, len(m)) for k := range m { keys = append(keys, k) } sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) return keys } func parse(path string, outputJSONPath string, excludedPaths []string) (*doc, error) { jsonDoc := &doc{} docKeys := make(map[string]struct{}, 0) projectEntries := make(map[string]any) err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { if err != nil { return err } // Skip excluded paths if slices.Contains(excludedPaths, path) { if info.IsDir() { log.Printf("Skipping excluded directory: %v", path) return filepath.SkipDir } log.Printf("Skipping excluded file: %v", path) return nil } // Only process go files if !info.IsDir() && filepath.Ext(path) != ".go" { return nil } // Continue walking if directory if info.IsDir() { return nil } // Parse file and create the AST fset := token.NewFileSet() var f *ast.File f, err = parser.ParseFile(fset, path, nil, parser.ParseComments) if err != nil { return err } fileEntries := make([]map[string]any, 0) // Loop in comment groups for _, cg := range f.Comments { s := cg.Text() entry := make(map[string]any) groupKeyEntry := make(map[string]any) for _, match := range globalGenDocRegex.FindAllStringSubmatch(s, -1) { // check that the match contains the expected number of groups if len(match) != 4 { continue } log.Printf("Found gendoc at %s", fset.Position(cg.Pos()).String()) metadata := match[1] longdesc := match[2] data := match[3] // process metadata metadataMap := make(map[string]string) var entityKey string var groupKey string var simpleKey string for _, mdKVMatch := range genDocMetadataRegex.FindAllStringSubmatch(metadata, -1) { if len(mdKVMatch) != 3 { continue } mdKey := 
mdKVMatch[1] mdValue := mdKVMatch[2] // check that the metadata key is among the expected ones if !slices.Contains(mdKeys, mdKey) { continue } if mdKey == "entity" { entityKey = mdValue } if mdKey == "group" { groupKey = mdValue } if mdKey == "key" { simpleKey = mdValue } metadataMap[mdKey] = mdValue } // Check that this metadata is not already present mdKeyHash := fmt.Sprintf("%s/%s/%s", entityKey, groupKey, simpleKey) _, ok := docKeys[mdKeyHash] if ok { return fmt.Errorf("Duplicate key '%s' found at %s", mdKeyHash, fset.Position(cg.Pos()).String()) } docKeys[mdKeyHash] = struct{}{} configKeyEntry := make(map[string]any) configKeyEntry[metadataMap["key"]] = make(map[string]any) configKeyEntry[metadataMap["key"]].(map[string]any)["longdesc"] = strings.TrimLeft(longdesc, "\n\t\v\f\r") for _, dataKVMatch := range genDocDataRegex.FindAllStringSubmatch(data, -1) { if len(dataKVMatch) != 3 { continue } configKeyEntry[metadataMap["key"]].(map[string]any)[dataKVMatch[1]] = dataKVMatch[2] } _, ok = groupKeyEntry[metadataMap["group"]] if ok { _, ok = groupKeyEntry[metadataMap["group"]].(map[string]any)["keys"] if ok { groupKeyEntry[metadataMap["group"]].(map[string]any)["keys"] = append( groupKeyEntry[metadataMap["group"]].(map[string]any)["keys"].([]any), configKeyEntry, ) } else { groupKeyEntry[metadataMap["group"]].(map[string]any)["keys"] = []any{configKeyEntry} } } else { groupKeyEntry[metadataMap["group"]] = make(map[string]any) groupKeyEntry[metadataMap["group"]].(map[string]any)["keys"] = []any{configKeyEntry} } entry[metadataMap["entity"]] = groupKeyEntry } if len(entry) > 0 { fileEntries = append(fileEntries, entry) } } // Update projectEntries for _, entry := range fileEntries { for entityKey, entityValue := range entry { _, ok := projectEntries[entityKey] if !ok { projectEntries[entityKey] = entityValue } else { for groupKey, groupValue := range entityValue.(map[string]any) { _, ok := projectEntries[entityKey].(map[string]any)[groupKey] if !ok { projectEntries[entityKey].(map[string]any)[groupKey] = groupValue } else { // merge the config keys configKeys := groupValue.(map[string]any)["keys"].([]any) projectEntries[entityKey].(map[string]any)[groupKey].(map[string]any)["keys"] = append( projectEntries[entityKey].(map[string]any)[groupKey].(map[string]any)["keys"].([]any), configKeys..., ) } } } } } return nil }) if err != nil { return nil, err } // sort the config keys alphabetically sortConfigKeys(projectEntries) jsonDoc.Configs = projectEntries data, err := json.MarshalIndent(jsonDoc, "", "\t") if err != nil { return nil, fmt.Errorf("Error while marshaling project documentation: %v", err) } if outputJSONPath != "" { buf := bytes.NewBufferString("") _, err = buf.Write(data) if err != nil { return nil, fmt.Errorf("Error while writing the JSON project documentation: %v", err) } err := os.WriteFile(outputJSONPath, buf.Bytes(), 0o644) if err != nil { return nil, fmt.Errorf("Error while writing the JSON project documentation: %v", err) } } return jsonDoc, nil } func writeDocFile(inputJSONPath, outputTxtPath string) error { countMaxBackTicks := func(s string) int { count, curr_count := 0, 0 n := len(s) for i := 0; i < n; i++ { if s[i] == '`' { curr_count++ continue } if curr_count > count { count = curr_count } curr_count = 0 } return count } specialChars := []string{"", "*", "_", "#", "+", "-", ".", "!", "no", "yes"} // read the JSON file which is the source of truth for the generation of the .txt file jsonData, err := os.ReadFile(inputJSONPath) if err != nil { return err } var jsonDoc doc 
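// Decode the generated JSON document so its entries can be rendered as {config:option} blocks below.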
err = json.Unmarshal(jsonData, &jsonDoc) if err != nil { return err } sortedEntityKeys := getSortedKeysFromMap(jsonDoc.Configs) // create a string buffer buffer := bytes.NewBufferString("// Code generated by generate-config from the incus project; DO NOT EDIT.\n\n") for _, entityKey := range sortedEntityKeys { entityEntries := jsonDoc.Configs[entityKey] sortedGroupKeys := getSortedKeysFromMap(entityEntries.(map[string]any)) for _, groupKey := range sortedGroupKeys { groupEntries := entityEntries.(map[string]any)[groupKey] buffer.WriteString(fmt.Sprintf("\n", entityKey, groupKey)) for _, configEntry := range groupEntries.(map[string]any)["keys"].([]any) { for configKey, configContent := range configEntry.(map[string]any) { // There is only one key-value pair in each map kvBuffer := bytes.NewBufferString("") var backticksCount int var longDescContent string sortedConfigContentKeys := getSortedKeysFromMap(configContent.(map[string]any)) for _, configEntryContentKey := range sortedConfigContentKeys { configContentValue := configContent.(map[string]any)[configEntryContentKey] if configEntryContentKey == "longdesc" { backticksCount = countMaxBackTicks(configContentValue.(string)) longDescContent = configContentValue.(string) continue } configContentValueStr, ok := configContentValue.(string) if ok { if (strings.HasSuffix(configContentValueStr, "`") && strings.HasPrefix(configContentValueStr, "`")) || slices.Contains(specialChars, configContentValueStr) { configContentValueStr = fmt.Sprintf("\"%s\"", configContentValueStr) } } else { switch configEntryContentTyped := configContentValue.(type) { case int, float64, bool: configContentValueStr = fmt.Sprint(configEntryContentTyped) case time.Time: configContentValueStr = fmt.Sprint(configEntryContentTyped.Format(time.RFC3339)) } } var quoteFormattedValue string if strings.Contains(configContentValueStr, `"`) { if strings.HasPrefix(configContentValueStr, `"`) && strings.HasSuffix(configContentValueStr, `"`) { for i, s := range configContentValueStr[1 : len(configContentValueStr)-1] { if s == '"' { _ = strings.Replace(configContentValueStr, `"`, `\"`, i) } } quoteFormattedValue = configContentValueStr } else { quoteFormattedValue = strings.ReplaceAll(configContentValueStr, `"`, `\"`) } } else { quoteFormattedValue = fmt.Sprintf("\"%s\"", configContentValueStr) } kvBuffer.WriteString( fmt.Sprintf( ":%s: %s\n", configEntryContentKey, quoteFormattedValue, ), ) } if backticksCount < 3 { buffer.WriteString( fmt.Sprintf("```{config:option} %s %s-%s\n%s%s\n```\n\n", configKey, entityKey, groupKey, kvBuffer.String(), strings.TrimLeft(longDescContent, "\n"), )) } else { configQuotes := strings.Repeat("`", backticksCount+1) buffer.WriteString( fmt.Sprintf("%s{config:option} %s %s-%s\n%s%s\n%s\n\n", configQuotes, configKey, entityKey, groupKey, kvBuffer.String(), strings.TrimLeft(longDescContent, "\n"), configQuotes, )) } } } buffer.WriteString(fmt.Sprintf("\n", entityKey, groupKey)) } } err = os.WriteFile(outputTxtPath, buffer.Bytes(), 0o644) if err != nil { return fmt.Errorf("Error while writing the Markdown project documentation: %v", err) } return nil } incus-6.0.4/cmd/generate-config/incus_doc_test.go000066400000000000000000000044351477363751000220210ustar00rootroot00000000000000package main import ( "testing" "github.com/stretchr/testify/assert" ) // Test the alphabetical sorting of a `generate-config` JSON structure. 
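// The entries inside each group's "keys" slice must come back sorted by their single map key, regardless of the order in which the input was built.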
func TestJSONSorted(t *testing.T) { projectEntries := make(map[string]any) projectEntries["entityKey1"] = map[string]any{ "groupKey1": map[string]any{ "keys": []any{ map[string]any{ "a.core.server.test.b": map[string]string{ "todo5": "stuff", "todo6": "stuff", }, }, map[string]any{ "a.core.server.test.c": map[string]string{ "todo3": "stuff", "todo4": "stuff", }, }, map[string]any{ "b.core.server.test.a": map[string]string{ "todo1": "stuff", "todo2": "stuff", }, }, }, }, } projectEntries["entityKey2"] = map[string]any{ "groupKey2": map[string]any{ "keys": []any{ map[string]any{ "000.111.222": map[string]string{ "todo9": "stuff", "todo10": "stuff", }, }, map[string]any{ "aaa.ccc.bbb": map[string]string{ "todo7": "stuff", "todo8": "stuff", }, }, map[string]any{ "zzz.*": map[string]string{ "todo11": "stuff", "todo12": "stuff", }, }, }, }, } sortedProjectEntries := make(map[string]any) sortedProjectEntries["entityKey1"] = map[string]any{ "groupKey1": map[string]any{ "keys": []any{ map[string]any{ "a.core.server.test.b": map[string]string{ "todo5": "stuff", "todo6": "stuff", }, }, map[string]any{ "a.core.server.test.c": map[string]string{ "todo3": "stuff", "todo4": "stuff", }, }, map[string]any{ "b.core.server.test.a": map[string]string{ "todo1": "stuff", "todo2": "stuff", }, }, }, }, } sortedProjectEntries["entityKey2"] = map[string]any{ "groupKey2": map[string]any{ "keys": []any{ map[string]any{ "000.111.222": map[string]string{ "todo9": "stuff", "todo10": "stuff", }, }, map[string]any{ "aaa.ccc.bbb": map[string]string{ "todo7": "stuff", "todo8": "stuff", }, }, map[string]any{ "zzz.*": map[string]string{ "todo11": "stuff", "todo12": "stuff", }, }, }, }, } sortConfigKeys(projectEntries) assert.Equal(t, sortedProjectEntries, projectEntries) } incus-6.0.4/cmd/generate-config/main.go000066400000000000000000000026421477363751000177360ustar00rootroot00000000000000package main import ( "fmt" "log" "os" "github.com/spf13/cobra" ) var ( exclude []string jsonOutput string txtOutput string rootCmd = &cobra.Command{ Use: "generate-config", Short: "generate-config - a simple tool to generate documentation for Incus", Long: "generate-config - a simple tool to generate documentation for Incus. 
It outputs a YAML and a Markdown file that contain the content of all `gendoc:generate` statements in the project.", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("Please provide a path to the project") } path := args[0] _, err := parse(path, jsonOutput, exclude) if err != nil { return err } if txtOutput != "" { err = writeDocFile(jsonOutput, txtOutput) if err != nil { return err } } return nil }, } ) func main() { rootCmd.Flags().StringSliceVarP(&exclude, "exclude", "e", []string{}, "Path to exclude from the process") rootCmd.Flags().StringVarP(&jsonOutput, "json", "j", "configuration.json", "Output JSON file containing the generated configuration") rootCmd.Flags().StringVarP(&txtOutput, "txt", "t", "", "Output TXT file containing the generated documentation") err := rootCmd.Execute() if err != nil { fmt.Fprintf(os.Stderr, "generate-config failed: %v", err) os.Exit(1) } log.Println("generate-config finished successfully") } incus-6.0.4/cmd/generate-database/000077500000000000000000000000001477363751000167565ustar00rootroot00000000000000incus-6.0.4/cmd/generate-database/README.md000066400000000000000000000313341477363751000202410ustar00rootroot00000000000000# `generate-database` ## Introduction `generate-database` is a database statement and associated `go` function generator for Incus and related projects. `generate-database` utilizes `go`'s code generation directives (`//go:generate ...`) alongside go's [ast](https://pkg.go.dev/go/ast) and [types](https://pkg.go.dev/go/types) packages for parsing the syntax tree for go structs and variables. We use `generate-database` for the majority of our SQL statements and database interactions on the `go` side for consistency and predictability. ## Disclaimer `generate-database` is intended for internal use within the [Incus](https://github.com/lxc/incus) code base. There are no guarantees regarding backwards compatibility, API stability, or long-term availability. It may change or be removed at any time without prior notice. Use at your own discretion. ## Usage ### Initialization #### Package global Once per package, that uses `generate-database` for generation of database statements and associated `go` functions, `generate-database` needs to be invoked using the following `go:generate` instruction: ```go //go:generate generate-database db mapper generate ``` This will initiate a call to `generate-database db mapper generate`, which will then search for `//generate-database:mapper` directives in the same file and process those. The following flags are available: * `--package` / `-p`: Package import paths to search for structs to parse. Defaults to the caller package. Can be used more than once. 
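For example, here is a sketch of pointing the generator at structs declared in two other packages; the import paths are hypothetical and should be replaced with the real packages that declare the structs:

```go
//go:generate generate-database db mapper generate -p github.com/example/project/db/cluster -p github.com/example/project/db/auth
```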
#### File

Generally the first thing we will want to do for any newly generated file is to ensure the file has been cleared of content:

```go
//generate-database:mapper target instances.mapper.go
//generate-database:mapper reset -i -b "//go:build linux && cgo && !agent"
```

### Generation Directive Arguments

The generation directive arguments have the following form: `//generate-database:mapper <command> <flags> <arguments>`

The following flags are available:

* `--build` / `-b`: build comment to include (commands: `reset`)
* `--interface` / `-i`: create interface files (commands: `reset`, `method`)
* `--entity` / `-e`: database entity to generate the method or statement for (commands: `stmt`, `method`)

Example:

* `//generate-database:mapper stmt -e instance objects table=table_name`

The `table` key can be used to override the generated table name with a specified one.

* `//generate-database:mapper method -i -e instance Create references=Config,Device`

For some tables (defined below under [Additional Information](#Additional-Information) as [EntityTable](#EntityTable)), the `references=` key can be provided with the name of a [ReferenceTable](#ReferenceTable) or [MapTable](#MapTable) struct. This directive would produce `CreateInstance` in addition to `CreateInstanceConfig` and `CreateInstanceDevices`:

* `//generate-database:mapper method -i -e instance_profile Create struct=Instance`
* `//generate-database:mapper method -i -e instance_profile Create struct=Profile`

For some tables (defined below under [Additional Information](#Additional-Information) as [AssociationTable](#AssociationTable)), `method` declarations must include a `struct=<StructName>` to indicate the directionality of the function. An invocation can be called for each direction. This would produce `CreateInstanceProfiles` and `CreateProfileInstances` respectively.

### SQL Statement Generation

SQL generation supports the following SQL statement types:

Type | Description
:--- | :----
`objects` | Creates a basic SELECT statement of the form `SELECT <columns> FROM <table> ORDER BY <primary key columns>`.
`objects-by-<Field>-and-...` | Parses a pre-existing SELECT statement variable declaration of the form produced by `objects`, and appends a `WHERE` clause with the given fields located in the associated struct. Specifically looks for a variable declaration of the form `var Objects = RegisterStmt("SQL String")`
`names` | Creates a basic SELECT statement of the form `SELECT <primary key> FROM <table> ORDER BY <primary key columns>`.
`names-by-<Field>-and-...` | Parses a pre-existing SELECT statement variable declaration of the form produced by `names`, and appends a `WHERE` clause with the given fields located in the associated struct. Specifically looks for a variable declaration of the form `var Objects = RegisterStmt("SQL String")`
`create` | Creates a basic INSERT statement of the form `INSERT INTO <table> VALUES`.
`create-or-replace` | Creates a basic INSERT statement of the form `INSERT OR REPLACE INTO <table> VALUES`.
`delete-by-<Field>-and-...` | Creates a DELETE statement of the form `DELETE FROM <table> WHERE <constraint>` where the constraint is based on the given fields of the associated struct.
`id` | Creates a basic SELECT statement that returns just the internal ID of the table.
`rename` | Creates an UPDATE statement that updates the primary key of a table: `UPDATE <table> SET <primary key> WHERE <constraint>`.
`update` | Creates an UPDATE statement of the form `UPDATE <table> SET <columns> WHERE <primary key constraint>`.

#### Examples

```go
//generate-database:mapper stmt -e instance objects
//generate-database:mapper stmt -e instance objects-by-Name-and-Project
//generate-database:mapper stmt -e instance create
//generate-database:mapper stmt -e instance update
//generate-database:mapper stmt -e instance delete-by-Name-and-Project
```
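To make the statement kinds above more concrete, here is a rough sketch of the kind of registered statement an `objects` directive ends up producing. It is illustrative only: the variable name, the table and column names, and the exact SQL emitted all depend on the struct's fields and tags.

```go
// Hypothetical output for `//generate-database:mapper stmt -e instance objects`
// on a struct whose Project field is joined in from the projects table.
var instanceObjects = RegisterStmt(`
SELECT instances.id, projects.name AS project, instances.name
  FROM instances
  JOIN projects ON instances.project_id = projects.id
  ORDER BY projects.name, instances.name
`)
```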
#### Statement Related Go Tags

There are several tags that can be added to fields of a struct that will be parsed by the `ast` package.

Tag | Description
:-- | :----
`sql=<table>.<column>` | Supply an explicit table and column name to use for this struct field.
`coalesce=<value>` | Generates a SQL coalesce function with the given value `coalesce(<column>, value)`.
`order=yes` | Override the default `ORDER BY` columns with all fields specifying this tag.
`join=<table>.<column>` | Applies a `JOIN` of the form `JOIN <table> ON <table>.<column> = <join column>`.
`leftjoin=<table>.<column>` | Applies a `LEFT JOIN` of the same form as a `JOIN`.
`joinon=<table>.<column>` | Overrides the default `JOIN ON` clause with the given table and column, replacing `<table>.<column>` above.
`primary=yes` | Assigns the column associated with the field to be sufficient for returning a row from the table. Will default to `Name` if unspecified. Fields with this key will be included in the default 'ORDER BY' clause.
`omit=<list>` | Omits a given field from consideration for the comma separated list of statement types (`create`, `objects-by-Name`, `update`).
`ignore` | Outright ignore the struct field as though it does not exist. `ignore` needs to be the only tag value in order to be recognized.
`marshal=<value>` | Marshal/Unmarshal data into the field. The column must be a TEXT column. If `marshal=yes`, then the type must implement both `Marshal` and `Unmarshal`. If `marshal=json`, the type is marshaled to JSON using the standard library ([json.Marshal](https://pkg.go.dev/encoding/json#Marshal)). This works for entity tables only, and not for association or mapping tables.
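As a sketch of how these options are typically attached to a struct, the snippet below decorates a couple of fields. It assumes the generator reads the options from a `db` struct tag with `&`-separated key/value pairs; the tag key and the concrete field names are assumptions for illustration, not taken from this README:

```go
// Illustrative only: the `db` tag key and the exact option syntax are assumptions.
type Instance struct {
	ID      int
	Project string `db:"primary=yes&join=projects.name"` // scalar value joined in from the projects table
	Name    string `db:"primary=yes"`                    // part of the natural key
	Config  map[string]string                            // typically populated through a separate MapTable mapper
}
```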
### Go Function Generation

Go function generation supports the following types:

Type | Description
:--- | :----
`GetNames` | Return a slice of primary keys for all rows in a table matching the filter. Cannot be used with composite keys.
`GetMany` | Return a slice of structs for all rows in a table matching the filter.
`GetOne` | Return a single struct corresponding to a row with the given primary keys. Depends on `GetMany`.
`ID` | Return the ID column from the table corresponding to the given primary keys.
`Exists` | Returns whether there is a row in the table with the given primary keys. Depends on `ID`.
`Create` | Insert a row from the given struct into the table if not already present. Depends on `Exists`.
`CreateOrReplace` | Insert a row from the given struct into the table, regardless of whether an entry already exists.
`Rename` | Update the primary key for a table row.
`Update` | Update the columns at a given row, specified by primary key.
`DeleteOne` | Delete exactly one row from the table.
`DeleteMany` | Delete one or more rows from the table.

```go
//generate-database:mapper method -i -e instance GetMany
//generate-database:mapper method -i -e instance GetOne
//generate-database:mapper method -i -e instance ID
//generate-database:mapper method -i -e instance Exists
//generate-database:mapper method -i -e instance Create
//generate-database:mapper method -i -e instance Update
//generate-database:mapper method -i -e instance DeleteOne-by-Project-and-Name
//generate-database:mapper method -i -e instance DeleteMany-by-Name
```

### Additional Information

All structs should have an `ID` field, as well as an additional `Filter` struct prefixed with the original struct name. This should include any fields that should be considered for filtering in `WHERE` clauses. These fields should be pointers to facilitate omission and inclusion without setting default values.

Example:

```go
type Instance struct {
	ID      int
	Name    string
	Project string
}

type InstanceFilter struct {
	Name    *string
	Project *string
}
```

`generate-database` will handle parsing of structs differently based on the composition of the struct in four different ways. Non-`EntityType` structs will only support `GetMany`, `Create`, `Update`, and `Delete` functions.

### EntityTable

Most structs will get treated this way, and represent a normal table.

* If a table has an associated table for which a `ReferenceTable` or `MapTable` as defined below is applicable, functions specific to this entity can be generated by including a comma separated list to `references=<StructName>` in the code generation directive for `GetMany`, `Create`, or `Update` directives.
* The `Create` method directive for `EntityTable` expects the `ID` and `Exists` method directives to be present.
* All `CREATE`, `UPDATE`, and `DELETE` statements that include a joined table will expect a `var <entity>ID = RegisterStmt('SQL String')` to exist for the joining table.

### ReferenceTable

A struct that contains a field named `ReferenceID` will be parsed this way. `generate-database` will use this struct to generate more abstract SQL statements and functions of the form `<parent>_<entity>`. Real world invocation of these statements and functions should be done through an `EntityTable` `method` call with the tag `references=<StructName>`. This `EntityTable` will replace the `<parent>` above.

Example:

```go
//generate-database:mapper stmt -e device create
//generate-database:mapper method -e device Create

type Device struct {
	ID          int
	ReferenceID int
	Name        string
	Type        string
}

//...

//generate-database:mapper method -e instance Create references=Device

// This will produce a function called `CreateInstanceDevices`.
```

### MapTable

This is a special type of `ReferenceTable` with fields named `Key` and `Value`. On the SQL side, this is treated exactly like a `ReferenceTable`, but on the `go` side, the return values will be a map.

Example:

```go
//generate-database:mapper stmt -e config create
//generate-database:mapper method -e config Create

type Config struct {
	ID          int
	ReferenceID int
	Key         string
	Value       string
}

//...

//generate-database:mapper method -e instance Create references=Config

// This will produce a function called `CreateInstanceConfig`, which will return a `map[string]string`.
```

### AssociationTable

This is a special type of table that contains two fields of the form `<Entity>ID`, where `<Entity>` corresponds to two other structs present in the same package. This will generate code for compound tables of the form `<entity1>_<entity2>` that are generally used to associate two tables together by their IDs. `method` generation declarations for these statements should include a `struct=<StructName>` to indicate the directionality of the function. An invocation can be called for each direction.

Example:

```go
//generate-database:mapper method -i -e instance_profile Create struct=Instance
//generate-database:mapper method -i -e instance_profile Create struct=Profile

type InstanceProfile struct {
	InstanceID int
	ProfileID  int
}
```
incus-6.0.4/cmd/generate-database/db.go000066400000000000000000000167061477363751000177030ustar00rootroot00000000000000//go:build linux && cgo && !agent package main import ( "encoding/csv" "errors" "fmt" "go/build" "os" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/tools/go/packages" "github.com/lxc/incus/v6/cmd/generate-database/db" "github.com/lxc/incus/v6/cmd/generate-database/file" "github.com/lxc/incus/v6/cmd/generate-database/lex" ) // Return a new db command. func newDb() *cobra.Command { cmd := &cobra.Command{ Use: "db [sub-command]", Short: "Database-related code generation.", RunE: func(cmd *cobra.Command, args []string) error { return fmt.Errorf("Not implemented") }, } cmd.AddCommand(newDbSchema()) cmd.AddCommand(newDbMapper()) // Workaround for subcommand usage errors.
See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } func newDbSchema() *cobra.Command { cmd := &cobra.Command{ Use: "schema", Short: "Generate database schema by applying updates.", RunE: func(cmd *cobra.Command, args []string) error { return db.UpdateSchema() }, } return cmd } func newDbMapper() *cobra.Command { cmd := &cobra.Command{ Use: "mapper [sub-command]", Short: "Generate code mapping database rows to Go structs.", RunE: func(cmd *cobra.Command, args []string) error { return fmt.Errorf("Not implemented") }, } cmd.AddCommand(newDbMapperGenerate()) return cmd } func newDbMapperGenerate() *cobra.Command { var pkgs *[]string var boilerplateFilename string cmd := &cobra.Command{ Use: "generate", Short: "Generate database statememnts and transaction method and interface signature.", RunE: func(cmd *cobra.Command, args []string) error { if os.Getenv("GOPACKAGE") == "" { return errors.New("GOPACKAGE environment variable is not set") } return generate(*pkgs, boilerplateFilename) }, } flags := cmd.Flags() pkgs = flags.StringArrayP("package", "p", []string{}, "Go package where the entity struct is declared") flags.StringVarP(&boilerplateFilename, "boilerplate-file", "b", "-", "Filename of the file where the mapper boilerplate is written to") return cmd } const prefix = "//generate-database:mapper " func generate(pkgs []string, boilerplateFilename string) error { localPath, err := os.Getwd() if err != nil { return err } localPkg, err := packages.Load(&packages.Config{Mode: packages.NeedName}, localPath) if err != nil { return err } localPkgPath := localPkg[0].PkgPath if len(pkgs) == 0 { pkgs = []string{localPkgPath} } parsedPkgs, err := packageLoad(pkgs) if err != nil { return err } err = file.Boilerplate(boilerplateFilename) if err != nil { return err } registeredSQLStmts := map[string]string{} for _, parsedPkg := range parsedPkgs { for _, goFile := range parsedPkg.CompiledGoFiles { body, err := os.ReadFile(goFile) if err != nil { return err } // Reset target to stdout target := "-" lines := strings.Split(string(body), "\n") for _, line := range lines { // Lazy matching for prefix, does not consider Go syntax and therefore // lines starting with prefix, that are part of e.g. multiline strings // match as well. This is highly unlikely to cause false positives. if strings.HasPrefix(line, prefix) { line = strings.TrimPrefix(line, prefix) // Use csv parser to properly handle arguments surrounded by double quotes. 
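// A single space is used as the field separator, so quoted arguments may themselves contain spaces.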
r := csv.NewReader(strings.NewReader(line)) r.Comma = ' ' // space args, err := r.Read() if err != nil { return err } if len(args) == 0 { return fmt.Errorf("command missing") } command := args[0] switch command { case "target": if len(args) != 2 { return fmt.Errorf("invalid arguments for command target, one argument for the target filename: %s", line) } target = args[1] case "reset": err = commandReset(args[1:], parsedPkgs, target, localPkgPath) case "stmt": err = commandStmt(args[1:], target, parsedPkgs, registeredSQLStmts, localPkgPath) case "method": err = commandMethod(args[1:], target, parsedPkgs, registeredSQLStmts, localPkgPath) default: err = fmt.Errorf("unknown command: %s", command) } if err != nil { return err } } } } } return nil } func commandReset(commandLine []string, parsedPkgs []*packages.Package, target string, localPkgPath string) error { var err error flags := pflag.NewFlagSet("", pflag.ContinueOnError) iface := flags.BoolP("interface", "i", false, "create interface files") buildComment := flags.StringP("build", "b", "", "build comment to include") err = flags.Parse(commandLine) if err != nil { return err } imports := db.Imports for _, pkg := range parsedPkgs { if pkg.PkgPath == localPkgPath { continue } imports = append(imports, pkg.PkgPath) } err = file.Reset(target, imports, *buildComment, *iface) if err != nil { return err } return nil } func commandStmt(commandLine []string, target string, parsedPkgs []*packages.Package, registeredSQLStmts map[string]string, localPkgPath string) error { var err error flags := pflag.NewFlagSet("", pflag.ContinueOnError) entity := flags.StringP("entity", "e", "", "database entity to generate the statement for") err = flags.Parse(commandLine) if err != nil { return err } if len(flags.Args()) < 1 { return fmt.Errorf("argument missing for stmt command") } kind := flags.Arg(0) config, err := parseParams(flags.Args()[1:]) if err != nil { return err } stmt, err := db.NewStmt(localPkgPath, parsedPkgs, *entity, kind, config, registeredSQLStmts) if err != nil { return err } return file.Append(*entity, target, stmt, false) } func commandMethod(commandLine []string, target string, parsedPkgs []*packages.Package, registeredSQLStmts map[string]string, localPkgPath string) error { var err error flags := pflag.NewFlagSet("", pflag.ContinueOnError) iface := flags.BoolP("interface", "i", false, "create interface files") entity := flags.StringP("entity", "e", "", "database entity to generate the method for") err = flags.Parse(commandLine) if err != nil { return err } if len(flags.Args()) < 1 { return fmt.Errorf("argument missing for method command") } kind := flags.Arg(0) config, err := parseParams(flags.Args()[1:]) if err != nil { return err } method, err := db.NewMethod(localPkgPath, parsedPkgs, *entity, kind, config, registeredSQLStmts) if err != nil { return err } return file.Append(*entity, target, method, *iface) } func packageLoad(pkgs []string) ([]*packages.Package, error) { pkgPaths := []string{} for _, pkg := range pkgs { if pkg == "" { var err error localPath, err := os.Getwd() if err != nil { return nil, err } pkgPaths = append(pkgPaths, localPath) } else { importPkg, err := build.Import(pkg, "", build.FindOnly) if err != nil { return nil, fmt.Errorf("Invalid import path %q: %w", pkg, err) } pkgPaths = append(pkgPaths, importPkg.Dir) } } parsedPkgs, err := packages.Load(&packages.Config{ Mode: packages.LoadTypes | packages.NeedTypesInfo, }, pkgPaths...) 
if err != nil { return nil, err } return parsedPkgs, nil } func parseParams(args []string) (map[string]string, error) { config := map[string]string{} for _, arg := range args { key, value, err := lex.KeyValue(arg) if err != nil { return nil, fmt.Errorf("Invalid config parameter: %w", err) } config[key] = value } return config, nil } incus-6.0.4/cmd/generate-database/db/000077500000000000000000000000001477363751000173435ustar00rootroot00000000000000incus-6.0.4/cmd/generate-database/db/constants.go000066400000000000000000000003471477363751000217120ustar00rootroot00000000000000//go:build linux && cgo && !agent package db // Imports is a list of the package imports every generated source file has. var Imports = []string{ "context", "database/sql", "fmt", "strings", "github.com/mattn/go-sqlite3", } incus-6.0.4/cmd/generate-database/db/lex.go000066400000000000000000000073601477363751000204700ustar00rootroot00000000000000package db import ( "fmt" "strings" "github.com/lxc/incus/v6/cmd/generate-database/lex" "github.com/lxc/incus/v6/shared/util" ) // Return the table name for the given database entity. func entityTable(entity string, override string) string { if override != "" { return override } entityParts := strings.Split(lex.SnakeCase(entity), "_") tableParts := make([]string, len(entityParts)) for i, part := range entityParts { if strings.HasSuffix(part, "ty") || strings.HasSuffix(part, "ly") { tableParts[i] = part } else { tableParts[i] = lex.Plural(part) } } return strings.Join(tableParts, "_") } // Return the name of the Filter struct for the given database entity. func entityFilter(entity string) string { return fmt.Sprintf("%sFilter", lex.PascalCase(entity)) } // Return the name of the global variable holding the registration code for // the given kind of statement aganst the given entity. func stmtCodeVar(entity string, kind string, filters ...string) string { prefix := lex.CamelCase(entity) name := fmt.Sprintf("%s%s", prefix, lex.PascalCase(kind)) if len(filters) > 0 { name += "By" name += strings.Join(filters, "And") } return name } // operation returns the kind of operation being performed, without filter fields. func operation(kind string) string { return strings.Split(kind, "-by-")[0] } // activeFilters returns the filters mentioned in the command name. func activeFilters(kind string) []string { startIndex := strings.Index(kind, "-by-") + len("-by-") return strings.Split(kind[startIndex:], "-and-") } // Return an expression evaluating if a filter should be used (based on active // criteria). func activeCriteria(filter []string, ignoredFilter []string) string { expr := "" for i, name := range filter { if i > 0 { expr += " && " } expr += fmt.Sprintf("filter.%s != nil", name) } for _, name := range ignoredFilter { if len(expr) > 0 { expr += " && " } expr += fmt.Sprintf("filter.%s == nil", name) } return expr } // Return the code for a "dest" function, to be passed as parameter to // selectObjects in order to scan a single row. 
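// The returned closure scans one row into a fresh struct, unmarshals any marshaled columns and appends the result to the given slice variable.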
func destFunc(slice string, entity string, importType string, fields []*Field) string { var builder strings.Builder writeLine := func(line string) { builder.WriteString(fmt.Sprintf("%s\n", line)) } writeLine(`func(scan func(dest ...any) error) error {`) varName := lex.Minuscule(string(entity[0])) writeLine(fmt.Sprintf("%s := %s{}", varName, importType)) checkErr := func() { writeLine("if err != nil {\nreturn err\n}") writeLine("") } unmarshal := func(declVarName string, field *Field) { unmarshalFunc := "unmarshal" if field.Config.Get("marshal") == "json" { unmarshalFunc = "unmarshalJSON" } writeLine(fmt.Sprintf("err = %s(%s, &%s.%s)", unmarshalFunc, declVarName, varName, field.Name)) checkErr() } args := make([]string, len(fields)) declVars := make(map[string]*Field, len(fields)) declVarNames := make([]string, 0, len(fields)) for i, field := range fields { var arg string if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { declVarName := fmt.Sprintf("%sStr", lex.Minuscule(field.Name)) declVarNames = append(declVarNames, declVarName) declVars[declVarName] = field arg = fmt.Sprintf("&%s", declVarName) } else { arg = fmt.Sprintf("&%s.%s", varName, field.Name) } args[i] = arg } for _, declVarName := range declVarNames { writeLine(fmt.Sprintf("var %s string", declVarName)) } writeLine(fmt.Sprintf("err := scan(%s)", strings.Join(args, ", "))) checkErr() for _, declVarName := range declVarNames { unmarshal(declVarName, declVars[declVarName]) } writeLine(fmt.Sprintf("%s = append(%s, %s)\n", slice, slice, varName)) writeLine("return nil") writeLine("}") return builder.String() } incus-6.0.4/cmd/generate-database/db/mapping.go000066400000000000000000000400221477363751000213230ustar00rootroot00000000000000package db import ( "fmt" "go/ast" "go/types" "net/url" "slices" "strings" "github.com/lxc/incus/v6/cmd/generate-database/lex" "github.com/lxc/incus/v6/shared/util" ) // Mapping holds information for mapping database tables to a Go structure. type Mapping struct { Local bool // Whether the entity is in the same package as the generated code. FilterLocal bool // Whether the entity is in the same package as the generated code. Package string // Package of the Go struct Name string // Name of the Go struct. Fields []*Field // Metadata about the Go struct. Filterable bool // Whether the Go struct has a Filter companion struct for filtering queries. Filters []*Field // Metadata about the Go struct used for filter fields. Type TableType // Type of table structure for this Go struct. } // TableType represents the logical type of the table defined by the Go struct. type TableType int // EntityTable represents the type for any entity that maps to a Go struct. var EntityTable = TableType(0) // ReferenceTable represents the type for for any entity that contains an // 'entity_id' field mapping to a parent entity. var ReferenceTable = TableType(1) // AssociationTable represents the type for an entity that associates two // other entities. var AssociationTable = TableType(2) // MapTable represents the type for a table storing key/value pairs. var MapTable = TableType(3) // NaturalKey returns the struct fields that can be used as natural key for // uniquely identifying a row in the underlying table (==. // // By convention the natural key field is the one called "Name", unless // specified otherwise with the `db:natural_key` tags. 
func (m *Mapping) NaturalKey() []*Field { key := []*Field{} for _, field := range m.Fields { if field.Config.Get("primary") != "" { key = append(key, field) } } if len(key) == 0 { // Default primary key. key = append(key, m.FieldByName("Name")) } return key } // Identifier returns the field that uniquely identifies this entity. func (m *Mapping) Identifier() *Field { for _, field := range m.NaturalKey() { if field.Name == "Name" || field.Name == "Fingerprint" { return field } } return nil } // TableName determines the table associated to the struct. // - Individual fields may bypass this with their own `sql=
.` tags. // - The override `table=` directive key is checked first. // - The struct name itself is used to approximate the table name if none of the above apply. func (m *Mapping) TableName(entity string, override string) string { table := entityTable(entity, override) if m.Type == ReferenceTable || m.Type == MapTable { table = "%s_" + table } return table } // ContainsFields checks that the mapping contains fields with the same type // and name of given ones. func (m *Mapping) ContainsFields(fields []*Field) bool { matches := map[*Field]bool{} for _, field := range m.Fields { for _, other := range fields { if field.Name == other.Name && field.Type.Name == other.Type.Name { matches[field] = true } } } return len(matches) == len(fields) } // FieldByName returns the field with the given name, if any. func (m *Mapping) FieldByName(name string) *Field { for _, field := range m.Fields { if field.Name == name { return field } } return nil } // ActiveFilters returns the active filter fields for the kind of method. func (m *Mapping) ActiveFilters(kind string) []*Field { names := activeFilters(kind) fields := []*Field{} for _, name := range names { field := m.FieldByName(name) if field != nil { fields = append(fields, field) } } return fields } // FieldColumnName returns the column name of the field with the given name, // prefixed with the entity's table name. func (m *Mapping) FieldColumnName(name string, table string) string { field := m.FieldByName(name) return fmt.Sprintf("%s.%s", table, field.Column()) } // FilterFieldByName returns the field with the given name if that field can be // used as query filter, an error otherwise. func (m *Mapping) FilterFieldByName(name string) (*Field, error) { for _, filter := range m.Filters { if name == filter.Name { if filter.Type.Code != TypeColumn { return nil, fmt.Errorf("Unknown filter %q not a column", name) } return filter, nil } } return nil, fmt.Errorf("Unknown filter %q", name) } // ColumnFields returns the fields that map directly to a database column, // either on this table or on a joined one. func (m *Mapping) ColumnFields(exclude ...string) []*Field { fields := []*Field{} for _, field := range m.Fields { if slices.Contains(exclude, field.Name) { continue } if field.Type.Code == TypeColumn { fields = append(fields, field) } } return fields } // ScalarFields returns the fields that map directly to a single database // column on another table that can be joined to this one. func (m *Mapping) ScalarFields() []*Field { fields := []*Field{} for _, field := range m.Fields { if field.Config.Get("join") != "" || field.Config.Get("leftjoin") != "" { fields = append(fields, field) } } return fields } // RefFields returns the fields that are one-to-many references to other // tables. func (m *Mapping) RefFields() []*Field { fields := []*Field{} for _, field := range m.Fields { if field.Type.Code == TypeSlice || field.Type.Code == TypeMap { fields = append(fields, field) } } return fields } // FieldArgs converts the given fields to function arguments, rendering their // name and type. func (m *Mapping) FieldArgs(fields []*Field, extra ...string) string { args := []string{} for _, field := range fields { name := lex.Minuscule(field.Name) if name == "type" { name = lex.Minuscule(m.Name) + field.Name } arg := fmt.Sprintf("%s %s", name, field.Type.Name) args = append(args, arg) } args = append(args, extra...) return strings.Join(args, ", ") } // FieldParams converts the given fields to function parameters, rendering their // name. 
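// A field called "type" is renamed by prefixing the struct name, since `type` is a reserved word in Go.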
func (m *Mapping) FieldParams(fields []*Field) string { args := make([]string, len(fields)) for i, field := range fields { name := lex.Minuscule(field.Name) if name == "type" { name = lex.Minuscule(m.Name) + field.Name } args[i] = name } return strings.Join(args, ", ") } // FieldParamsMarshal converts the given fields to function parameters, rendering their // name. If the field is configured to marshal input/output, the name will be `marshaled{name}`. func (m *Mapping) FieldParamsMarshal(fields []*Field) string { args := make([]string, len(fields)) for i, field := range fields { name := lex.Minuscule(field.Name) if name == "type" { name = lex.Minuscule(m.Name) + field.Name } if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { name = fmt.Sprintf("marshaled%s", field.Name) } args[i] = name } return strings.Join(args, ", ") } // ImportType returns the type of the entity for the mapping, prefixing the import package if necessary. func (m *Mapping) ImportType() string { name := lex.PascalCase(m.Name) if m.Local { return name } return m.Package + "." + lex.PascalCase(name) } // ImportFilterType returns the Filter type of the entity for the mapping, prefixing the import package if necessary. func (m *Mapping) ImportFilterType() string { name := lex.PascalCase(entityFilter(m.Name)) if m.FilterLocal { return name } return m.Package + "." + name } // Field holds all information about a field in a Go struct that is relevant // for database code generation. type Field struct { Name string Type Type Primary bool // Whether this field is part of the natural primary key. Config url.Values } // Stmt must be used only on a non-columnar field. It returns the name of // statement that should be used to fetch this field. A statement with that // name must have been generated for the entity at hand. func (f *Field) Stmt() string { switch f.Name { case "UsedBy": return "used_by" default: return "" } } // IsScalar returns true if the field is a scalar column value from a joined table. func (f *Field) IsScalar() bool { return f.JoinConfig() != "" } // IsIndirect returns true if the field is a scalar column value from a joined // table that in turn requires another join. func (f *Field) IsIndirect() bool { return f.IsScalar() && f.Config.Get("via") != "" } // IsPrimary returns true if the field part of the natural key. func (f *Field) IsPrimary() bool { return f.Config.Get("primary") != "" || f.Name == "Name" } // Column returns the name of the database column the field maps to. The type // code of the field must be TypeColumn. func (f *Field) Column() string { if f.Type.Code != TypeColumn { panic("attempt to get column name of non-column field") } column := lex.SnakeCase(f.Name) join := f.JoinConfig() if join != "" { column = fmt.Sprintf("%s AS %s", join, column) } return column } // SelectColumn returns a column name suitable for use with 'SELECT' statements. // - Applies a `coalesce()` function if the 'coalesce' tag is present. // - Returns the column in the form '. AS ' if the `join` tag is present. func (f *Field) SelectColumn(mapping *Mapping, primaryTable string) (string, error) { // ReferenceTable and MapTable require specific fields, so parse those instead of checking tags. 
if mapping.Type == ReferenceTable || mapping.Type == MapTable { table := primaryTable column := fmt.Sprintf("%s.%s", table, lex.SnakeCase(f.Name)) column = strings.ReplaceAll(column, "reference", "%s") return column, nil } tableName, columnName, err := f.SQLConfig() if err != nil { return "", err } if tableName == "" { tableName = primaryTable } if columnName == "" { columnName = lex.SnakeCase(f.Name) } var column string join := f.JoinConfig() if join != "" { column = join } else { column = fmt.Sprintf("%s.%s", tableName, columnName) } coalesce, ok := f.Config["coalesce"] if ok { column = fmt.Sprintf("coalesce(%s, %s)", column, coalesce[0]) } if join != "" { column = fmt.Sprintf("%s AS %s", column, columnName) } return column, nil } // OrderBy returns a column name suitable for use with the 'ORDER BY' clause. func (f *Field) OrderBy(mapping *Mapping, primaryTable string) (string, error) { // ReferenceTable and MapTable require specific fields, so parse those instead of checking tags. if mapping.Type == ReferenceTable || mapping.Type == MapTable { table := primaryTable column := fmt.Sprintf("%s.%s", table, lex.SnakeCase(f.Name)) column = strings.ReplaceAll(column, "reference", "%s") return column, nil } if f.IsScalar() { tableName, _, err := f.ScalarTableColumn() if err != nil { return "", err } return tableName + ".id", nil } tableName, columnName, err := f.SQLConfig() if err != nil { return "", nil } if columnName == "" { columnName = lex.SnakeCase(f.Name) } if tableName == "" { tableName = primaryTable } if tableName != "" { return fmt.Sprintf("%s.%s", tableName, columnName), nil } return fmt.Sprintf("%s.%s", entityTable(mapping.Name, tableName), columnName), nil } // JoinClause returns an SQL 'JOIN' clause using the 'join' and 'joinon' tags, if present. func (f *Field) JoinClause(mapping *Mapping, table string) (string, error) { joinTemplate := "\n JOIN %s ON %s = %s.id" if f.Config.Get("join") != "" && f.Config.Get("leftjoin") != "" { return "", fmt.Errorf("Cannot join and leftjoin at the same time for field %q of struct %q", f.Name, mapping.Name) } join := f.JoinConfig() if f.Config.Get("leftjoin") != "" { joinTemplate = strings.ReplaceAll(joinTemplate, "JOIN", "LEFT JOIN") } joinTable, _, ok := strings.Cut(join, ".") if !ok { return "", fmt.Errorf("'join' tag for field %q of struct %q must be of form
.", f.Name, mapping.Name) } joinOn := f.Config.Get("joinon") if joinOn == "" { tableName, columnName, err := f.SQLConfig() if err != nil { return "", err } if tableName != "" && columnName != "" { joinOn = fmt.Sprintf("%s.%s", tableName, columnName) } else { joinOn = fmt.Sprintf("%s.%s_id", table, lex.Singular(joinTable)) } } _, _, ok = strings.Cut(joinOn, ".") if !ok { return "", fmt.Errorf("'joinon' tag of field %q of struct %q must be of form '
.'", f.Name, mapping.Name) } return fmt.Sprintf(joinTemplate, joinTable, joinOn, joinTable), nil } // InsertColumn returns a column name and parameter value suitable for an 'INSERT', 'UPDATE', or 'DELETE' statement. // - If a 'join' tag is present, the package will be searched for the corresponding 'jointableID' registered statement // to select the ID to insert into this table. // - If a 'joinon' tag is present, but this table is not among the conditions, then the join will be considered indirect, // and an empty string will be returned. func (f *Field) InsertColumn(mapping *Mapping, primaryTable string, defs map[*ast.Ident]types.Object, registeredSQLStmts map[string]string) (string, string, error) { var column string var value string var err error if f.IsScalar() { tableName, columnName, err := f.SQLConfig() if err != nil { return "", "", err } if tableName == "" { tableName = primaryTable } // If there is a 'joinon' tag present without this table in the condition, then assume there is no column for this field. joinOn := f.Config.Get("joinon") if joinOn != "" { before, after, ok := strings.Cut(joinOn, ".") if !ok { return "", "", fmt.Errorf("'joinon' tag of field %q of struct %q must be of form '
.'", f.Name, mapping.Name) } columnName = after if tableName != before { return "", "", nil } } table, _, ok := strings.Cut(f.JoinConfig(), ".") if !ok { return "", "", fmt.Errorf("'join' tag of field %q of struct %q must be of form
.", f.Name, mapping.Name) } if columnName != "" { column = columnName } else { column = lex.Singular(table) + "_id" } varName := stmtCodeVar(lex.Singular(table), "ID") joinStmt, err := ParseStmt(varName, defs, registeredSQLStmts) if err != nil { return "", "", fmt.Errorf("Failed to find registered statement %q for field %q of struct %q: %w", varName, f.Name, mapping.Name, err) } value = fmt.Sprintf("(%s)", strings.ReplaceAll(strings.ReplaceAll(joinStmt, "`", ""), "\n", "")) value = strings.ReplaceAll(value, " ", " ") } else { column, err = f.SelectColumn(mapping, primaryTable) if err != nil { return "", "", err } // Strip the table name and coalesce function if present. _, column, _ = strings.Cut(column, ".") column, _, _ = strings.Cut(column, ",") if mapping.Type == ReferenceTable || mapping.Type == MapTable { column = strings.ReplaceAll(column, "reference", "%s") } value = "?" } return column, value, nil } func (f Field) JoinConfig() string { join := f.Config.Get("join") if join == "" { join = f.Config.Get("leftjoin") } return join } // SQLConfig returns the table and column specified by the 'sql' config key, if present. func (f Field) SQLConfig() (string, string, error) { where := f.Config.Get("sql") if where == "" { return "", "", nil } table, column, ok := strings.Cut(where, ".") if !ok { return "", "", fmt.Errorf("'sql' config for field %q should be of the form
.", f.Name) } return table, column, nil } // ScalarTableColumn gets the table and column from the join configuration. func (f Field) ScalarTableColumn() (string, string, error) { join := f.JoinConfig() if join == "" { return "", "", fmt.Errorf("Missing join config for field %q", f.Name) } joinFields := strings.Split(join, ".") if len(joinFields) != 2 { return "", "", fmt.Errorf("Join config must be of the format
. for field %q", f.Name) } return joinFields[0], joinFields[1], nil } // FieldNames returns the names of the given fields. func FieldNames(fields []*Field) []string { names := []string{} for _, f := range fields { names = append(names, f.Name) } return names } // Type holds all information about a field in a field type that is relevant // for database code generation. type Type struct { Name string Code int } // Possible type code. const ( TypeColumn = iota TypeSlice TypeMap ) incus-6.0.4/cmd/generate-database/db/method.go000066400000000000000000001721061477363751000211610ustar00rootroot00000000000000//go:build linux && cgo && !agent package db import ( "fmt" "go/types" "strings" "golang.org/x/tools/go/packages" "github.com/lxc/incus/v6/cmd/generate-database/file" "github.com/lxc/incus/v6/cmd/generate-database/lex" "github.com/lxc/incus/v6/shared/util" ) // Method generates a code snippet for a particular database query method. type Method struct { entity string // Name of the database entity kind string // Kind of statement to generate ref string // ref is the current reference method for the method kind config map[string]string // Configuration parameters localPath string pkgs []*types.Package // Package to perform for struct declaration lookup registeredSQLStmts map[string]string // Lookup for SQL statements registered during this execution, which are therefore not included in the parsed package information } // NewMethod returiiin a new method code snippet for executing a certain mapping. func NewMethod(localPath string, parsedPkgs []*packages.Package, entity, kind string, config map[string]string, registeredSQLStmts map[string]string) (*Method, error) { pkgTypes, err := parsePkgDecls(entity, kind, parsedPkgs) if err != nil { return nil, err } method := &Method{ entity: entity, kind: kind, config: config, localPath: localPath, pkgs: pkgTypes, registeredSQLStmts: registeredSQLStmts, } return method, nil } // Generate the desired method. func (m *Method) Generate(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Unable to parse go struct %q: %w", lex.PascalCase(m.entity), err) } if mapping.Type != EntityTable { switch operation(m.kind) { case "GetMany": return m.getMany(buf) case "Create": return m.create(buf, false) case "Update": return m.update(buf) case "DeleteMany": return m.delete(buf, false) default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } } switch operation(m.kind) { case "GetMany": return m.getMany(buf) case "GetNames": return m.getNames(buf) case "GetOne": return m.getOne(buf) case "ID": return m.id(buf) case "Exists": return m.exists(buf) case "Create": return m.create(buf, false) case "CreateOrReplace": return m.create(buf, true) case "Rename": return m.rename(buf) case "Update": return m.update(buf) case "DeleteOne": return m.delete(buf, true) case "DeleteMany": return m.delete(buf, false) default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } } // GenerateSignature generates an interface signature for the method. 
func (m *Method) GenerateSignature(buf *file.Buffer) error { buf.N() buf.L("// %sGenerated is an interface of generated methods for %s.", lex.PascalCase(m.entity), lex.PascalCase(m.entity)) buf.L("type %sGenerated interface {", lex.PascalCase(m.entity)) defer m.end(buf) if m.config["references"] != "" { refFields := strings.Split(m.config["references"], ",") for _, fieldName := range refFields { m.ref = fieldName err := m.signature(buf, true) if err != nil { return err } m.ref = "" buf.N() } } return m.signature(buf, true) } func (m *Method) getNames(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } // Go type name the objects to return (e.g. api.Foo). structField := mapping.NaturalKey()[0] err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("var err error") buf.N() buf.L("// Result slice.") buf.L("names := make(%s, 0)", lex.Slice(structField.Type.Name)) buf.N() filters, ignoredFilters := FiltersFromStmt(m.pkgs, "names", m.entity, mapping.Filters, m.registeredSQLStmts) buf.N() buf.L("// Pick the prepared statement and arguments to use based on active criteria.") buf.L("var sqlStmt *sql.Stmt") buf.L("args := []any{}") buf.L("queryParts := [2]string{}") buf.N() buf.L("if len(filters) == 0 {") buf.L("sqlStmt, err = Stmt(db, %s)", stmtCodeVar(m.entity, "names")) m.ifErrNotNil(buf, false, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "names"))) buf.L("}") buf.N() if len(filters) > 0 { buf.L("for i, filter := range filters {") } else { buf.L("for _, filter := range filters {") } for i, filter := range filters { branch := "if" if i > 0 { branch = "} else if" } buf.L("%s %s {", branch, activeCriteria(filter, ignoredFilters[i])) var args string for _, name := range filter { for _, field := range mapping.Fields { if name == field.Name && util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaledFilter%s, err := %s(filter.%s)", name, marshalFunc, name) m.ifErrNotNil(buf, true, "nil", "err") args += fmt.Sprintf("marshaledFilter%s,", name) } else if name == field.Name { args += fmt.Sprintf("filter.%s,", name) } } } buf.L("args = append(args, []any{%s}...)", args) buf.L("if len(filters) == 1 {") buf.L("sqlStmt, err = Stmt(db, %s)", stmtCodeVar(m.entity, "names", filter...)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "names", filter...))) buf.L("break") buf.L("}") buf.N() buf.L("query, err := StmtString(%s)", stmtCodeVar(m.entity, "names", filter...)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "names"))) buf.L("parts := strings.SplitN(query, \"ORDER BY\", 2)") buf.L("if i == 0 {") buf.L("copy(queryParts[:], parts)") buf.L("continue") buf.L("}") buf.N() buf.L("_, where, _ := strings.Cut(parts[0], \"WHERE\")") buf.L("queryParts[0] += \"OR\" + where") } branch := "if" if len(filters) > 0 { branch = "} else if" } buf.L("%s %s {", branch, activeCriteria([]string{}, FieldNames(mapping.Filters))) buf.L("return nil, fmt.Errorf(\"Cannot filter on empty %s\")", entityFilter(mapping.Name)) buf.L("} else {") buf.L("return nil, fmt.Errorf(\"No statement exists for the given Filter\")") buf.L("}") 
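	// The branch emitted just above makes the generated getter fail fast: an entirely
	// empty filter struct produces a "Cannot filter on empty ..." error, and a filter
	// combination with no registered statement produces "No statement exists for the
	// given Filter".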
buf.L("}") buf.N() buf.L("// Select.") buf.L("var rows *sql.Rows") buf.L("if sqlStmt != nil {") buf.L("rows, err = sqlStmt.QueryContext(ctx, args...)") buf.L("} else {") buf.L("queryStr := strings.Join(queryParts[:], \"ORDER BY\")") buf.L("rows, err = db.QueryContext(ctx, queryStr, args...)") buf.L("}") buf.N() m.ifErrNotNil(buf, true, "nil", "err") buf.L("defer func() { _ = rows.Close() }()") buf.L("for rows.Next() {") buf.L("var identifier %s", structField.Type.Name) buf.L("err := rows.Scan(&identifier)") m.ifErrNotNil(buf, true, "nil", "err") buf.L("names = append(names, identifier)") buf.L("}") buf.N() buf.L("err = rows.Err()") m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, entityTable(m.entity, m.config["table"]))) buf.L("return names, nil") return nil } func (m *Method) getMany(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } err = m.getManyTemplateFuncs(buf, mapping) if err != nil { return err } if m.config["references"] != "" { parentTable := mapping.TableName(m.entity, m.config["table"]) refFields := strings.Split(m.config["references"], ",") refs := make([]*Mapping, len(refFields)) for i, fieldName := range refFields { refMapping, err := Parse(m.localPath, m.pkgs, fieldName, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } refs[len(refs)-1-i] = refMapping } defer func() { for _, refMapping := range refs { err = m.getRefs(buf, parentTable, refMapping) if err != nil { return } } }() } // Go type name the objects to return (e.g. api.Foo). typ := mapping.ImportType() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("var err error") buf.N() buf.L("// Result slice.") buf.L("objects := make(%s, 0)", lex.Slice(typ)) buf.N() if mapping.Type == ReferenceTable || mapping.Type == MapTable { stmtVar := stmtCodeVar(m.entity, "objects") stmtLocal := stmtVar + "Local" buf.L("%s := strings.ReplaceAll(%s, \"%%s_id\", fmt.Sprintf(\"%%s_id\", parentColumnPrefix))", stmtLocal, stmtVar) buf.L("fillParent := make([]any, strings.Count(%s, \"%%s\"))", stmtLocal) buf.L("for i := range fillParent {") buf.L("fillParent[i] = parentTablePrefix") buf.L("}") buf.N() buf.L("queryStr := fmt.Sprintf(%s, fillParent...)", stmtLocal) buf.L("queryParts := strings.SplitN(queryStr, \"ORDER BY\", 2)") buf.L("args := []any{}") buf.N() buf.L("for i, filter := range filters {") buf.L("var cond string") buf.L("if i == 0 {") buf.L("cond = \" WHERE ( %%s )\"") buf.L("} else {") buf.L("cond = \" OR ( %%s )\"") buf.L("}") buf.N() buf.L("entries := []string{}") for _, filter := range mapping.Filters { // Skip over filter fields that are themselves filters for a referenced table. 
found := false for _, refField := range mapping.RefFields() { if filter.Type.Name == entityFilter(refField.Name) { found = true break } } if found { continue } buf.L("if filter.%s != nil {", filter.Name) buf.L("entries = append(entries, \"%s = ?\")", lex.SnakeCase(filter.Name)) buf.L("args = append(args, filter.%s)", filter.Name) buf.L("}") buf.N() } buf.L("if len(entries) == 0 {") buf.L("return nil, fmt.Errorf(\"Cannot filter on empty %s\")", entityFilter(mapping.Name)) buf.L("}") buf.N() buf.L("queryParts[0] += fmt.Sprintf(cond, strings.Join(entries, \" AND \"))") buf.L("}") buf.N() buf.L("queryStr = strings.Join(queryParts, \" ORDER BY\")") } else if mapping.Type == AssociationTable { filter := m.config["struct"] + "ID" buf.L("sqlStmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "objects", filter)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "objects", filter))) buf.L("args := []any{%sID}", lex.Minuscule(m.config["struct"])) } else { filters, ignoredFilters := FiltersFromStmt(m.pkgs, "objects", m.entity, mapping.Filters, m.registeredSQLStmts) buf.N() buf.L("// Pick the prepared statement and arguments to use based on active criteria.") buf.L("var sqlStmt *sql.Stmt") buf.L("args := []any{}") buf.L("queryParts := [2]string{}") buf.N() buf.L("if len(filters) == 0 {") buf.L("sqlStmt, err = Stmt(db, %s)", stmtCodeVar(m.entity, "objects")) m.ifErrNotNil(buf, false, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "objects"))) buf.L("}") buf.N() buf.L("for i, filter := range filters {") for i, filter := range filters { branch := "if" if i > 0 { branch = "} else if" } buf.L("%s %s {", branch, activeCriteria(filter, ignoredFilters[i])) var args string for _, name := range filter { for _, field := range mapping.Fields { if name == field.Name && util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaledFilter%s, err := %s(filter.%s)", name, marshalFunc, name) m.ifErrNotNil(buf, true, "nil", "err") args += fmt.Sprintf("marshaledFilter%s,", name) } else if name == field.Name { args += fmt.Sprintf("filter.%s,", name) } } } buf.L("args = append(args, []any{%s}...)", args) buf.L("if len(filters) == 1 {") buf.L("sqlStmt, err = Stmt(db, %s)", stmtCodeVar(m.entity, "objects", filter...)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "objects", filter...))) buf.L("break") buf.L("}") buf.N() buf.L("query, err := StmtString(%s)", stmtCodeVar(m.entity, "objects", filter...)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "objects"))) buf.L("parts := strings.SplitN(query, \"ORDER BY\", 2)") buf.L("if i == 0 {") buf.L("copy(queryParts[:], parts)") buf.L("continue") buf.L("}") buf.N() buf.L("_, where, _ := strings.Cut(parts[0], \"WHERE\")") buf.L("queryParts[0] += \"OR\" + where") } branch := "if" if len(filters) > 0 { branch = "} else if" } buf.L("%s %s {", branch, activeCriteria([]string{}, FieldNames(mapping.Filters))) buf.L("return nil, fmt.Errorf(\"Cannot filter on empty %s\")", entityFilter(mapping.Name)) buf.L("} else {") buf.L("return nil, fmt.Errorf(\"No statement exists for the given Filter\")") buf.L("}") buf.L("}") buf.N() } if mapping.Type == EntityTable 
{ buf.L("// Select.") buf.L("if sqlStmt != nil {") buf.L("objects, err = get%s(ctx, sqlStmt, args...)", lex.Plural(mapping.Name)) buf.L("} else {") buf.L("queryStr := strings.Join(queryParts[:], \"ORDER BY\")") buf.L("objects, err = get%sRaw(ctx, db, queryStr, args...)", lex.Plural(mapping.Name)) buf.L("}") buf.N() m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, entityTable(m.entity, m.config["table"]))) } else if mapping.Type == ReferenceTable || mapping.Type == MapTable { buf.L("// Select.") buf.L("objects, err = get%sRaw(ctx, db, queryStr, parentTablePrefix, args...)", lex.Plural(mapping.Name)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%%s_%s\" table: %%w", parentTablePrefix, err)`, entityTable(m.entity, m.config["table"]))) } else { buf.N() buf.L("// Select.") buf.L("objects, err = get%s(ctx, sqlStmt, args...)", lex.Plural(mapping.Name)) m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, entityTable(m.entity, m.config["table"]))) } for _, field := range mapping.RefFields() { refStruct := lex.Singular(field.Name) refVar := lex.Minuscule(refStruct) refSlice := lex.Plural(refVar) refMapping, err := Parse(m.localPath, m.pkgs, refStruct, "") if err != nil { return fmt.Errorf("Could not find definition for reference struct %q: %w", refStruct, err) } switch refMapping.Type { case EntityTable: assocStruct := mapping.Name + field.Name buf.L("%s, err := Get%s()", lex.Minuscule(assocStruct), assocStruct) m.ifErrNotNil(buf, true, "nil", "err") buf.L("for i := range objects {") buf.L("objects[i].%s = make([]string, 0)", field.Name) buf.L("refIDs, ok := %s[objects[i].ID]", lex.Minuscule(assocStruct)) buf.L("if ok {") buf.L("for _, refID := range refIDs {") buf.L("%sURIs, err := Get%sURIs(%sFilter{ID: &refID})", refVar, refStruct, refStruct) m.ifErrNotNil(buf, true, "nil", "err") if field.Config.Get("uri") == "" { uriName := strings.ReplaceAll(lex.SnakeCase(refSlice), "_", "-") buf.L("uris, err := urlsToResourceNames(\"/%s\", %sURIs...)", uriName, refVar) m.ifErrNotNil(buf, true, "nil", "err") buf.L("%sURIs = uris", refVar) } buf.L("objects[i].%s = append(objects[i].%s, %sURIs...)", field.Name, field.Name, refVar) buf.L("}") buf.L("}") buf.L("}") case ReferenceTable: buf.L("%sFilters := []%s{}", refVar, entityFilter(refStruct)) buf.L("for _, f := range filters {") buf.L("filter := f.%s", refStruct) buf.L("if filter != nil {") buf.L("if %s {", activeCriteria(nil, FieldNames(refMapping.Filters))) buf.L("return nil, fmt.Errorf(\"Cannot filter on empty %s\")", entityFilter(refMapping.Name)) buf.L("}") buf.N() buf.L("%sFilters = append(%sFilters, *filter)", refVar, refVar) buf.L("}") buf.L("}") buf.N() if mapping.Type == ReferenceTable { // A reference table should let its child reference know about its parent. 
buf.L("%s, err := Get%s(ctx, db, parentTablePrefix+\"_%s\", parent_columnPrefix+\"_%s\", %sFilters...)", refSlice, lex.Plural(refStruct), lex.Plural(m.entity), m.entity, refVar) m.ifErrNotNil(buf, true, "nil", "err") } else { buf.L("%s, err := Get%s(ctx, db, \"%s\", %sFilters...)", refSlice, lex.Plural(refStruct), m.entity, refVar) m.ifErrNotNil(buf, true, "nil", "err") } buf.L("for i := range objects {") if field.Type.Code == TypeSlice { buf.L("objects[i].%s = %s[objects[i].ID]", lex.Plural(refStruct), refSlice) } else if field.Type.Code == TypeMap { buf.L("objects[i].%s = map[string]%s{}", lex.Plural(refStruct), refStruct) buf.L("for _, obj := range %s[objects[i].ID] {", refSlice) buf.L("_, ok := objects[i].%s[obj.%s]", lex.Plural(refStruct), refMapping.NaturalKey()[0].Name) buf.L("if !ok {") buf.L("objects[i].%s[obj.%s] = obj", lex.Plural(refStruct), refMapping.NaturalKey()[0].Name) buf.L("} else {") buf.L("return nil, fmt.Errorf(\"Found duplicate %s with name %%q\", obj.%s)", refStruct, refMapping.NaturalKey()[0].Name) buf.L("}") buf.L("}") } buf.L("}") case MapTable: buf.L("%sFilters := []%s{}", refVar, entityFilter(refStruct)) buf.L("for _, f := range filters {") buf.L("filter := f.%s", refStruct) buf.L("if filter != nil {") buf.L("if %s {", activeCriteria(nil, FieldNames(refMapping.Filters))) buf.L("return nil, fmt.Errorf(\"Cannot filter on empty %s\")", entityFilter(refMapping.Name)) buf.L("}") buf.N() buf.L("%sFilters = append(%sFilters, *filter)", refVar, refVar) buf.L("}") buf.L("}") buf.N() if mapping.Type == ReferenceTable { // A reference table should let its child reference know about its parent. buf.L("%s, err := Get%s(ctx, db, parentTablePrefix+\"_%s\", parentColumnPrefix+\"_%s\", %sFilters...)", refSlice, lex.Plural(refStruct), lex.Plural(m.entity), m.entity, refVar) m.ifErrNotNil(buf, true, "nil", "err") } else { buf.L("%s, err := Get%s(ctx, db, \"%s\", %sFilters...)", refSlice, lex.Plural(refStruct), m.entity, refVar) m.ifErrNotNil(buf, true, "nil", "err") } buf.L("for i := range objects {") buf.L("_, ok := %s[objects[i].ID]", refSlice) buf.L("if !ok {") buf.L("objects[i].%s = map[string]string{}", refStruct) buf.L("} else {") buf.L("objects[i].%s = %s[objects[i].ID]", lex.Plural(refStruct), refSlice) buf.L("}") buf.L("}") } buf.N() } switch mapping.Type { case AssociationTable: ref := strings.ReplaceAll(mapping.Name, m.config["struct"], "") refMapping, err := Parse(m.localPath, m.pkgs, ref, "") if err != nil { return fmt.Errorf("Could not find definition for reference struct %q: %w", ref, err) } buf.L("result := make([]%s, len(objects))", refMapping.ImportType()) buf.L("for i, object := range objects {") buf.L("%s, err := Get%s(ctx, db, %sFilter{ID: &object.%sID})", lex.Minuscule(ref), lex.Plural(ref), ref, ref) m.ifErrNotNil(buf, true, "nil", "err") buf.L("result[i] = %s[0]", lex.Minuscule(ref)) buf.L("}") buf.N() buf.L("return result, nil") case ReferenceTable: buf.L("resultMap := map[int][]%s{}", mapping.ImportType()) buf.L("for _, object := range objects {") buf.L("_, ok := resultMap[object.ReferenceID]") buf.L("if !ok {") buf.L("resultMap[object.ReferenceID] = []%s{}", mapping.ImportType()) buf.L("}") buf.N() buf.L("resultMap[object.ReferenceID] = append(resultMap[object.ReferenceID], object)") buf.L("}") buf.N() buf.L("return resultMap, nil") case MapTable: buf.L("resultMap := map[int]map[string]string{}") buf.L("for _, object := range objects {") buf.L("_, ok := resultMap[object.ReferenceID]") buf.L("if !ok {") buf.L("resultMap[object.ReferenceID] = 
map[string]string{}") buf.L("}") buf.N() buf.L("resultMap[object.ReferenceID][object.Key] = object.Value") buf.L("}") buf.N() buf.L("return resultMap, nil") case EntityTable: buf.L("return objects, nil") } return nil } func (m *Method) getRefs(buf *file.Buffer, parentTable string, refMapping *Mapping) error { m.ref = refMapping.Name err := m.signature(buf, false) if err != nil { return err } defer m.end(buf) // reset m.ref in case m.signature is called again. m.ref = "" refStruct := refMapping.Name refVar := lex.Minuscule(refStruct) refList := lex.Plural(refVar) refParent := lex.CamelCase(m.entity) refParentList := refParent + lex.PascalCase(refList) switch refMapping.Type { case ReferenceTable: buf.L("%s, err := Get%s(ctx, db, \"%s\", \"%s\", filters...)", refParentList, lex.Plural(refStruct), parentTable, m.entity) m.ifErrNotNil(buf, true, "nil", "err") buf.L("%s := map[string]%s{}", refList, refMapping.ImportType()) buf.L("for _, ref := range %s[%sID] {", refParentList, refParent) buf.L("_, ok := %s[ref.%s]", refList, refMapping.Identifier().Name) buf.L("if !ok {") buf.L("%s[ref.%s] = ref", refList, refMapping.Identifier().Name) buf.L("} else {") buf.L("return nil, fmt.Errorf(\"Found duplicate %s with name %%q\", ref.%s)", refStruct, refMapping.Identifier().Name) buf.L("}") buf.L("}") buf.N() case MapTable: buf.L("%s, err := Get%s(ctx, db, \"%s\", \"%s\", filters...)", refParentList, lex.Plural(refStruct), parentTable, m.entity) m.ifErrNotNil(buf, true, "nil", "err") buf.L("%s, ok := %s[%sID]", refList, refParentList, refParent) buf.L("if !ok {") buf.L("%s = map[string]string{}", refList) buf.L("}") buf.N() } buf.L("return %s, nil", refList) return nil } func (m *Method) getOne(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } nk := mapping.NaturalKey() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("filter := %s{}", mapping.ImportFilterType()) for _, field := range nk { name := lex.Minuscule(field.Name) if name == "type" { name = lex.Minuscule(m.entity) + field.Name } buf.L("filter.%s = &%s", field.Name, name) } buf.N() buf.L("objects, err := Get%s(ctx, db, filter)", lex.Plural(lex.PascalCase(m.entity))) if mapping.Type == ReferenceTable || mapping.Type == MapTable { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%%s_%s\" table: %%w", parentTablePrefix, err)`, entityTable(m.entity, m.config["table"]))) } else { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, entityTable(m.entity, m.config["table"]))) } buf.L("switch len(objects) {") buf.L("case 0:") buf.L(` return nil, ErrNotFound`) buf.L("case 1:") buf.L(" return &objects[0], nil") buf.L("default:") buf.L(` return nil, fmt.Errorf("More than one \"%s\" entry matches")`, entityTable(m.entity, m.config["table"])) buf.L("}") return nil } func (m *Method) id(buf *file.Buffer) error { // Support using a different structure or package to pass arguments to Create. 
entityCreate, ok := m.config["struct"] if !ok { entityCreate = lex.PascalCase(m.entity) } mapping, err := Parse(m.localPath, m.pkgs, entityCreate, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } nk := mapping.NaturalKey() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "ID")) m.ifErrNotNil(buf, true, "-1", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "ID"))) for _, field := range nk { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(%s)", field.Name, marshalFunc, lex.Minuscule(field.Name)) m.ifErrNotNil(buf, true, "-1", "err") } } buf.L("row := stmt.QueryRowContext(ctx, %s)", mapping.FieldParamsMarshal(nk)) buf.L("var id int64") buf.L("err = row.Scan(&id)") buf.L("if errors.Is(err, sql.ErrNoRows) {") buf.L(`return -1, ErrNotFound`) buf.L("}") buf.N() m.ifErrNotNil(buf, true, "-1", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" ID: %%w", err)`, entityTable(m.entity, m.config["table"]))) buf.L("return id, nil") return nil } func (m *Method) exists(buf *file.Buffer) error { // Support using a different structure or package to pass arguments to Create. entityCreate, ok := m.config["struct"] if !ok { entityCreate = lex.PascalCase(m.entity) } mapping, err := Parse(m.localPath, m.pkgs, entityCreate, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } nk := mapping.NaturalKey() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "ID")) m.ifErrNotNil(buf, true, "false", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "ID"))) for _, field := range nk { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(%s)", field.Name, marshalFunc, lex.Minuscule(field.Name)) m.ifErrNotNil(buf, true, "false", "err") } } buf.L("row := stmt.QueryRowContext(ctx, %s)", mapping.FieldParamsMarshal(nk)) buf.L("var id int64") buf.L("err = row.Scan(&id)") buf.L("if errors.Is(err, sql.ErrNoRows) {") buf.L(` return false, nil`) buf.L("}") buf.N() m.ifErrNotNil(buf, true, "false", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" ID: %%w", err)`, entityTable(m.entity, m.config["table"]))) buf.L("return true, nil") return nil } func (m *Method) create(buf *file.Buffer, replace bool) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } if m.config["references"] != "" { parentTable := mapping.TableName(m.entity, m.config["table"]) refFields := strings.Split(m.config["references"], ",") refs := make([]*Mapping, len(refFields)) for i, fieldName := range refFields { refMapping, err := Parse(m.localPath, m.pkgs, fieldName, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } refs[len(refs)-1-i] = refMapping } defer func() { for _, refMapping := range refs { err = m.createRefs(buf, parentTable, refMapping) if err != nil { return } } }() } err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) if mapping.Type == MapTable { buf.L("// An empty value means we are unsetting 
this key, so just return.") buf.L("if object.Value == \"\" {") buf.L("return nil") buf.L("}") buf.N() } if mapping.Type == ReferenceTable || mapping.Type == MapTable { stmtVar := stmtCodeVar(m.entity, "create") stmtLocal := stmtVar + "Local" buf.L("%s := strings.ReplaceAll(%s, \"%%s_id\", fmt.Sprintf(\"%%s_id\", parentColumnPrefix))", stmtLocal, stmtVar) buf.L("fillParent := make([]any, strings.Count(%s, \"%%s\"))", stmtLocal) buf.L("for i := range fillParent {") buf.L("fillParent[i] = parentTablePrefix") buf.L("}") buf.N() buf.L("queryStr := fmt.Sprintf(%s, fillParent...)", stmtLocal) createParams := "" columnFields := mapping.ColumnFields("ID") if mapping.Type == ReferenceTable { buf.L("for _, object := range objects {") } for i, field := range columnFields { createParams += fmt.Sprintf("object.%s", field.Name) if i < len(columnFields) { createParams += ", " } } refFields := mapping.RefFields() if len(refFields) == 0 { buf.L("_, err := db.ExecContext(ctx, queryStr, %s)", createParams) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Insert failed for \"%%s_%s\" table: %%w", parentTablePrefix, err)`, lex.Plural(m.entity))) } else { buf.L("result, err := db.ExecContext(ctx, queryStr, %s)", createParams) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Insert failed for \"%%s_%s\" table: %%w", parentTablePrefix, err)`, lex.Plural(m.entity))) buf.L("id, err := result.LastInsertId()") m.ifErrNotNil(buf, true, "fmt.Errorf(\"Failed to fetch ID: %w\", err)") } } else { nk := mapping.NaturalKey() nkParams := make([]string, len(nk)) for i, field := range nk { nkParams[i] = fmt.Sprintf("object.%s", field.Name) } kind := "create" if mapping.Type != AssociationTable && replace { kind = "create_or_replace" } if mapping.Type == AssociationTable { buf.L("for _, object := range objects {") } fields := mapping.ColumnFields("ID") buf.L("args := make([]any, %d)", len(fields)) buf.N() buf.L("// Populate the statement arguments. ") for i, field := range fields { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(object.%s)", field.Name, marshalFunc, field.Name) m.ifErrNotNil(buf, true, "-1", "err") buf.L("args[%d] = marshaled%s", i, field.Name) } else { buf.L("args[%d] = object.%s", i, field.Name) } } buf.N() buf.L("// Prepared statement to use. 
") buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, kind)) if mapping.Type == AssociationTable { m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, kind))) buf.L(`// Execute the statement.`) buf.L(`_, err = stmt.Exec(args...)`) buf.L(`var sqliteErr sqlite3.Error`) buf.L(`if errors.As(err, &sqliteErr) {`) buf.L(` if sqliteErr.Code == sqlite3.ErrConstraint {`) buf.L(` return ErrConflict`) buf.L(` }`) buf.L(`}`) buf.N() m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to create \"%s\" entry: %%w", err)`, entityTable(m.entity, m.config["table"]))) } else { m.ifErrNotNil(buf, true, "-1", fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, kind))) buf.L(`// Execute the statement.`) buf.L(`result, err := stmt.Exec(args...)`) buf.L(`var sqliteErr sqlite3.Error`) buf.L(`if errors.As(err, &sqliteErr) {`) buf.L(` if sqliteErr.Code == sqlite3.ErrConstraint {`) buf.L(` return -1, ErrConflict`) buf.L(` }`) buf.L(`}`) buf.N() m.ifErrNotNil(buf, true, "-1", fmt.Sprintf(`fmt.Errorf("Failed to create \"%s\" entry: %%w", err)`, entityTable(m.entity, m.config["table"]))) buf.L(`id, err := result.LastInsertId()`) m.ifErrNotNil(buf, true, "-1", fmt.Sprintf(`fmt.Errorf("Failed to fetch \"%s\" entry ID: %%w", err)`, entityTable(m.entity, m.config["table"]))) } } for _, field := range mapping.RefFields() { // TODO: Remove all references to UsedBy. if field.Name == "UsedBy" { continue } refStruct := lex.Singular(field.Name) refMapping, err := Parse(m.localPath, m.pkgs, lex.Singular(field.Name), "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } switch refMapping.Type { case EntityTable: assocStruct := mapping.Name + refStruct buf.L("// Update association table.") buf.L("object.ID = int(id)") buf.L("err = Update%s(ctx, db, object)", lex.Plural(assocStruct)) m.ifErrNotNil(buf, true, "-1", fmt.Sprintf("fmt.Errorf(\"Could not update association table: %%w\", err)")) continue case ReferenceTable: buf.L("for _, insert := range object.%s {", field.Name) buf.L("insert.ReferenceID = int(id)") case MapTable: buf.L("referenceID := int(id)") buf.L("for key, value := range object.%s {", field.Name) buf.L("insert := %s{", field.Name) for _, ref := range refMapping.ColumnFields("ID") { buf.L("%s: %s,", ref.Name, lex.Minuscule(ref.Name)) } buf.L("}") buf.N() } if mapping.Type != EntityTable { buf.L("err = Create%s(ctx, db, parentTablePrefix + \"_%s\", parentColumnPrefix + \"_%s\", insert)", refStruct, lex.Plural(m.entity), m.entity) m.ifErrNotNil(buf, false, fmt.Sprintf("fmt.Errorf(\"Insert %s failed for %s: %%w\", err)", field.Name, mapping.Name)) } else { buf.L("err = Create%s(ctx, db, \"%s\", insert)", refStruct, m.entity) m.ifErrNotNil(buf, false, "-1", fmt.Sprintf("fmt.Errorf(\"Insert %s failed for %s: %%w\", err)", field.Name, mapping.Name)) } buf.L("}") } if mapping.Type == ReferenceTable || mapping.Type == AssociationTable { buf.L("}") buf.N() buf.L("return nil") } else if mapping.Type == MapTable { buf.L("return nil") } else { buf.L("return id, nil") } return nil } func (m *Method) createRefs(buf *file.Buffer, parentTable string, refMapping *Mapping) error { m.ref = refMapping.Name err := m.signature(buf, false) if err != nil { return err } defer m.end(buf) // reset m.ref in case m.signature is called again. 
m.ref = "" refStruct := refMapping.Name refVar := lex.Minuscule(refStruct) refParent := lex.CamelCase(m.entity) switch refMapping.Type { case ReferenceTable: buf.L("for key, %s := range %s {", refVar, lex.Plural(refVar)) buf.L("%s.ReferenceID = int(%sID)", refVar, refParent) buf.L("%s[key] = %s", lex.Plural(refVar), refVar) buf.L("}") buf.N() buf.L("err := Create%s(ctx, db, \"%s\", \"%s\", %s)", lex.Plural(refStruct), parentTable, m.entity, lex.Plural(refVar)) m.ifErrNotNil(buf, false, fmt.Sprintf("fmt.Errorf(\"Insert %s failed for %s: %%w\", err)", refStruct, lex.PascalCase(m.entity))) case MapTable: buf.L("referenceID := int(%sID)", refParent) buf.L("for key, value := range %s {", refVar) buf.L("insert := %s{", refStruct) for _, ref := range refMapping.ColumnFields("ID") { buf.L("%s: %s,", ref.Name, lex.Minuscule(ref.Name)) } buf.L("}") buf.N() buf.L("err := Create%s(ctx, db, \"%s\", \"%s\", insert)", refStruct, parentTable, m.entity) m.ifErrNotNil(buf, true, fmt.Sprintf("fmt.Errorf(\"Insert %s failed for %s: %%w\", err)", refStruct, lex.PascalCase(m.entity))) buf.L("}") } buf.N() buf.L("return nil") return nil } func (m *Method) rename(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } nk := mapping.NaturalKey() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "rename")) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "rename"))) for _, field := range nk { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(%s)", field.Name, marshalFunc, lex.Minuscule(field.Name)) m.ifErrNotNil(buf, true, "err") } } buf.L("result, err := stmt.Exec(to, %s)", mapping.FieldParamsMarshal(nk)) m.ifErrNotNil(buf, true, fmt.Sprintf("fmt.Errorf(\"Rename %s failed: %%w\", err)", mapping.Name)) buf.L("n, err := result.RowsAffected()") m.ifErrNotNil(buf, true, "fmt.Errorf(\"Fetch affected rows failed: %w\", err)") buf.L("if n != 1 {") buf.L(" return fmt.Errorf(\"Query affected %%d rows instead of 1\", n)") buf.L("}") buf.N() buf.L("return nil") return nil } func (m *Method) update(buf *file.Buffer) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } // Support using a different structure or package to pass arguments to Create. 
entityUpdate, ok := m.config["struct"] if !ok { entityUpdate = mapping.Name } if m.config["references"] != "" { refFields := strings.Split(m.config["references"], ",") parentTable := mapping.TableName(m.entity, m.config["table"]) refs := make([]*Mapping, len(refFields)) for i, fieldName := range refFields { refMapping, err := Parse(m.localPath, m.pkgs, fieldName, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } refs[len(refs)-1-i] = refMapping } defer func() { for _, refMapping := range refs { err = m.updateRefs(buf, parentTable, refMapping) if err != nil { return } } }() } nk := mapping.NaturalKey() err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) switch mapping.Type { case AssociationTable: ref := strings.ReplaceAll(mapping.Name, m.config["struct"], "") refMapping, err := Parse(m.localPath, m.pkgs, ref, "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } refSlice := lex.Minuscule(lex.Plural(mapping.Name)) buf.L("// Delete current entry.") buf.L("err := Delete%s%s(ctx, db, %sID)", m.config["struct"], lex.Plural(ref), lex.Minuscule(m.config["struct"])) m.ifErrNotNil(buf, true, "err") buf.L("// Get new entry IDs.") buf.L("%s := make([]%s, 0, len(%s%s))", refSlice, mapping.Name, lex.Minuscule(ref), lex.Plural(refMapping.Identifier().Name)) buf.L("for _, entry := range %s%s {", lex.Minuscule(ref), lex.Plural(refMapping.Identifier().Name)) buf.L("refID, err := Get%sID(ctx, db, entry)", ref) m.ifErrNotNil(buf, true, "err") fields := fmt.Sprintf("%sID: %sID, %sID: int(refID)", m.config["struct"], lex.Minuscule(m.config["struct"]), ref) buf.L("%s = append(%s, %s{%s})", refSlice, refSlice, mapping.Name, fields) buf.L("}") buf.N() buf.L("err = Create%s%s(ctx, db, %s)", m.config["struct"], lex.Plural(ref), refSlice) m.ifErrNotNil(buf, true, "err") case ReferenceTable: buf.L("// Delete current entry.") buf.L("err := Delete%s(ctx, db, parentTablePrefix, parentColumnPrefix, referenceID)", lex.PascalCase(lex.Plural(m.entity))) m.ifErrNotNil(buf, true, "err") buf.L("// Insert new entries.") buf.L("for key, object := range %s {", lex.Plural(m.entity)) buf.L("object.ReferenceID = referenceID") buf.L("%s[key] = object", lex.Plural(m.entity)) buf.L("}") buf.N() buf.L("err = Create%s(ctx, db, parentTablePrefix, parentColumnPrefix, %s)", lex.PascalCase(lex.Plural(m.entity)), lex.Plural(m.entity)) m.ifErrNotNil(buf, true, "err") case MapTable: buf.L("// Delete current entry.") buf.L("err := Delete%s(ctx, db, parentTablePrefix, parentColumnPrefix, referenceID)", lex.PascalCase(lex.Plural(m.entity))) m.ifErrNotNil(buf, true, "err") buf.L("// Insert new entries.") buf.L("for key, value := range config {") buf.L("object := %s{", mapping.Name) for _, field := range mapping.ColumnFields("ID") { buf.L("%s: %s,", field.Name, lex.Minuscule(field.Name)) } buf.L("}") buf.N() buf.L("err = Create%s(ctx, db, parentTablePrefix, parentColumnPrefix, object)", lex.PascalCase(m.entity)) m.ifErrNotNil(buf, false, "err") buf.L("}") buf.N() case EntityTable: updateMapping, err := Parse(m.localPath, m.pkgs, entityUpdate, m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } buf.L("id, err := Get%sID(ctx, db, %s)", lex.PascalCase(m.entity), mapping.FieldParams(nk)) m.ifErrNotNil(buf, true, "err") buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "update")) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "update"))) fields := updateMapping.ColumnFields("ID") 
// This exclude the ID column, which is autogenerated. params := make([]string, len(fields)) for i, field := range fields { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(object.%s)", field.Name, marshalFunc, field.Name) m.ifErrNotNil(buf, true, "err") params[i] = fmt.Sprintf("marshaled%s", field.Name) } else { params[i] = fmt.Sprintf("object.%s", field.Name) } } buf.L("result, err := stmt.Exec(%s)", strings.Join(params, ", ")+", id") m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Update \"%s\" entry failed: %%w", err)`, entityTable(m.entity, m.config["table"]))) buf.L("n, err := result.RowsAffected()") m.ifErrNotNil(buf, true, "fmt.Errorf(\"Fetch affected rows: %w\", err)") buf.L("if n != 1 {") buf.L(" return fmt.Errorf(\"Query updated %%d rows instead of 1\", n)") buf.L("}") buf.N() for _, field := range mapping.RefFields() { // TODO: Eliminate UsedBy fields and move to dedicated slices for entities. if field.Name == "UsedBy" { continue } refStruct := lex.Singular(field.Name) refMapping, err := Parse(m.localPath, m.pkgs, lex.Singular(field.Name), "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } switch refMapping.Type { case EntityTable: assocStruct := mapping.Name + refStruct buf.L("// Update association table.") buf.L("object.ID = int(id)") buf.L("err = Update%s(ctx, db, object)", lex.Plural(assocStruct)) m.ifErrNotNil(buf, true, "fmt.Errorf(\"Could not update association table: %w\", err)") case ReferenceTable: buf.L("err = Update%s(ctx, db, \"%s\", int(id), object.%s)", lex.Singular(field.Name), m.entity, field.Name) m.ifErrNotNil(buf, true, fmt.Sprintf("fmt.Errorf(\"Replace %s for %s failed: %%w\", err)", field.Name, mapping.Name)) case MapTable: buf.L("err = Update%s(ctx, db, \"%s\", int(id), object.%s)", lex.Singular(field.Name), m.entity, field.Name) m.ifErrNotNil(buf, true, fmt.Sprintf("fmt.Errorf(\"Replace %s for %s failed: %%w\", err)", field.Name, mapping.Name)) buf.N() } } } buf.L("return nil") return nil } func (m *Method) updateRefs(buf *file.Buffer, parentTable string, refMapping *Mapping) error { m.ref = refMapping.Name err := m.signature(buf, false) if err != nil { return err } defer m.end(buf) // reset m.ref in case m.signature is called again. 
m.ref = "" refStruct := refMapping.Name refVar := lex.Minuscule(refStruct) refList := lex.Plural(refVar) refParent := lex.CamelCase(m.entity) buf.L("err := Update%s(ctx, db, \"%s\", \"%s\", int(%sID), %s)", lex.Plural(refStruct), parentTable, m.entity, refParent, refList) m.ifErrNotNil(buf, true, fmt.Sprintf("fmt.Errorf(\"Replace %s for %s failed: %%w\", err)", refStruct, lex.PascalCase(m.entity))) buf.L("return nil") return nil } func (m *Method) delete(buf *file.Buffer, deleteOne bool) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } err = m.signature(buf, false) if err != nil { return err } defer m.end(buf) if mapping.Type == AssociationTable { buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "delete", m.config["struct"]+"ID")) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "delete", m.config["struct"]+"ID"))) buf.L("result, err := stmt.Exec(int(%sID))", lex.Minuscule(m.config["struct"])) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Delete \"%s\" entry failed: %%w", err)`, entityTable(m.entity, m.config["table"]))) } else if mapping.Type == ReferenceTable || mapping.Type == MapTable { stmtVar := stmtCodeVar(m.entity, "delete") stmtLocal := stmtVar + "Local" buf.L("%s := strings.ReplaceAll(%s, \"%%s_id\", fmt.Sprintf(\"%%s_id\", parentColumnPrefix))", stmtLocal, stmtVar) buf.L("fillParent := make([]any, strings.Count(%s, \"%%s\"))", stmtLocal) buf.L("for i := range fillParent {") buf.L("fillParent[i] = parentTablePrefix") buf.L("}") buf.N() buf.L("queryStr := fmt.Sprintf(%s, fillParent...)", stmtLocal) buf.L("result, err := db.ExecContext(ctx, queryStr, referenceID)") m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Delete entry for \"%%s_%s\" failed: %%w", parentTablePrefix, err)`, m.entity)) } else { activeFilters := mapping.ActiveFilters(m.kind) buf.L("stmt, err := Stmt(db, %s)", stmtCodeVar(m.entity, "delete", FieldNames(activeFilters)...)) for _, field := range activeFilters { if util.IsNeitherFalseNorEmpty(field.Config.Get("marshal")) { marshalFunc := "marshal" if strings.ToLower(field.Config.Get("marshal")) == "json" { marshalFunc = "marshalJSON" } buf.L("marshaled%s, err := %s(%s)", field.Name, marshalFunc, lex.Minuscule(field.Name)) m.ifErrNotNil(buf, true, "err") } } m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Failed to get \"%s\" prepared statement: %%w", err)`, stmtCodeVar(m.entity, "delete", FieldNames(activeFilters)...))) buf.L("result, err := stmt.Exec(%s)", mapping.FieldParamsMarshal(activeFilters)) m.ifErrNotNil(buf, true, fmt.Sprintf(`fmt.Errorf("Delete \"%s\": %%w", err)`, entityTable(m.entity, m.config["table"]))) } if deleteOne { buf.L("n, err := result.RowsAffected()") } else { buf.L("_, err = result.RowsAffected()") } m.ifErrNotNil(buf, true, "fmt.Errorf(\"Fetch affected rows: %w\", err)") if deleteOne { buf.L("if n == 0 {") buf.L(` return ErrNotFound`) buf.L("} else if n > 1 {") buf.L(" return fmt.Errorf(\"Query deleted %%d %s rows instead of 1\", n)", lex.PascalCase(m.entity)) buf.L("}") } buf.N() buf.L("return nil") return nil } // signature generates a method or interface signature with comments, arguments, and return values. 
func (m *Method) signature(buf *file.Buffer, isInterface bool) error { mapping, err := Parse(m.localPath, m.pkgs, lex.PascalCase(m.entity), m.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } comment := "" args := "ctx context.Context, db dbtx, " rets := "" switch mapping.Type { case AssociationTable: ref := strings.ReplaceAll(mapping.Name, m.config["struct"], "") refMapping, err := Parse(m.localPath, m.pkgs, ref, "") if err != nil { return fmt.Errorf("Failed to parse struct %q", ref) } switch operation(m.kind) { case "GetMany": comment = fmt.Sprintf("returns all available %s for the %s.", lex.Plural(ref), m.config["struct"]) args += fmt.Sprintf("%sID int", lex.Minuscule(m.config["struct"])) rets = fmt.Sprintf("(_ []%s, _err error)", refMapping.ImportType()) case "Create": comment = fmt.Sprintf("adds a new %s to the database.", m.entity) args += fmt.Sprintf("objects []%s", mapping.ImportType()) rets = "(_err error)" case "Update": comment = fmt.Sprintf("updates the %s matching the given key parameters.", m.entity) if len(refMapping.NaturalKey()) > 1 { return fmt.Errorf("Cannot generate update method for associative table: Reference table struct %q has more than one natural key", ref) } else if refMapping.Identifier() == nil { return fmt.Errorf("Cannot generate update method for associative table: Identifier for reference table struct %q must be `Name` or `Fingerprint`", ref) } args += fmt.Sprintf("%sID int, %s%s []%s", lex.Minuscule(m.config["struct"]), lex.Minuscule(ref), lex.Plural(refMapping.Identifier().Name), refMapping.Identifier().Type.Name) rets = "(_err error)" case "DeleteMany": comment = fmt.Sprintf("deletes the %s matching the given key parameters.", m.entity) args += fmt.Sprintf("%sID int", lex.Minuscule(m.config["struct"])) rets = "(_err error)" default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } case ReferenceTable: switch operation(m.kind) { case "GetMany": comment = fmt.Sprintf("returns all available %s for the parent entity.", lex.Plural(m.entity)) args += fmt.Sprintf("parentTablePrefix string, parentColumnPrefix string, filters ...%s", mapping.ImportFilterType()) rets = fmt.Sprintf("(_ map[int][]%s, _err error)", mapping.ImportType()) case "Create": comment = fmt.Sprintf("adds a new %s to the database.", m.entity) args += fmt.Sprintf("parentTablePrefix string, parentColumnPrefix string, objects map[string]%s", mapping.ImportType()) rets = "(_err error)" case "Update": comment = fmt.Sprintf("updates the %s matching the given key parameters.", m.entity) args += fmt.Sprintf("parentTablePrefix string, parentColumnPrefix string, referenceID int, %s map[string]%s", lex.Plural(m.entity), mapping.ImportType()) rets = "(_err error)" case "DeleteMany": comment = fmt.Sprintf("deletes the %s matching the given key parameters.", m.entity) args += "parentTablePrefix string, parentColumnPrefix string, referenceID int" rets = "(_err error)" default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } case MapTable: switch operation(m.kind) { case "GetMany": comment = fmt.Sprintf("returns all available %s.", lex.Plural(m.entity)) args += fmt.Sprintf("parentTablePrefix string, parentColumnPrefix string, filters ...%s", entityFilter(lex.PascalCase(m.entity))) rets = "(_ map[int]map[string]string, _err error)" case "Create": comment = fmt.Sprintf("adds a new %s to the database.", m.entity) args += fmt.Sprintf("parentTablePrefix string, parentColumnPrefix string, object %s", mapping.Name) rets = "(_err error)" case "Update": comment = 
fmt.Sprintf("updates the %s matching the given key parameters.", m.entity) args += "parentTablePrefix string, parentColumnPrefix string, referenceID int, config map[string]string" rets = "(_err error)" case "DeleteMany": comment = fmt.Sprintf("deletes the %s matching the given key parameters.", m.entity) args += "parentTablePrefix string, parentColumnPrefix string, referenceID int" rets = "(_err error)" default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } case EntityTable: switch operation(m.kind) { case "URIs": comment = fmt.Sprintf("returns all available %s URIs.", m.entity) args += fmt.Sprintf("filter %s", mapping.ImportFilterType()) rets = "(_ []string, _err error)" case "GetMany": if m.ref == "" { comment = fmt.Sprintf("returns all available %s.", lex.Plural(m.entity)) args += fmt.Sprintf("filters ...%s", mapping.ImportFilterType()) rets = fmt.Sprintf("(_ %s, _err error)", lex.Slice(mapping.ImportType())) } else { refMapping, err := Parse(m.localPath, m.pkgs, m.ref, "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } comment = fmt.Sprintf("returns all available %s %s", mapping.Name, lex.Plural(m.ref)) args += fmt.Sprintf("%sID int, filters ...%s", lex.Minuscule(mapping.Name), refMapping.ImportFilterType()) var retType string switch refMapping.Type { case ReferenceTable: retType = fmt.Sprintf("map[%s]%s", refMapping.Identifier().Type.Name, refMapping.ImportType()) case MapTable: retType = "map[string]string" } rets = fmt.Sprintf("(_ %s, _err error)", retType) } case "GetNames": comment = fmt.Sprintf("returns the identifying field of %s.", m.entity) args += fmt.Sprintf("filters ...%s", mapping.ImportFilterType()) rets = fmt.Sprintf("(_ %s, _err error)", lex.Slice(mapping.NaturalKey()[0].Type.Name)) case "GetOne": comment = fmt.Sprintf("returns the %s with the given key.", m.entity) args += mapping.FieldArgs(mapping.NaturalKey()) rets = fmt.Sprintf("(_ %s, _err error)", lex.Star(mapping.ImportType())) case "ID": comment = fmt.Sprintf("return the ID of the %s with the given key.", m.entity) args += mapping.FieldArgs(mapping.NaturalKey()) rets = "(_ int64, _err error)" case "Exists": comment = fmt.Sprintf("checks if a %s with the given key exists.", m.entity) args += mapping.FieldArgs(mapping.NaturalKey()) rets = "(_ bool, _err error)" case "Create": structMapping := mapping if m.ref != "" { structMapping, err = Parse(m.localPath, m.pkgs, m.ref, "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } } else if m.config["struct"] != "" { structMapping, err = Parse(m.localPath, m.pkgs, m.config["struct"], "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } } if m.ref == "" { comment = fmt.Sprintf("adds a new %s to the database.", m.entity) args += fmt.Sprintf("object %s", structMapping.ImportType()) rets = "(_ int64, _err error)" } else { comment = fmt.Sprintf("adds new %s %s to the database.", m.entity, lex.Plural(m.ref)) rets = "(_err error)" switch structMapping.Type { case ReferenceTable: args += fmt.Sprintf("%sID int64, %s map[%s]%s", lex.CamelCase(m.entity), lex.Plural(lex.Minuscule(m.ref)), structMapping.Identifier().Type.Name, structMapping.ImportType()) case MapTable: args += fmt.Sprintf("%sID int64, %s map[string]string", lex.CamelCase(m.entity), lex.Minuscule(m.ref)) } } case "CreateOrReplace": structMapping := mapping if m.config["struct"] != "" { structMapping, err = Parse(m.localPath, m.pkgs, m.config["struct"], "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } } comment = 
fmt.Sprintf("adds a new %s to the database.", m.entity) args += fmt.Sprintf("object %s", structMapping.ImportType()) rets = "(_ int64, _err error)" case "Rename": comment = fmt.Sprintf("renames the %s matching the given key parameters.", m.entity) args += mapping.FieldArgs(mapping.NaturalKey(), "to string") rets = "(_err error)" case "Update": structMapping := mapping if m.ref != "" { structMapping, err = Parse(m.localPath, m.pkgs, m.ref, "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } } else if m.config["struct"] != "" { structMapping, err = Parse(m.localPath, m.pkgs, m.config["struct"], "") if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } } if m.ref == "" { comment = fmt.Sprintf("updates the %s matching the given key parameters.", m.entity) args += mapping.FieldArgs(mapping.NaturalKey(), fmt.Sprintf("object %s", structMapping.ImportType())) rets = "(_err error)" } else { comment = fmt.Sprintf("updates the %s %s matching the given key parameters.", m.entity, m.ref) rets = "(_err error)" switch structMapping.Type { case ReferenceTable: args += fmt.Sprintf("%sID int64, %s map[%s]%s", lex.CamelCase(m.entity), lex.Minuscule(lex.Plural(m.ref)), structMapping.Identifier().Type.Name, structMapping.ImportType()) case MapTable: args += fmt.Sprintf("%sID int64, %s map[string]string", lex.CamelCase(m.entity), lex.Minuscule(lex.Plural(m.ref))) } } case "DeleteOne": comment = fmt.Sprintf("deletes the %s matching the given key parameters.", m.entity) args += mapping.FieldArgs(mapping.ActiveFilters(m.kind)) rets = "(_err error)" case "DeleteMany": comment = fmt.Sprintf("deletes the %s matching the given key parameters.", m.entity) args += mapping.FieldArgs(mapping.ActiveFilters(m.kind)) rets = "(_err error)" default: return fmt.Errorf("Unknown method kind '%s'", m.kind) } } args, err = m.sqlTxCheck(mapping, args) if err != nil { return err } m.begin(buf, mapping, comment, args, rets, isInterface) if isInterface { return nil } return nil } func (m *Method) begin(buf *file.Buffer, mapping *Mapping, comment string, args string, rets string, isInterface bool) { name := "" entity := lex.PascalCase(m.entity) if mapping.Type == AssociationTable { parent := m.config["struct"] ref := strings.ReplaceAll(entity, parent, "") switch operation(m.kind) { case "GetMany": name = fmt.Sprintf("Get%s%s", parent, lex.Plural(ref)) case "Create": name = fmt.Sprintf("Create%s%s", parent, lex.Plural(ref)) case "Update": name = fmt.Sprintf("Update%s%s", parent, lex.Plural(ref)) case "DeleteMany": name = fmt.Sprintf("Delete%s%s", parent, lex.Plural(ref)) } } else { entity = entity + m.ref switch operation(m.kind) { case "URIs": name = fmt.Sprintf("Get%sURIs", entity) case "GetMany": name = fmt.Sprintf("Get%s", lex.Plural(entity)) case "GetNames": name = fmt.Sprintf("Get%sNames", entity) case "GetOne": name = fmt.Sprintf("Get%s", entity) case "ID": name = fmt.Sprintf("Get%sID", entity) case "Exists": name = fmt.Sprintf("%sExists", entity) case "Create": if mapping.Type == ReferenceTable || m.ref != "" { entity = lex.Plural(entity) } name = fmt.Sprintf("Create%s", entity) case "CreateOrReplace": if mapping.Type == ReferenceTable || m.ref != "" { entity = lex.Plural(entity) } name = fmt.Sprintf("CreateOrReplace%s", entity) case "Rename": name = fmt.Sprintf("Rename%s", entity) case "Update": if mapping.Type == ReferenceTable || m.ref != "" { entity = lex.Plural(entity) } name = fmt.Sprintf("Update%s", entity) case "DeleteOne": name = fmt.Sprintf("Delete%s", entity) case "DeleteMany": name = 
fmt.Sprintf("Delete%s", lex.Plural(entity)) default: name = fmt.Sprintf("%s%s", entity, m.kind) } } buf.L("// %s %s", name, comment) buf.L("// generator: %s %s", m.entity, m.kind) if isInterface { // Named return values are not needed for the interface definition. rets = strings.ReplaceAll(rets, "_err ", "") rets = strings.ReplaceAll(rets, "_ ", "") buf.L("%s(%s) %s", name, args, rets) } else { buf.L("func %s(%s) %s {", name, args, rets) buf.L("defer func() {") buf.L("_err = mapErr(_err, %q)", lex.Capital(m.entity)) buf.L("}()") buf.N() } } func (m *Method) sqlTxCheck(mapping *Mapping, args string) (string, error) { txCheck := false switch mapping.Type { case EntityTable: if m.kind == "Update" || m.kind == "ID" { txCheck = true } else if m.ref != "" { refMapping, err := Parse(m.localPath, m.pkgs, m.ref, "") if err != nil { return "", fmt.Errorf("Parse entity struct: %w", err) } if refMapping.Type != MapTable || m.kind == "GetMany" { txCheck = true } } case AssociationTable: txCheck = true case ReferenceTable: txCheck = true } if txCheck { args = strings.ReplaceAll(args, "dbtx", "tx") } return args, nil } func (m *Method) ifErrNotNil(buf *file.Buffer, newLine bool, rets ...string) { buf.L("if err != nil {") buf.L("return %s", strings.Join(rets, ", ")) buf.L("}") if newLine { buf.N() } } func (m *Method) end(buf *file.Buffer) { buf.L("}") } // getManyTemplateFuncs returns two functions that can be used to perform generic queries without validation, and return // a slice of objects matching the entity. One function will accept pre-registered statements, and the other will accept // raw queries. func (m *Method) getManyTemplateFuncs(buf *file.Buffer, mapping *Mapping) error { if mapping.Type == AssociationTable { if m.config["struct"] != "" && strings.HasSuffix(mapping.Name, m.config["struct"]) { return nil } } tableName := mapping.TableName(m.entity, m.config["table"]) // Create a function to get the column names to use with SELECT statements for the entity. buf.L("// %sColumns returns a string of column names to be used with a SELECT statement for the entity.", lex.Minuscule(mapping.Name)) buf.L("// Use this function when building statements to retrieve database entries matching the %s entity.", mapping.Name) buf.L("func %sColumns() string {", lex.Minuscule(mapping.Name)) columns := make([]string, len(mapping.Fields)) for i, field := range mapping.Fields { column, err := field.SelectColumn(mapping, tableName) if err != nil { return err } columns[i] = column } buf.L("return \"%s\"", strings.Join(columns, ", ")) buf.L("}") buf.N() // Create a function supporting prepared statements. 
buf.L("// get%s can be used to run handwritten sql.Stmts to return a slice of objects.", lex.Plural(mapping.Name)) if mapping.Type != ReferenceTable && mapping.Type != MapTable { buf.L("func get%s(ctx context.Context, stmt *sql.Stmt, args ...any) ([]%s, error) {", lex.Plural(mapping.Name), mapping.ImportType()) } else { buf.L("func get%s(ctx context.Context, stmt *sql.Stmt, parent string, args ...any) ([]%s, error) {", lex.Plural(mapping.Name), mapping.ImportType()) } buf.L("objects := make([]%s, 0)", mapping.ImportType()) buf.N() buf.L("dest := %s", destFunc("objects", lex.PascalCase(m.entity), mapping.ImportType(), mapping.ColumnFields())) buf.N() buf.L("err := selectObjects(ctx, stmt, dest, args...)") if mapping.Type != ReferenceTable && mapping.Type != MapTable { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, tableName)) } else { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", parent, err)`, tableName)) } buf.L(" return objects, nil") buf.L("}") buf.N() // Create a function supporting raw queries. buf.L("// get%sRaw can be used to run handwritten query strings to return a slice of objects.", lex.Plural(mapping.Name)) if mapping.Type != ReferenceTable && mapping.Type != MapTable { buf.L("func get%sRaw(ctx context.Context, db dbtx, sql string, args ...any) ([]%s, error) {", lex.Plural(mapping.Name), mapping.ImportType()) } else { buf.L("func get%sRaw(ctx context.Context, db dbtx, sql string, parent string, args ...any) ([]%s, error) {", lex.Plural(mapping.Name), mapping.ImportType()) } buf.L("objects := make([]%s, 0)", mapping.ImportType()) buf.N() buf.L("dest := %s", destFunc("objects", lex.PascalCase(m.entity), mapping.ImportType(), mapping.ColumnFields())) buf.N() buf.L("err := scan(ctx, db, sql, dest, args...)") if mapping.Type != ReferenceTable && mapping.Type != MapTable { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", err)`, tableName)) } else { m.ifErrNotNil(buf, true, "nil", fmt.Sprintf(`fmt.Errorf("Failed to fetch from \"%s\" table: %%w", parent, err)`, tableName)) } buf.L(" return objects, nil") buf.L("}") buf.N() return nil } incus-6.0.4/cmd/generate-database/db/parse.go000066400000000000000000000341341477363751000210110ustar00rootroot00000000000000//go:build linux && cgo && !agent package db import ( "fmt" "go/ast" "go/types" "net/url" "reflect" "slices" "sort" "strconv" "strings" "golang.org/x/tools/go/packages" "github.com/lxc/incus/v6/cmd/generate-database/lex" "github.com/lxc/incus/v6/shared/util" ) // FiltersFromStmt parses all filtering statement defined for the given entity. It // returns all supported combinations of filters, sorted by number of criteria, and // the corresponding set of unused filters from the Filter struct. 
func FiltersFromStmt(pkgs []*types.Package, kind string, entity string, filters []*Field, registeredSQLStmts map[string]string) ([][]string, [][]string) { for _, pkg := range pkgs { objects := pkg.Scope().Names() stmtFilters := [][]string{} prefix := fmt.Sprintf("%s%sBy", lex.CamelCase(entity), lex.PascalCase(kind)) seenNames := make(map[string]struct{}, len(objects)) for _, name := range objects { if !strings.HasPrefix(name, prefix) { continue } rest := name[len(prefix):] stmtFilters = append(stmtFilters, strings.Split(rest, "And")) seenNames[rest] = struct{}{} } for name := range registeredSQLStmts { if !strings.HasPrefix(name, prefix) { continue } rest := name[len(prefix):] _, ok := seenNames[rest] if ok { continue } stmtFilters = append(stmtFilters, strings.Split(rest, "And")) } stmtFilters = sortFilters(stmtFilters) ignoredFilters := [][]string{} for _, filterGroup := range stmtFilters { ignoredFilterGroup := []string{} for _, filter := range filters { if !slices.Contains(filterGroup, filter.Name) { ignoredFilterGroup = append(ignoredFilterGroup, filter.Name) } } ignoredFilters = append(ignoredFilters, ignoredFilterGroup) } return stmtFilters, ignoredFilters } return nil, nil } // RefFiltersFromStmt parses all filtering statement defined for the given entity reference. func RefFiltersFromStmt(pkg *types.Package, entity string, ref string, filters []*Field, registeredSQLStmts map[string]string) ([][]string, [][]string) { objects := pkg.Scope().Names() stmtFilters := [][]string{} prefix := fmt.Sprintf("%s%sRefBy", lex.CamelCase(entity), lex.Capital(ref)) seenNames := make(map[string]struct{}, len(objects)) for _, name := range objects { if !strings.HasPrefix(name, prefix) { continue } rest := name[len(prefix):] stmtFilters = append(stmtFilters, strings.Split(rest, "And")) seenNames[rest] = struct{}{} } for name := range registeredSQLStmts { if !strings.HasPrefix(name, prefix) { continue } rest := name[len(prefix):] _, ok := seenNames[rest] if ok { continue } stmtFilters = append(stmtFilters, strings.Split(rest, "And")) } stmtFilters = sortFilters(stmtFilters) ignoredFilters := [][]string{} for _, filterGroup := range stmtFilters { ignoredFilterGroup := []string{} for _, filter := range filters { if !slices.Contains(filterGroup, filter.Name) { ignoredFilterGroup = append(ignoredFilterGroup, filter.Name) } } ignoredFilters = append(ignoredFilters, ignoredFilterGroup) } return stmtFilters, ignoredFilters } func sortFilters(filters [][]string) [][]string { sort.Slice(filters, func(i, j int) bool { n1 := len(filters[i]) n2 := len(filters[j]) if n1 != n2 { return n1 > n2 } f1 := sortFilter(filters[i]) f2 := sortFilter(filters[j]) for k := range f1 { if f1[k] == f2[k] { continue } return f1[k] > f2[k] } panic("duplicate filter") }) return filters } func sortFilter(filter []string) []string { f := make([]string, len(filter)) copy(f, filter) sort.Sort(sort.Reverse(sort.StringSlice(f))) return f } // Parse the structure declaration with the given name found in the given Go package. // Any 'Entity' struct should also have an 'EntityFilter' struct defined in the same file. func Parse(localPath string, pkgs []*types.Package, name string, kind string) (*Mapping, error) { m := &Mapping{} for _, pkg := range pkgs { // Find the package that has the main entity struct. 
str := findStruct(pkg.Scope(), name) if str == nil { continue } fields, err := parseStruct(str, kind, pkg.Name()) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", name, err) } m.Local = pkg.Path() == localPath m.Package = pkg.Name() m.Name = name m.Fields = fields m.Type = tableType(pkgs, name, fields) m.Filterable = true oldStructHasTags := false for _, f := range m.Fields { if len(f.Config) > 0 { oldStructHasTags = true break } } if oldStructHasTags { break } } if m.Package == "" { return nil, fmt.Errorf("No declaration found for %q", name) } if m.Filterable && m.Filters == nil { for _, pkg := range pkgs { filters, err := ParseFilter(m, kind, name, pkg) if err != nil { return nil, err } if filters != nil { m.Filters = filters m.FilterLocal = pkg.Path() == localPath break } } if m.Filters == nil { filterName := name + "Filter" return nil, fmt.Errorf("No declaration found for filter %q", filterName) } } return m, nil } // ParseFilter finds the Filter struct in the given package. func ParseFilter(m *Mapping, kind string, name string, pkg *types.Package) ([]*Field, error) { // The 'EntityFilter' struct. This is used for filtering on specific fields of the entity. filterName := name + "Filter" filterStr := findStruct(pkg.Scope(), filterName) if filterStr == nil { return nil, nil } filters, err := parseStruct(filterStr, kind, pkg.Name()) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", name, err) } for i, filter := range filters { // Any field in EntityFilter must be present in the original struct. field := m.FieldByName(filter.Name) if field == nil { return nil, fmt.Errorf("Filter field %q is not in struct %q", filter.Name, name) } // Assign the config tags from the main entity struct to the Filter struct. filters[i].Config = field.Config // A Filter field and its indirect references must all be in the Filter struct. if field.IsIndirect() { indirectField := lex.PascalCase(field.Config.Get("via")) for i, f := range filters { if f.Name == indirectField { break } if i == len(filters)-1 { return nil, fmt.Errorf("Field %q requires field %q in struct %q", field.Name, indirectField, name+"Filter") } } } } return filters, nil } // ParseStmt returns the SQL string passed as an argument to a variable declaration of a call to RegisterStmt with the given name. // e.g. the SELECT string from 'var instanceObjects = RegisterStmt(`SELECT * from instances...`)'. func ParseStmt(name string, defs map[*ast.Ident]types.Object, registeredSQLStmts map[string]string) (string, error) { sql, ok := registeredSQLStmts[name] if ok { return sql, nil } for stmtVar := range defs { if stmtVar.Name != name { continue } spec, ok := stmtVar.Obj.Decl.(*ast.ValueSpec) if !ok { continue } if len(spec.Values) != 1 { continue } expr, ok := spec.Values[0].(*ast.CallExpr) if !ok { continue } if len(expr.Args) != 1 { continue } lit, ok := expr.Args[0].(*ast.BasicLit) if !ok { continue } return lit.Value, nil } return "", fmt.Errorf("Declaration for %q not found", name) } // tableType determines the TableType for the given struct fields. 
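// As an illustration (struct names are hypothetical): a struct named
// "InstanceProfile" whose two name components both match existing entity structs
// is classified as an AssociationTable; a struct with a ReferenceID field is a
// ReferenceTable, or a MapTable when it also has Key and Value fields; anything
// else is a plain EntityTable.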
func tableType(pkgs []*types.Package, name string, fields []*Field) TableType { fieldNames := FieldNames(fields) entities := strings.Split(lex.SnakeCase(name), "_") if len(entities) == 2 { var struct1 *types.Struct var struct2 *types.Struct for _, pkg := range pkgs { if struct1 == nil { struct1 = findStruct(pkg.Scope(), lex.PascalCase(lex.Singular(entities[0]))) } if struct2 == nil { struct2 = findStruct(pkg.Scope(), lex.PascalCase(lex.Singular(entities[1]))) } } if struct1 != nil && struct2 != nil { return AssociationTable } } if slices.Contains(fieldNames, "ReferenceID") { if slices.Contains(fieldNames, "Key") && slices.Contains(fieldNames, "Value") { return MapTable } return ReferenceTable } return EntityTable } func parsePkgDecls(entity string, kind string, pkgs []*packages.Package) ([]*types.Package, error) { structName := lex.PascalCase(entity) pkgTypes := make([]*types.Package, 0, len(pkgs)) numSeenDecls := 0 numTaggedDecls := 0 for _, pkg := range pkgs { for _, decl := range pkg.Types.Scope().Names() { // Don't validate any structs beyond the one we care about. if decl != structName { continue } if numTaggedDecls > 1 { return nil, fmt.Errorf("Entity declaration exists in more than one package %q: %q. Remove db tags from one definition", pkg.Name, decl) } // If we encountered a non-struct declaration, just ignore it. obj := pkg.Types.Scope().Lookup(decl) structDecl, ok := obj.Type().Underlying().(*types.Struct) if !ok { continue } numSeenDecls++ fields, err := parseStruct(structDecl, kind, pkg.Types.Name()) if err != nil { return nil, err } for _, f := range fields { if len(f.Config) > 0 { numTaggedDecls++ break } } } pkgTypes = append(pkgTypes, pkg.Types) } if numSeenDecls > 1 && numTaggedDecls != 1 { return nil, fmt.Errorf("Struct %q declaration exists in more than one package. Apply db tags to one definition", structName) } if numSeenDecls == 0 { return nil, fmt.Errorf("No declaration found for struct %q", structName) } return pkgTypes, nil } // Find the StructType node for the structure with the given name. func findStruct(scope *types.Scope, name string) *types.Struct { obj := scope.Lookup(name) if obj == nil { return nil } typ, ok := obj.(*types.TypeName) if !ok { return nil } str, ok := typ.Type().Underlying().(*types.Struct) if !ok { return nil } return str } // Extract field information from the given structure. func parseStruct(str *types.Struct, kind string, pkgName string) ([]*Field, error) { fields := make([]*Field, 0) for i := 0; i < str.NumFields(); i++ { f := str.Field(i) if f.Embedded() { // Check if this is a parent struct. parentStr, ok := f.Type().Underlying().(*types.Struct) if !ok { continue } parentFields, err := parseStruct(parentStr, kind, pkgName) if err != nil { return nil, fmt.Errorf("Failed to parse parent struct: %w", err) } fields = append(fields, parentFields...) continue } field, err := parseField(f, str.Tag(i), kind, pkgName) if err != nil { return nil, err } // Don't add field if it has been ignored. 
if field != nil { fields = append(fields, field) } } return fields, nil } func parseField(f *types.Var, structTag string, kind string, pkgName string) (*Field, error) { name := f.Name() if !f.Exported() { return nil, fmt.Errorf("Unexported field name %q", name) } // Ignore fields that are marked with a tag of `db:"ignore"` if structTag != "" { tagValue := reflect.StructTag(structTag).Get("db") if tagValue == "ignore" { return nil, nil } } typeName := parseType(f.Type(), pkgName) if typeName == "" { return nil, fmt.Errorf("Unsupported type for field %q", name) } typeObj := Type{ Name: typeName, } var config url.Values if structTag != "" { var err error config, err = url.ParseQuery(reflect.StructTag(structTag).Get("db")) if err != nil { return nil, fmt.Errorf("Parse 'db' structure tag: %w", err) } err = validateFieldConfig(config) if err != nil { return nil, fmt.Errorf("Invalid struct tag for field %q: %v", name, err) } } typeObj.Code = TypeColumn if config.Get("marshal") == "" { if strings.HasPrefix(typeName, "[]") { typeObj.Code = TypeSlice } else if strings.HasPrefix(typeName, "map[") { typeObj.Code = TypeMap } } // Ignore fields that are marked with `db:"omit"`. omit := config.Get("omit") if omit != "" { omitFields := strings.Split(omit, ",") stmtKind := strings.ReplaceAll(lex.SnakeCase(kind), "_", "-") switch kind { case "URIs": stmtKind = "names" case "GetMany": stmtKind = "objects" case "GetOne": stmtKind = "objects" case "DeleteMany": stmtKind = "delete" case "DeleteOne": stmtKind = "delete" } if slices.Contains(omitFields, kind) || slices.Contains(omitFields, stmtKind) { return nil, nil } else if kind == "exists" && slices.Contains(omitFields, "id") { // Exists checks ID, so if we are omitting the field from ID, also omit it from Exists. return nil, nil } } field := Field{ Name: name, Type: typeObj, Config: config, } return &field, nil } func parseType(x types.Type, pkgName string) string { switch t := x.(type) { case *types.Pointer: return parseType(t.Elem(), pkgName) case *types.Slice: return "[]" + parseType(t.Elem(), pkgName) case *types.Basic: s := t.String() if s == "byte" { return "uint8" } return s case *types.Array: return "[" + strconv.FormatInt(t.Len(), 10) + "]" + parseType(t.Elem(), pkgName) case *types.Map: return "map[" + t.Key().String() + "]" + parseType(t.Elem(), pkgName) case *types.Named: if pkgName == t.Obj().Pkg().Name() { return t.Obj().Name() } return t.Obj().Pkg().Name() + "." 
+ t.Obj().Name() case nil: return "" default: return "" } } func validateFieldConfig(config url.Values) error { for tag, values := range config { switch tag { case "sql", "coalesce", "join", "leftjoin", "joinon", "omit": _, err := exactlyOneValue(tag, values) return err case "order", "primary", "ignore": value, err := exactlyOneValue(tag, values) if err != nil { return err } if !util.IsTrue(value) && !util.IsFalse(value) { return fmt.Errorf("Unexpected value %q for %q tag", value, tag) } case "marshal": value, err := exactlyOneValue(tag, values) if err != nil { return err } if !util.IsTrue(value) && !util.IsFalse(value) && strings.ToLower(value) != "json" { return fmt.Errorf("Unexpected value %q for %q tag", value, tag) } } } return nil } func exactlyOneValue(tag string, values []string) (string, error) { if len(values) == 0 { return "", fmt.Errorf("Missing value for %q tag", tag) } if len(values) > 1 { return "", fmt.Errorf("More than one value for %q tag", tag) } return values[0], nil } incus-6.0.4/cmd/generate-database/db/parse_test.go000066400000000000000000000026451477363751000220520ustar00rootroot00000000000000package db_test import ( "go/types" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/tools/go/packages" "github.com/lxc/incus/v6/cmd/generate-database/db" ) type Person struct { Name string } type Class struct { Time time.Time Room string } type Teacher struct { Person Subjects []string IsSubstitute bool Classes []Class } type TeacherFilter struct{} func TestParse(t *testing.T) { pkg, err := packages.Load(&packages.Config{ Mode: packages.LoadTypes | packages.NeedTypesInfo, Tests: true, }, "") require.NoError(t, err) m, err := db.Parse(pkg[1].PkgPath, []*types.Package{pkg[1].Types}, "Teacher", "objects") require.NoError(t, err) assert.Equal(t, "db_test", m.Package) assert.Equal(t, "Teacher", m.Name) fields := m.Fields assert.Len(t, fields, 4) assert.Equal(t, "Name", fields[0].Name) assert.Equal(t, "Subjects", fields[1].Name) assert.Equal(t, "IsSubstitute", fields[2].Name) assert.Equal(t, "Classes", fields[3].Name) assert.Equal(t, "string", fields[0].Type.Name) assert.Equal(t, "[]string", fields[1].Type.Name) assert.Equal(t, "bool", fields[2].Type.Name) assert.Equal(t, "[]Class", fields[3].Type.Name) assert.Equal(t, db.TypeColumn, fields[0].Type.Code) assert.Equal(t, db.TypeSlice, fields[1].Type.Code) assert.Equal(t, db.TypeColumn, fields[2].Type.Code) assert.Equal(t, db.TypeSlice, fields[3].Type.Code) } incus-6.0.4/cmd/generate-database/db/schema.go000066400000000000000000000007361477363751000211400ustar00rootroot00000000000000package db import ( "fmt" "github.com/lxc/incus/v6/internal/server/db/cluster" "github.com/lxc/incus/v6/internal/server/db/node" ) // UpdateSchema updates the schema.go file of the cluster and node databases. func UpdateSchema() error { err := cluster.SchemaDotGo() if err != nil { return fmt.Errorf("Update cluster database schema: %w", err) } err = node.SchemaDotGo() if err != nil { return fmt.Errorf("Update node database schema: %w", err) } return nil } incus-6.0.4/cmd/generate-database/db/stmt.go000066400000000000000000000353451477363751000206730ustar00rootroot00000000000000//go:build linux && cgo && !agent package db import ( "fmt" "go/ast" "go/types" "strings" "golang.org/x/tools/go/packages" "github.com/lxc/incus/v6/cmd/generate-database/file" "github.com/lxc/incus/v6/cmd/generate-database/lex" ) // Stmt generates a particular database query statement. 
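// The kind string matches the cases handled by Generate below ("objects",
// "names", "create", "create-or-replace", "id", "rename", "update", "delete"),
// optionally extended with filter suffixes, e.g. "objects-by-Name-and-Project"
// (an illustrative example rather than a statement defined here).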
type Stmt struct { entity string // Name of the database entity kind string // Kind of statement to generate config map[string]string // Configuration parameters localPath string pkgs []*types.Package // Package to perform for struct declaration lookups defs map[*ast.Ident]types.Object // Defs maps identifiers to the objects they define registeredSQLStmts map[string]string // Lookup for SQL statements registered during this execution, which are therefore not included in the parsed package information } // NewStmt return a new statement code snippet for running the given kind of // query against the given database entity. func NewStmt(localPath string, parsedPkgs []*packages.Package, entity, kind string, config map[string]string, registeredSQLStmts map[string]string) (*Stmt, error) { defs := map[*ast.Ident]types.Object{} for _, pkg := range parsedPkgs { for k, v := range pkg.TypesInfo.Defs { _, ok := defs[k] if ok { return nil, fmt.Errorf("Entity definition already exists: %q: %q", pkg.Name, v.Name()) } defs[k] = v } } pkgTypes, err := parsePkgDecls(entity, kind, parsedPkgs) if err != nil { return nil, err } stmt := &Stmt{ localPath: localPath, entity: entity, kind: kind, config: config, pkgs: pkgTypes, defs: defs, registeredSQLStmts: registeredSQLStmts, } return stmt, nil } // Generate plumbing and wiring code for the desired statement. func (s *Stmt) Generate(buf *file.Buffer) error { kind := strings.Split(s.kind, "-by-")[0] switch kind { case "objects": return s.objects(buf) case "names": return s.names(buf) case "delete": return s.delete(buf) case "create": return s.create(buf, false) case "create-or-replace": return s.create(buf, true) case "id": return s.id(buf) case "rename": return s.rename(buf) case "update": return s.update(buf) default: return fmt.Errorf("Unknown statement '%s'", s.kind) } } // GenerateSignature is not used for statements. 
func (s *Stmt) GenerateSignature(buf *file.Buffer) error { return nil } func (s *Stmt) objects(buf *file.Buffer) error { if strings.HasPrefix(s.kind, "objects-by") { return s.objectsBy(buf) } mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } table := mapping.TableName(s.entity, s.config["table"]) boiler := stmts["objects"] fields := mapping.ColumnFields() columns := make([]string, len(fields)) for i, field := range fields { column, err := field.SelectColumn(mapping, table) if err != nil { return err } columns[i] = column } orderBy := []string{} orderByFields := []*Field{} for _, field := range fields { if field.Config.Get("order") != "" { orderByFields = append(orderByFields, field) } } if len(orderByFields) < 1 { orderByFields = mapping.NaturalKey() } for _, field := range orderByFields { column, err := field.OrderBy(mapping, table) if err != nil { return err } orderBy = append(orderBy, column) } joinFields := mapping.ScalarFields() joins := make([]string, 0, len(joinFields)) for _, field := range joinFields { join, err := field.JoinClause(mapping, table) if err != nil { return err } joins = append(joins, join) } table += strings.Join(joins, "") sql := fmt.Sprintf(boiler, strings.Join(columns, ", "), table, strings.Join(orderBy, ", ")) kind := strings.ReplaceAll(s.kind, "-", "_") stmtName := stmtCodeVar(s.entity, kind) if mapping.Type == ReferenceTable || mapping.Type == MapTable { buf.L("const %s = `%s`", stmtName, sql) } else { s.register(buf, stmtName, sql) } return nil } // objectsBy parses the variable declaration produced by the 'objects' function, and appends a WHERE clause to its SQL // string using the objects-by- field suffixes, and then creates a new variable declaration. // Strictly, it will look for variables of the form 'var Objects = .RegisterStmt(`SQL String`)'. func (s *Stmt) objectsBy(buf *file.Buffer) error { mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } where := []string{} filters := strings.Split(s.kind[len("objects-by-"):], "-and-") sqlString, err := ParseStmt(stmtCodeVar(s.entity, "objects"), s.defs, s.registeredSQLStmts) if err != nil { return err } queryParts := strings.SplitN(sqlString, "ORDER BY", 2) joinStr := " JOIN" if strings.Contains(queryParts[0], " LEFT JOIN") { joinStr = " LEFT JOIN" } preJoin, _, _ := strings.Cut(queryParts[0], joinStr) _, tableName, _ := strings.Cut(preJoin, "FROM ") tableName, _, _ = strings.Cut(tableName, "\n") for _, filter := range filters { field, err := mapping.FilterFieldByName(filter) if err != nil { return err } table, columnName, err := field.SQLConfig() if err != nil { return err } var column string if table != "" && columnName != "" { if field.IsScalar() { column = columnName } else { column = table + "." + columnName } } else if field.IsScalar() { column = lex.SnakeCase(field.Name) } else { column = mapping.FieldColumnName(field.Name, tableName) } coalesce, ok := field.Config["coalesce"] if ok { // Ensure filters operate on the coalesced value for fields using coalesce setting. where = append(where, fmt.Sprintf("coalesce(%s, %s) = ? ", column, coalesce[0])) } else { where = append(where, fmt.Sprintf("%s = ? 
", column)) } } queryParts[0] = fmt.Sprintf("%sWHERE ( %s)", queryParts[0], strings.Join(where, "AND ")) sqlString = strings.Join(queryParts, "\n ORDER BY") s.register(buf, stmtCodeVar(s.entity, "objects", filters...), sqlString) return nil } func (s *Stmt) names(buf *file.Buffer) error { if strings.HasPrefix(s.kind, "names-by") { return s.namesBy(buf) } mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } if len(mapping.NaturalKey()) > 1 { return fmt.Errorf("Can't return names for composite key objects") } table := mapping.TableName(s.entity, s.config["table"]) boiler := stmts["names"] field := mapping.NaturalKey()[0] column, err := field.SelectColumn(mapping, table) if err != nil { return err } orderByField := field if field.Config.Get("order") != "" { orderByField = field } orderBy, err := orderByField.OrderBy(mapping, table) if err != nil { return err } sql := fmt.Sprintf(boiler, column, table, orderBy) kind := strings.ReplaceAll(s.kind, "-", "_") stmtName := stmtCodeVar(s.entity, kind) s.register(buf, stmtName, sql) return nil } func (s *Stmt) namesBy(buf *file.Buffer) error { mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } if len(mapping.NaturalKey()) > 1 { return fmt.Errorf("Can't return names for composite key objects") } where := []string{} filters := strings.Split(s.kind[len("names-by-"):], "-and-") sqlString, err := ParseStmt(stmtCodeVar(s.entity, "names"), s.defs, s.registeredSQLStmts) if err != nil { return err } queryParts := strings.SplitN(sqlString, "ORDER BY", 2) _, tableName, _ := strings.Cut(queryParts[0], "FROM ") tableName, _, _ = strings.Cut(tableName, "\n") joins := []string{} for _, filter := range filters { field, err := mapping.FilterFieldByName(filter) if err != nil { return err } table, columnName, err := field.SQLConfig() if err != nil { return err } var column string if table != "" && columnName != "" { if field.IsScalar() { column = columnName } else { column = table + "." + columnName } } else if field.IsScalar() { join, err := field.JoinClause(mapping, tableName) if err != nil { return err } joins = append(joins, join) column = field.JoinConfig() } else { column = mapping.FieldColumnName(field.Name, tableName) } coalesce, ok := field.Config["coalesce"] if ok { // Ensure filters operate on the coalesced value for fields using coalesce setting. where = append(where, fmt.Sprintf("coalesce(%s, %s) = ? ", column, coalesce[0])) } else { where = append(where, fmt.Sprintf("%s = ? ", column)) } } join := "" if len(joins) > 0 { join = strings.TrimLeftFunc(strings.Join(joins, ""), func(r rune) bool { return r == ' ' || r == '\n' }) join += "\n " } queryParts[0] = fmt.Sprintf("%s%sWHERE ( %s)", queryParts[0], join, strings.Join(where, "AND ")) sqlString = strings.Join(queryParts, "\n ORDER BY") s.register(buf, stmtCodeVar(s.entity, "names", filters...), sqlString) return nil } func (s *Stmt) create(buf *file.Buffer, replace bool) error { entityCreate := lex.PascalCase(s.entity) mapping, err := Parse(s.localPath, s.pkgs, entityCreate, s.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } table := mapping.TableName(s.entity, s.config["table"]) all := mapping.ColumnFields("ID") // This exclude the ID column, which is autogenerated. 
columns := make([]string, 0, len(all)) values := make([]string, 0, len(all)) for _, field := range all { column, value, err := field.InsertColumn(mapping, table, s.defs, s.registeredSQLStmts) if err != nil { return err } if column == "" && value == "" { continue } columns = append(columns, column) values = append(values, value) } tmpl := stmts[s.kind] if replace { tmpl = stmts["replace"] } sql := fmt.Sprintf(tmpl, table, strings.Join(columns, ", "), strings.Join(values, ", ")) kind := strings.Replace(s.kind, "-", "_", -2) stmtName := stmtCodeVar(s.entity, kind) if mapping.Type == ReferenceTable || mapping.Type == MapTable { buf.L("const %s = `%s`", stmtName, sql) } else { s.register(buf, stmtName, sql) } return nil } func (s *Stmt) id(buf *file.Buffer) error { mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } table := mapping.TableName(s.entity, s.config["table"]) nk := mapping.NaturalKey() where := make([]string, 0, len(nk)) joins := make([]string, 0, len(nk)) for _, field := range nk { tableName, columnName, err := field.SQLConfig() if err != nil { return err } var column string if field.IsScalar() { column = field.JoinConfig() join, err := field.JoinClause(mapping, table) joins = append(joins, join) if err != nil { return err } } else if tableName != "" && columnName != "" { column = tableName + "." + columnName } else { column = mapping.FieldColumnName(field.Name, table) } where = append(where, fmt.Sprintf("%s = ?", column)) } sql := fmt.Sprintf(stmts[s.kind], table, table+strings.Join(joins, ""), strings.Join(where, " AND ")) stmtName := stmtCodeVar(s.entity, "ID") s.register(buf, stmtName, sql) return nil } func (s *Stmt) rename(buf *file.Buffer) error { mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } table := mapping.TableName(s.entity, s.config["table"]) nk := mapping.NaturalKey() updates := make([]string, 0, len(nk)) for _, field := range nk { column, value, err := field.InsertColumn(mapping, table, s.defs, s.registeredSQLStmts) if err != nil { return err } if column == "" && value == "" { continue } updates = append(updates, fmt.Sprintf("%s = %s", column, value)) } sql := fmt.Sprintf(stmts[s.kind], table, strings.Join(updates, " AND ")) kind := strings.ReplaceAll(s.kind, "-", "_") stmtName := stmtCodeVar(s.entity, kind) s.register(buf, stmtName, sql) return nil } func (s *Stmt) update(buf *file.Buffer) error { entityUpdate := lex.PascalCase(s.entity) mapping, err := Parse(s.localPath, s.pkgs, entityUpdate, s.kind) if err != nil { return fmt.Errorf("Parse entity struct: %w", err) } table := mapping.TableName(s.entity, s.config["table"]) all := mapping.ColumnFields("ID") // This exclude the ID column, which is autogenerated. 
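// The SQL built below follows the "update" template from the stmts map, i.e.
// roughly: UPDATE <table> SET <column> = <value>, ... WHERE id = ?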
updates := make([]string, 0, len(all)) for _, field := range all { column, value, err := field.InsertColumn(mapping, table, s.defs, s.registeredSQLStmts) if err != nil { return err } if column == "" && value == "" { continue } updates = append(updates, fmt.Sprintf("%s = %s", column, value)) } sql := fmt.Sprintf(stmts[s.kind], table, strings.Join(updates, ", "), "id = ?") kind := strings.ReplaceAll(s.kind, "-", "_") stmtName := stmtCodeVar(s.entity, kind) s.register(buf, stmtName, sql) return nil } func (s *Stmt) delete(buf *file.Buffer) error { mapping, err := Parse(s.localPath, s.pkgs, lex.PascalCase(s.entity), s.kind) if err != nil { return err } table := mapping.TableName(s.entity, s.config["table"]) var where string if mapping.Type == ReferenceTable || mapping.Type == MapTable { where = "%s_id = ?" } if strings.HasPrefix(s.kind, "delete-by") { filters := strings.Split(s.kind[len("delete-by-"):], "-and-") conditions := make([]string, 0, len(filters)) for _, filter := range filters { field, err := mapping.FilterFieldByName(filter) if err != nil { return err } column, value, err := field.InsertColumn(mapping, table, s.defs, s.registeredSQLStmts) if err != nil { return err } if column == "" && value == "" { continue } conditions = append(conditions, fmt.Sprintf("%s = %s", column, value)) } where = strings.Join(conditions, " AND ") } sql := fmt.Sprintf(stmts["delete"], table, where) kind := strings.ReplaceAll(s.kind, "-", "_") stmtName := stmtCodeVar(s.entity, kind) if mapping.Type == ReferenceTable || mapping.Type == MapTable { buf.L("const %s = `%s`", stmtName, sql) } else { s.register(buf, stmtName, sql) } return nil } // Output a line of code that registers the given statement and declares the // associated statement code global variable. func (s *Stmt) register(buf *file.Buffer, stmtName, sql string) { s.registeredSQLStmts[stmtName] = sql if !strings.HasPrefix(sql, "`") || !strings.HasSuffix(sql, "`") { sql = fmt.Sprintf("`\n%s\n`", sql) } buf.L("var %s = RegisterStmt(%s)", stmtName, sql) } // Map of boilerplate statements. var stmts = map[string]string{ "names": "SELECT %s\n FROM %s\n ORDER BY %s", "objects": "SELECT %s\n FROM %s\n ORDER BY %s", "create": "INSERT INTO %s (%s)\n VALUES (%s)", "replace": "INSERT OR REPLACE INTO %s (%s)\n VALUES (%s)", "id": "SELECT %s.id FROM %s\n WHERE %s", "rename": "UPDATE %s SET name = ? WHERE %s", "update": "UPDATE %s\n SET %s\n WHERE %s", "delete": "DELETE FROM %s WHERE %s", } incus-6.0.4/cmd/generate-database/file/000077500000000000000000000000001477363751000176755ustar00rootroot00000000000000incus-6.0.4/cmd/generate-database/file/boilerplate/000077500000000000000000000000001477363751000221775ustar00rootroot00000000000000incus-6.0.4/cmd/generate-database/file/boilerplate/boilerplate.go000066400000000000000000000114301477363751000250270ustar00rootroot00000000000000package boilerplate import ( "context" "database/sql" "encoding/json" "errors" "fmt" ) type tx interface { //nolint:unused dbtx Commit() error Rollback() error } type dbtx interface { ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row } type preparer interface { Prepare(query string) (*sql.Stmt, error) } // RegisterStmt register a SQL statement. // // Registered statements will be prepared upfront and reused, to speed up // execution. 
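// A minimal usage sketch (the statement name and SQL are hypothetical):
//
//	var instanceObjects = RegisterStmt(`SELECT instances.id FROM instances`)
//
// with PrepareStmts called once at start-up to turn every registered string into
// a prepared *sql.Stmt.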
// // Return a unique registration code. func RegisterStmt(sqlStmt string) int { code := len(stmts) stmts[code] = sqlStmt return code } // PrepareStmts prepares all registered statements and returns an index from // statement code to prepared statement object. func PrepareStmts(db preparer, skipErrors bool) (map[int]*sql.Stmt, error) { index := map[int]*sql.Stmt{} for code, sqlStmt := range stmts { stmt, err := db.Prepare(sqlStmt) if err != nil && !skipErrors { return nil, fmt.Errorf("%q: %w", sqlStmt, err) } index[code] = stmt } return index, nil } var stmts = map[int]string{} // Statement code to statement SQL text. // PreparedStmts is a placeholder for transitioning to package-scoped transaction functions. var PreparedStmts = map[int]*sql.Stmt{} // Stmt prepares the in-memory prepared statement for the transaction. func Stmt(db dbtx, code int) (*sql.Stmt, error) { stmt, ok := PreparedStmts[code] if !ok { return nil, fmt.Errorf("No prepared statement registered with code %d", code) } tx, ok := db.(*sql.Tx) if ok { return tx.Stmt(stmt), nil } return stmt, nil } // StmtString returns the in-memory query string with the given code. func StmtString(code int) (string, error) { stmt, ok := stmts[code] if !ok { return "", fmt.Errorf("No prepared statement registered with code %d", code) } return stmt, nil } var ( // ErrNotFound is the error returned, if the entity is not found in the DB. ErrNotFound = errors.New("Not found") // ErrConflict is the error returned, if the adding or updating an entity // causes a conflict with an existing entity. ErrConflict = errors.New("Conflict") ) var mapErr = defaultMapErr func defaultMapErr(err error, entity string) error { return err } // Marshaler is the interface that wraps the MarshalDB method, which converts // the underlying type into a string representation suitable for persistence in // the database. type Marshaler interface { MarshalDB() (string, error) } // Unmarshaler is the interface that wraps the UnmarshalDB method, which converts // a string representation retrieved from the database into the underlying type. type Unmarshaler interface { UnmarshalDB(string) error } func marshal(v any) (string, error) { marshaller, ok := v.(Marshaler) if !ok { return "", fmt.Errorf("Cannot marshal data, type does not implement DBMarshaler") } return marshaller.MarshalDB() } func unmarshal(data string, v any) error { if v == nil { return fmt.Errorf("Cannot unmarshal data into nil value") } unmarshaler, ok := v.(Unmarshaler) if !ok { return fmt.Errorf("Cannot marshal data, type does not implement DBUnmarshaler") } return unmarshaler.UnmarshalDB(data) } func marshalJSON(v any) (string, error) { marshalled, err := json.Marshal(v) if err != nil { return "", err } return string(marshalled), nil } func unmarshalJSON(data string, v any) error { return json.Unmarshal([]byte(data), v) } // dest is a function that is expected to return the objects to pass to the // 'dest' argument of sql.Rows.Scan(). It is invoked by SelectObjects once per // yielded row, and it will be passed the index of the row being scanned. type dest func(scan func(dest ...any) error) error // selectObjects executes a statement which must yield rows with a specific // columns schema. It invokes the given Dest hook for each yielded row. func selectObjects(ctx context.Context, stmt *sql.Stmt, rowFunc dest, args ...any) error { rows, err := stmt.QueryContext(ctx, args...) 
if err != nil { return err } defer func() { _ = rows.Close() }() for rows.Next() { err = rowFunc(rows.Scan) if err != nil { return err } } return rows.Err() } // scan runs a query with inArgs and provides the rowFunc with the scan function for each row. // It handles closing the rows and errors from the result set. func scan(ctx context.Context, db dbtx, sqlStmt string, rowFunc dest, inArgs ...any) error { rows, err := db.QueryContext(ctx, sqlStmt, inArgs...) if err != nil { return err } defer func() { _ = rows.Close() }() for rows.Next() { err = rowFunc(rows.Scan) if err != nil { return err } } return rows.Err() } incus-6.0.4/cmd/generate-database/file/boilerplate/boilerplate_test.go000066400000000000000000000004221477363751000260650ustar00rootroot00000000000000package boilerplate import ( "testing" ) func Test(t *testing.T) { // Fake the usage of the private variables and functions in the boilerplate. _ = mapErr _ = defaultMapErr _ = marshal _ = unmarshal _ = marshalJSON _ = unmarshalJSON _ = selectObjects _ = scan } incus-6.0.4/cmd/generate-database/file/buffer.go000066400000000000000000000013461477363751000215010ustar00rootroot00000000000000package file import ( "bytes" "fmt" "go/format" ) // Buffer for accumulating source code output. type Buffer struct { buf *bytes.Buffer } // Create a new source code text buffer. func newBuffer() *Buffer { return &Buffer{ buf: bytes.NewBuffer(nil), } } // L accumulates a single line of source code. func (b *Buffer) L(format string, a ...any) { fmt.Fprintf(b.buf, format, a...) b.N() } // N accumulates a single new line. func (b *Buffer) N() { fmt.Fprintf(b.buf, "\n") } // Returns the source code to add to the target file. func (b *Buffer) code() ([]byte, error) { code, err := format.Source(b.buf.Bytes()) if err != nil { return nil, fmt.Errorf("Can't format generated source code: %w", err) } return code, nil } incus-6.0.4/cmd/generate-database/file/doc.go000066400000000000000000000001271477363751000207710ustar00rootroot00000000000000// Package file contains helpers to write auto-generated Go source files. package file incus-6.0.4/cmd/generate-database/file/snippet.go000066400000000000000000000002731477363751000217100ustar00rootroot00000000000000package file // Snippet generates a single code snippet of a target source file code. type Snippet interface { Generate(buffer *Buffer) error GenerateSignature(buffer *Buffer) error } incus-6.0.4/cmd/generate-database/file/write.go000066400000000000000000000114021477363751000213540ustar00rootroot00000000000000package file import ( _ "embed" "errors" "fmt" "os" "strings" "github.com/lxc/incus/v6/cmd/generate-database/lex" ) const codeGeneratedByLine = `// Code generated by generate-database from the incus project - DO NOT EDIT.` //go:embed boilerplate/boilerplate.go var boilerplate string // Boilerplate writes the general boilerplate code for mapper to the target package. func Boilerplate(path string) error { boilerplate = strings.Replace(boilerplate, "package boilerplate", fmt.Sprintf("package %s", os.Getenv("GOPACKAGE")), 1) content := codeGeneratedByLine + "\n\n" + boilerplate bytes := []byte(content) var err error if path == "-" { _, err = os.Stdout.Write(bytes) } else { err = os.WriteFile(path, []byte(content), 0o644) } if err != nil { return fmt.Errorf("Mapper boilerplate file %q: %w", path, err) } return nil } // Reset an auto-generated source file, writing a new empty file header. 
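// The header written by Reset looks roughly like the following (build comment,
// package name and imports vary per invocation; the values shown are examples):
//
//	//go:build linux && cgo && !agent
//
//	// Code generated by generate-database from the incus project - DO NOT EDIT.
//
//	package cluster
//
//	import (
//		"database/sql"
//	)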
func Reset(path string, imports []string, buildComment string, iface bool) error { // A new line needs to be appended after the build comment. if buildComment != "" { buildComment = fmt.Sprintf(`%s `, buildComment) } if iface { err := resetInterface(path, buildComment) if err != nil { return err } } content := fmt.Sprintf(`%s%s package %s import ( `, buildComment, codeGeneratedByLine, os.Getenv("GOPACKAGE")) for _, uri := range imports { content += fmt.Sprintf("\t%q\n", uri) } content += ")\n\n" bytes := []byte(content) var err error if path == "-" { _, err = os.Stdout.Write(bytes) } else { err = os.WriteFile(path, []byte(content), 0o644) } if err != nil { return fmt.Errorf("Reset target source file %q: %w", path, err) } return nil } func resetInterface(path string, buildComment string) error { if strings.HasSuffix(path, "mapper.go") { parts := strings.Split(path, ".") interfacePath := strings.Join(parts[:len(parts)-2], ".") + ".interface.mapper.go" content := fmt.Sprintf("%spackage %s", buildComment, os.Getenv("GOPACKAGE")) err := os.WriteFile(interfacePath, []byte(content), 0o644) return err } return nil } // Append a code snippet to a file. func Append(entity string, path string, snippet Snippet, iface bool) error { if iface { err := appendInterface(entity, path, snippet) if err != nil { return err } } buffer := newBuffer() buffer.N() err := snippet.Generate(buffer) if err != nil { return fmt.Errorf("Generate code snippet: %w", err) } var file *os.File if path == "-" { file = os.Stdout } else { file, err = os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0o644) if err != nil { return fmt.Errorf("Open target source code file %q: %w", path, err) } defer func() { _ = file.Close() }() } bytes, err := buffer.code() if err != nil { return err } _, err = file.Write(bytes) if err != nil { return fmt.Errorf("Append snippet to target source code file %q: %w", path, err) } // Return any errors on close if file is not stdout. if path != "-" { return file.Close() } return nil } func appendInterface(entity string, path string, snippet Snippet) error { if !strings.HasSuffix(path, ".mapper.go") { return nil } parts := strings.Split(path, ".") interfacePath := strings.Join(parts[:len(parts)-2], ".") + ".interface.mapper.go" stat, err := os.Stat(interfacePath) if err != nil { if errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("could not get file info for path %q: %w", interfacePath, err) } buffer := newBuffer() file, err := os.OpenFile(interfacePath, os.O_RDWR, 0o644) if err != nil { return fmt.Errorf("Open target source code file %q: %w", interfacePath, err) } defer func() { _ = file.Close() }() err = snippet.GenerateSignature(buffer) if err != nil { return fmt.Errorf("Generate interface snippet: %w", err) } bytes, err := buffer.code() if err != nil { return err } declaration := fmt.Sprintf("type %sGenerated interface {", lex.PascalCase(entity)) content := make([]byte, stat.Size()) _, err = file.Read(content) if err != nil { return fmt.Errorf("Could not read interface file %q: %w", interfacePath, err) } firstWrite := !strings.Contains(string(content), declaration) if firstWrite { // If this is the first signature write to the file, append the whole thing. _, err = file.WriteAt(bytes, stat.Size()) } else { // If an interface already exists, just append the method, omitting everything before the first '{'. startIndex := 0 for i := range bytes { // type ObjectGenerated interface { if string(bytes[i]) == "{" { startIndex = i + 1 break } } // overwrite the closing brace. 
_, err = file.WriteAt(bytes[startIndex:], stat.Size()-2) } if err != nil { return fmt.Errorf("Append snippet to target source code file %q: %w", interfacePath, err) } return file.Close() } incus-6.0.4/cmd/generate-database/lex/000077500000000000000000000000001477363751000175465ustar00rootroot00000000000000incus-6.0.4/cmd/generate-database/lex/case.go000066400000000000000000000040631477363751000210130ustar00rootroot00000000000000package lex import ( "bytes" "strings" "unicode" "golang.org/x/text/cases" "golang.org/x/text/language" ) // Capital capitalizes the given string ("foo" -> "Foo"). func Capital(s string) string { return cases.Title(language.English, cases.NoLower).String(s) } // Minuscule turns the first character to lower case ("Foo" -> "foo") or the whole word if it is all uppercase ("UUID" -> "uuid"). func Minuscule(s string) string { if strings.ToUpper(s) == s { return strings.ToLower(s) } return strings.ToLower(s[:1]) + s[1:] } // CamelCase converts to camel case ("foo_bar" -> "fooBar"). func CamelCase(s string) string { return Minuscule(PascalCase(s)) } // PascalCase converts to pascal case ("foo_bar" -> "FooBar"). func PascalCase(s string) string { words := strings.Split(s, "_") for i := range words { words[i] = Capital(words[i]) } return strings.Join(words, "") } // SnakeCase converts to snake case ("FooBar" -> "foo_bar"). func SnakeCase(name string) string { var ret bytes.Buffer multipleUpper := false var lastUpper rune var beforeUpper rune for _, c := range name { // Non-lowercase character after uppercase is considered to be uppercase too. isUpper := (unicode.IsUpper(c) || (lastUpper != 0 && !unicode.IsLower(c))) if lastUpper != 0 { // Output a delimiter if last character was either the // first uppercase character in a row, or the last one // in a row (e.g. 'S' in "HTTPServer"). Do not output // a delimiter at the beginning of the name. firstInRow := !multipleUpper lastInRow := !isUpper if ret.Len() > 0 && (firstInRow || lastInRow) && beforeUpper != '_' { ret.WriteByte('_') } ret.WriteRune(unicode.ToLower(lastUpper)) } // Buffer uppercase char, do not output it yet as a delimiter // may be required if the next character is lowercase. if isUpper { multipleUpper = (lastUpper != 0) lastUpper = c continue } ret.WriteRune(c) lastUpper = 0 beforeUpper = c multipleUpper = false } if lastUpper != 0 { ret.WriteRune(unicode.ToLower(lastUpper)) } return ret.String() } incus-6.0.4/cmd/generate-database/lex/config.go000066400000000000000000000005631477363751000213460ustar00rootroot00000000000000package lex import ( "fmt" "strings" ) // KeyValue extracts the key and value encoded in the given string and // separated by '=' (foo=bar -> foo, bar). func KeyValue(s string) (string, string, error) { parts := strings.Split(s, "=") if len(parts) != 2 { return "", "", fmt.Errorf("The token %q is not a key/value pair", s) } return parts[0], parts[1], nil } incus-6.0.4/cmd/generate-database/lex/form.go000066400000000000000000000013671477363751000210470ustar00rootroot00000000000000package lex import ( "strings" ) // Plural converts to plural form ("foo" -> "foos"). func Plural(s string) string { // TODO: smarter algorithm? :) if strings.HasSuffix(strings.ToLower(s), "config") { return s } if strings.HasSuffix(s, "ch") || strings.HasSuffix(s, "sh") || strings.HasSuffix(s, "ss") { return s + "es" } if s[len(s)-1] != 's' { return s + "s" } return s } // Singular converts to singular form ("foos" -> "foo"). func Singular(s string) string { // TODO: smarter algorithm? 
:) before, ok := strings.CutSuffix(s, "es") if ok && (strings.HasSuffix(before, "ch") || strings.HasSuffix(before, "sh") || strings.HasSuffix(before, "ss")) { return before } if s[len(s)-1] == 's' { return s[:len(s)-1] } return s } incus-6.0.4/cmd/generate-database/lex/lang.go000066400000000000000000000016121477363751000210160ustar00rootroot00000000000000package lex import ( "fmt" ) // VarDecl holds information about a variable declaration. type VarDecl struct { Name string Expr string } func (d VarDecl) String() string { return fmt.Sprintf("%s %s", d.Name, d.Expr) } // MethodSignature holds information about a method signature. type MethodSignature struct { Comment string // Method comment Name string // Method name Receiver VarDecl // Receiver name and type Args []VarDecl // Method arguments Return []string // Return type } // Slice returns the type name of a slice of items of the given type. func Slice(typ string) string { return fmt.Sprintf("[]%s", typ) } // Element is the reverse of Slice, returning the element type name the slice // with given type. func Element(typ string) string { return typ[len("[]"):] } // Star adds a "*" prefix to the given string. func Star(s string) string { return "*" + s } incus-6.0.4/cmd/generate-database/main.go000066400000000000000000000001731477363751000202320ustar00rootroot00000000000000package main import ( "os" ) func main() { root := newRoot() err := root.Execute() if err != nil { os.Exit(1) } } incus-6.0.4/cmd/generate-database/root.go000066400000000000000000000013321477363751000202670ustar00rootroot00000000000000package main import ( "fmt" "github.com/spf13/cobra" ) // Return a new root command. func newRoot() *cobra.Command { cmd := &cobra.Command{ Use: "generate-database", Short: "Code generation tool for Incus development", Long: `This is the entry point for all "go:generate" directives used in Incus' source code.`, RunE: func(cmd *cobra.Command, args []string) error { return fmt.Errorf("Not implemented") }, CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true}, } cmd.AddCommand(newDb()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } incus-6.0.4/cmd/incus-agent/000077500000000000000000000000001477363751000156375ustar00rootroot00000000000000incus-6.0.4/cmd/incus-agent/api.go000066400000000000000000000010361477363751000167370ustar00rootroot00000000000000package main import ( "net/http" "github.com/lxc/incus/v6/internal/server/response" ) // APIEndpoint represents a URL in our API. type APIEndpoint struct { Name string // Name for this endpoint. Path string // Path pattern for this endpoint. Get APIEndpointAction Put APIEndpointAction Post APIEndpointAction Delete APIEndpointAction Patch APIEndpointAction } // APIEndpointAction represents an action on an API endpoint. 
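// As an illustration (the handler name is hypothetical), endpoints in this
// package are wired up along these lines:
//
//	var stateCmd = APIEndpoint{
//		Name: "state",
//		Path: "state",
//		Get:  APIEndpointAction{Handler: stateGet},
//	}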
type APIEndpointAction struct { Handler func(d *Daemon, r *http.Request) response.Response } incus-6.0.4/cmd/incus-agent/api_1.0.go000066400000000000000000000130241477363751000173150ustar00rootroot00000000000000package main import ( "encoding/json" "errors" "fmt" "io" "net/http" "os" "github.com/mdlayher/vsock" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/ports" "github.com/lxc/incus/v6/internal/server/response" localvsock "github.com/lxc/incus/v6/internal/server/vsock" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" agentAPI "github.com/lxc/incus/v6/shared/api/agent" "github.com/lxc/incus/v6/shared/logger" localtls "github.com/lxc/incus/v6/shared/tls" ) var api10Cmd = APIEndpoint{ Get: APIEndpointAction{Handler: api10Get}, Put: APIEndpointAction{Handler: api10Put}, } var api10 = []APIEndpoint{ api10Cmd, execCmd, eventsCmd, metricsCmd, operationsCmd, operationCmd, operationWebsocket, operationWait, sftpCmd, stateCmd, } func api10Get(d *Daemon, r *http.Request) response.Response { srv := api.ServerUntrusted{ APIExtensions: version.APIExtensions, APIStatus: "stable", APIVersion: version.APIVersion, Public: false, Auth: "trusted", AuthMethods: []string{api.AuthenticationMethodTLS}, } uname, err := linux.Uname() if err != nil { return response.InternalError(err) } serverName, err := os.Hostname() if err != nil { return response.SmartError(err) } env := api.ServerEnvironment{ Kernel: uname.Sysname, KernelArchitecture: uname.Machine, KernelVersion: uname.Release, Server: "incus-agent", ServerPid: os.Getpid(), ServerVersion: version.Version, ServerName: serverName, } fullSrv := api.Server{ServerUntrusted: srv} fullSrv.Environment = env return response.SyncResponseETag(true, fullSrv, fullSrv) } func setConnectionInfo(d *Daemon, rd io.Reader) error { var data agentAPI.API10Put err := json.NewDecoder(rd).Decode(&data) if err != nil { return err } d.DevIncusMu.Lock() d.serverCID = data.CID d.serverPort = data.Port d.serverCertificate = data.Certificate d.DevIncusEnabled = data.DevIncus d.DevIncusMu.Unlock() return nil } func api10Put(d *Daemon, r *http.Request) response.Response { err := setConnectionInfo(d, r.Body) if err != nil { return response.ErrorResponse(http.StatusInternalServerError, err.Error()) } // Try connecting to the host. client, err := getClient(d.serverCID, int(d.serverPort), d.serverCertificate) if err != nil { return response.ErrorResponse(http.StatusInternalServerError, err.Error()) } server, err := incus.ConnectIncusHTTP(nil, client) if err != nil { return response.ErrorResponse(http.StatusInternalServerError, err.Error()) } defer server.Disconnect() // Let the host know, we were able to connect successfully. d.chConnected <- struct{}{} if d.DevIncusEnabled { err = startDevIncusServer(d) } else { err = stopDevIncusServer(d) } if err != nil { return response.ErrorResponse(http.StatusInternalServerError, err.Error()) } return response.EmptySyncResponse } func startDevIncusServer(d *Daemon) error { d.DevIncusMu.Lock() defer d.DevIncusMu.Unlock() // If a DevIncus server is already running, don't start a second one. if d.DevIncusRunning { return nil } servers["DevIncus"] = devIncusServer(d) // Prepare the DevIncus server. DevIncusListener, err := createDevIncuslListener("/dev") if err != nil { return err } d.DevIncusRunning = true // Start the DevIncus listener. 
go func() { err := servers["DevIncus"].Serve(DevIncusListener) if err != nil { d.DevIncusMu.Lock() d.DevIncusRunning = false d.DevIncusMu.Unlock() // http.ErrServerClosed can be ignored as this is returned when the server is closed intentionally. if !errors.Is(err, http.ErrServerClosed) { errChan <- err } } }() return nil } func stopDevIncusServer(d *Daemon) error { d.DevIncusMu.Lock() d.DevIncusRunning = false d.DevIncusMu.Unlock() if servers["DevIncus"] != nil { return servers["DevIncus"].Close() } return nil } func getClient(CID uint32, port int, serverCertificate string) (*http.Client, error) { agentCert, err := os.ReadFile("agent.crt") if err != nil { return nil, err } agentKey, err := os.ReadFile("agent.key") if err != nil { return nil, err } client, err := localvsock.HTTPClient(CID, port, string(agentCert), string(agentKey), serverCertificate) if err != nil { return nil, err } return client, nil } func startHTTPServer(d *Daemon, debug bool) error { const CIDAny uint32 = 4294967295 // Equivalent to VMADDR_CID_ANY. // Setup the listener on wildcard CID for inbound connections from Incus. // We use the VMADDR_CID_ANY CID so that if the VM's CID changes in the future the listener still works. // A CID change can occur when restoring a stateful VM that was previously using one CID but is // subsequently restored using a different one. l, err := vsock.ListenContextID(CIDAny, ports.HTTPSDefaultPort, nil) if err != nil { return fmt.Errorf("Failed to listen on vsock: %w", err) } logger.Info("Started vsock listener") // Load the expected server certificate. cert, err := localtls.ReadCert("server.crt") if err != nil { return fmt.Errorf("Failed to read client certificate: %w", err) } tlsConfig, err := serverTLSConfig() if err != nil { return fmt.Errorf("Failed to get TLS config: %w", err) } // Prepare the HTTP server. servers["http"] = restServer(tlsConfig, cert, debug, d) // Start the server. go func() { err := servers["http"].Serve(networkTLSListener(l, tlsConfig)) if !errors.Is(err, http.ErrServerClosed) { errChan <- err } l.Close() }() return nil } incus-6.0.4/cmd/incus-agent/daemon.go000066400000000000000000000014201477363751000174260ustar00rootroot00000000000000package main import ( "sync" "github.com/lxc/incus/v6/internal/server/events" ) // A Daemon can respond to requests from a shared client. type Daemon struct { // Event servers events *events.Server // ContextID and port of the host socket server. serverCID uint32 serverPort uint32 serverCertificate string // The channel which is used to indicate that the agent was able to connect to the host. chConnected chan struct{} DevIncusRunning bool DevIncusMu sync.Mutex DevIncusEnabled bool } // newDaemon returns a new Daemon object with the given configuration. 
func newDaemon(debug, verbose bool) *Daemon { hostEvents := events.NewServer(debug, verbose, nil) return &Daemon{ events: hostEvents, chConnected: make(chan struct{}), } } incus-6.0.4/cmd/incus-agent/dev_incus.go000066400000000000000000000222101477363751000201420ustar00rootroot00000000000000package main import ( "fmt" "net" "net/http" "net/url" "os" "path/filepath" "strings" "time" "github.com/gorilla/mux" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/server/daemon" "github.com/lxc/incus/v6/internal/server/device/config" localUtil "github.com/lxc/incus/v6/internal/server/util" api "github.com/lxc/incus/v6/shared/api/guest" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/util" ) // DevIncusServer creates an http.Server capable of handling requests against the // /dev/incus Unix socket endpoint created inside VMs. func devIncusServer(d *Daemon) *http.Server { return &http.Server{ Handler: devIncusAPI(d), } } type devIncusHandler struct { path string /* * This API will have to be changed slightly when we decide to support * websocket events upgrading, but since we don't have events on the * server side right now either, I went the simple route to avoid * needless noise. */ f func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse } func getVsockClient(d *Daemon) (incus.InstanceServer, error) { // Try connecting to the host. client, err := getClient(d.serverCID, int(d.serverPort), d.serverCertificate) if err != nil { return nil, err } server, err := incus.ConnectIncusHTTP(nil, client) if err != nil { return nil, err } return server, nil } var DevIncusConfigGet = devIncusHandler{"/1.0/config", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { client, err := getVsockClient(d) if err != nil { return smartResponse(fmt.Errorf("Failed connecting to the host over vsock: %w", err)) } defer client.Disconnect() resp, _, err := client.RawQuery("GET", "/1.0/config", nil, "") if err != nil { return smartResponse(err) } var config []string err = resp.MetadataAsStruct(&config) if err != nil { return smartResponse(fmt.Errorf("Failed parsing response from host: %w", err)) } filtered := []string{} for _, k := range config { if strings.HasPrefix(k, "/1.0/config/user.") || strings.HasPrefix(k, "/1.0/config/cloud-init.") { filtered = append(filtered, k) } } return okResponse(filtered, "json") }} var DevIncusConfigKeyGet = devIncusHandler{"/1.0/config/{key}", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { key, err := url.PathUnescape(mux.Vars(r)["key"]) if err != nil { return &devIncusResponse{"bad request", http.StatusBadRequest, "raw"} } if !strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "cloud-init.") { return &devIncusResponse{"not authorized", http.StatusForbidden, "raw"} } client, err := getVsockClient(d) if err != nil { return smartResponse(fmt.Errorf("Failed connecting to host over vsock: %w", err)) } defer client.Disconnect() resp, _, err := client.RawQuery("GET", fmt.Sprintf("/1.0/config/%s", key), nil, "") if err != nil { return smartResponse(err) } var value string err = resp.MetadataAsStruct(&value) if err != nil { return smartResponse(fmt.Errorf("Failed parsing response from host: %w", err)) } return okResponse(value, "raw") }} var DevIncusMetadataGet = devIncusHandler{"/1.0/meta-data", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { var client incus.InstanceServer var err error for i := 0; i < 10; i++ { client, err = getVsockClient(d) if 
err == nil { break } time.Sleep(500 * time.Millisecond) } if err != nil { return smartResponse(fmt.Errorf("Failed connecting to host over vsock: %w", err)) } defer client.Disconnect() resp, _, err := client.RawQuery("GET", "/1.0/meta-data", nil, "") if err != nil { return smartResponse(err) } var metaData string err = resp.MetadataAsStruct(&metaData) if err != nil { return smartResponse(fmt.Errorf("Failed parsing response from host: %w", err)) } return okResponse(metaData, "raw") }} var devIncusEventsGet = devIncusHandler{"/1.0/events", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { err := eventsGet(d, r).Render(w) if err != nil { return smartResponse(err) } return okResponse("", "raw") }} var DevIncusAPIGet = devIncusHandler{"/1.0", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { client, err := getVsockClient(d) if err != nil { return smartResponse(fmt.Errorf("Failed connecting to host over vsock: %w", err)) } defer client.Disconnect() if r.Method == "GET" { resp, _, err := client.RawQuery(r.Method, "/1.0", nil, "") if err != nil { return smartResponse(err) } var instanceData api.DevIncusGet err = resp.MetadataAsStruct(&instanceData) if err != nil { return smartResponse(fmt.Errorf("Failed parsing response from host: %w", err)) } return okResponse(instanceData, "json") } else if r.Method == "PATCH" { _, _, err := client.RawQuery(r.Method, "/1.0", r.Body, "") if err != nil { return smartResponse(err) } return okResponse("", "raw") } return &devIncusResponse{fmt.Sprintf("method %q not allowed", r.Method), http.StatusBadRequest, "raw"} }} var DevIncusDevicesGet = devIncusHandler{"/1.0/devices", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { client, err := getVsockClient(d) if err != nil { return smartResponse(fmt.Errorf("Failed connecting to host over vsock: %w", err)) } defer client.Disconnect() resp, _, err := client.RawQuery("GET", "/1.0/devices", nil, "") if err != nil { return smartResponse(err) } var devices config.Devices err = resp.MetadataAsStruct(&devices) if err != nil { return smartResponse(fmt.Errorf("Failed parsing response from host: %w", err)) } return okResponse(devices, "json") }} var handlers = []devIncusHandler{ {"/", func(d *Daemon, w http.ResponseWriter, r *http.Request) *devIncusResponse { return okResponse([]string{"/1.0"}, "json") }}, DevIncusAPIGet, DevIncusConfigGet, DevIncusConfigKeyGet, DevIncusMetadataGet, devIncusEventsGet, DevIncusDevicesGet, } func hoistReq(f func(*Daemon, http.ResponseWriter, *http.Request) *devIncusResponse, d *Daemon) func(http.ResponseWriter, *http.Request) { return func(w http.ResponseWriter, r *http.Request) { resp := f(d, w, r) if resp.code != http.StatusOK { http.Error(w, fmt.Sprintf("%s", resp.content), resp.code) } else if resp.ctype == "json" { w.Header().Set("Content-Type", "application/json") var debugLogger logger.Logger if daemon.Debug { debugLogger = logger.Logger(logger.Log) } _ = localUtil.WriteJSON(w, resp.content, debugLogger) } else if resp.ctype != "websocket" { w.Header().Set("Content-Type", "application/octet-stream") _, _ = fmt.Fprint(w, resp.content.(string)) } } } func devIncusAPI(d *Daemon) http.Handler { m := mux.NewRouter() m.UseEncodedPath() // Allow encoded values in path segments. for _, handler := range handlers { m.HandleFunc(handler.path, hoistReq(handler.f, d)) } return m } // Create a new net.Listener bound to the unix socket of the DevIncus endpoint. 
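// With dir set to "/dev", as done by startDevIncusServer in api_1.0.go, the
// socket ends up at /dev/incus/sock with a /dev/lxd compatibility symlink
// created next to it.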
func createDevIncuslListener(dir string) (net.Listener, error) { path := filepath.Join(dir, "incus", "sock") err := os.MkdirAll(filepath.Dir(path), 0o755) if err != nil { return nil, err } // Add a symlink for legacy support. err = os.Symlink(filepath.Join(dir, "incus"), filepath.Join(dir, "lxd")) if err != nil && !os.IsExist(err) { return nil, err } // If this socket exists, that means a previous agent instance died and // didn't clean up. We assume that such agent instance is actually dead // if we get this far, since localCreateListener() tries to connect to // the actual incus socket to make sure that it is actually dead. So, it // is safe to remove it here without any checks. // // Also, it would be nice to SO_REUSEADDR here so we don't have to // delete the socket, but we can't: // http://stackoverflow.com/questions/15716302/so-reuseaddr-and-af-unix // // Note that this will force clients to reconnect when the daemon is restarted. err = socketUnixRemoveStale(path) if err != nil { return nil, err } listener, err := socketUnixListen(path) if err != nil { return nil, err } err = socketUnixSetPermissions(path, 0o600) if err != nil { _ = listener.Close() return nil, err } return listener, nil } // Remove any stale socket file at the given path. func socketUnixRemoveStale(path string) error { // If there's no socket file at all, there's nothing to do. if !util.PathExists(path) { return nil } logger.Debugf("Detected stale unix socket, deleting") err := os.Remove(path) if err != nil { return fmt.Errorf("could not delete stale local socket: %w", err) } return nil } // Change the file mode of the given unix socket file. func socketUnixSetPermissions(path string, mode os.FileMode) error { err := os.Chmod(path, mode) if err != nil { return fmt.Errorf("cannot set permissions on local socket: %w", err) } return nil } // Bind to the given unix socket path. func socketUnixListen(path string) (net.Listener, error) { addr, err := net.ResolveUnixAddr("unix", path) if err != nil { return nil, fmt.Errorf("cannot resolve socket address: %w", err) } listener, err := net.ListenUnix("unix", addr) if err != nil { return nil, fmt.Errorf("cannot bind socket: %w", err) } return listener, err } incus-6.0.4/cmd/incus-agent/events.go000066400000000000000000000072411477363751000174760ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "net/http" "strings" "github.com/lxc/incus/v6/internal/server/events" "github.com/lxc/incus/v6/internal/server/response" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/ws" ) var eventsCmd = APIEndpoint{ Path: "events", Get: APIEndpointAction{Handler: eventsGet}, Post: APIEndpointAction{Handler: eventsPost}, } type eventsServe struct { req *http.Request d *Daemon } func (r *eventsServe) Render(w http.ResponseWriter) error { return eventsSocket(r.d, r.req, w) } func (r *eventsServe) String() string { return "event handler" } // Code returns the HTTP code. func (r *eventsServe) Code() int { return http.StatusOK } func eventsSocket(d *Daemon, r *http.Request, w http.ResponseWriter) error { typeStr := r.FormValue("type") if typeStr == "" { // We add 'config' here to allow listeners on /dev/incus/sock to receive config changes. typeStr = "logging,operation,lifecycle,config,device" } var listenerConnection events.EventListenerConnection // If the client has not requested a websocket connection then fallback to long polling event stream mode. 
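// Elaboration (added, hedged): the handler below supports two transports. If the
// client sent an "Upgrade: websocket" header, the connection is upgraded and events
// are delivered over a websocket; otherwise the raw HTTP connection is hijacked and
// used as a long-polling event stream. In both cases the optional "type" form value
// selects which event types to subscribe to, for example a request such as
//
//	GET /1.0/events?type=lifecycle,device
//
// restricts the listener to lifecycle and device events, while omitting the
// parameter subscribes to the default set listed above.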
if r.Header.Get("Upgrade") == "websocket" { // Upgrade the connection to websocket conn, err := ws.Upgrader.Upgrade(w, r, nil) if err != nil { return err } defer func() { _ = conn.Close() }() // Ensure listener below ends when this function ends. listenerConnection = events.NewWebsocketListenerConnection(conn) } else { h, ok := w.(http.Hijacker) if !ok { return fmt.Errorf("Missing implemented http.Hijacker interface") } conn, _, err := h.Hijack() if err != nil { return err } defer func() { _ = conn.Close() }() // Ensure listener below ends when this function ends. listenerConnection, err = events.NewStreamListenerConnection(conn) if err != nil { return err } } // As we don't know which project we are in, subscribe to events from all projects. listener, err := d.events.AddListener("", true, nil, listenerConnection, strings.Split(typeStr, ","), nil, nil, nil) if err != nil { return err } listener.Wait(r.Context()) return nil } func eventsGet(d *Daemon, r *http.Request) response.Response { return &eventsServe{req: r, d: d} } func eventsPost(d *Daemon, r *http.Request) response.Response { var event api.Event err := json.NewDecoder(r.Body).Decode(&event) if err != nil { return response.InternalError(err) } err = d.events.Send("", event.Type, event.Metadata) if err != nil { return response.InternalError(err) } // Handle device related actions locally. go eventsProcess(event) return response.SyncResponse(true, nil) } func eventsProcess(event api.Event) { // We currently only need to react to device events. if event.Type != "device" { return } type deviceEvent struct { Action string `json:"action"` Config map[string]string `json:"config"` Name string `json:"name"` } e := deviceEvent{} err := json.Unmarshal(event.Metadata, &e) if err != nil { return } // Only care about device additions, we don't try to handle remove. if e.Action != "added" { return } // We only handle disk hotplug. if e.Config["type"] != "disk" { return } // And only for path based devices. if e.Config["path"] == "" { return } // Attempt to perform the mount. 
mntSource := fmt.Sprintf("incus_%s", e.Name) err = tryMountShared(mntSource, e.Config["path"], "virtiofs", nil) if err != nil { logger.Infof("Failed to mount hotplug %q (Type: %q) to %q", mntSource, "virtiofs", e.Config["path"]) return } logger.Infof("Mounted hotplug %q (Type: %q) to %q", mntSource, "virtiofs", e.Config["path"]) } incus-6.0.4/cmd/incus-agent/exec.go000066400000000000000000000275631477363751000171270ustar00rootroot00000000000000package main import ( "context" "encoding/json" "errors" "fmt" "io" "io/fs" "net/http" "os" "os/exec" "strconv" "sync" "syscall" "time" "github.com/gorilla/websocket" "golang.org/x/sys/unix" "github.com/lxc/incus/v6/internal/jmap" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/server/db/operationtype" "github.com/lxc/incus/v6/internal/server/operations" "github.com/lxc/incus/v6/internal/server/response" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/util" "github.com/lxc/incus/v6/shared/ws" ) const ( execWSControl = -1 execWSStdin = 0 execWSStdout = 1 execWSStderr = 2 ) var execCmd = APIEndpoint{ Name: "exec", Path: "exec", Post: APIEndpointAction{Handler: execPost}, } func execPost(d *Daemon, r *http.Request) response.Response { post := api.InstanceExecPost{} buf, err := io.ReadAll(r.Body) if err != nil { return response.BadRequest(err) } err = json.Unmarshal(buf, &post) if err != nil { return response.BadRequest(err) } if !post.WaitForWS { return response.BadRequest(fmt.Errorf("Websockets are required for VM exec")) } env := map[string]string{} if post.Environment != nil { for k, v := range post.Environment { env[k] = v } } // Set default value for PATH _, ok := env["PATH"] if !ok { env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" } if util.PathExists("/snap/bin") { env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"]) } // If running as root, set some env variables if post.User == 0 { // Set default value for HOME _, ok = env["HOME"] if !ok { env["HOME"] = "/root" } // Set default value for USER _, ok = env["USER"] if !ok { env["USER"] = "root" } } // Set default value for LANG _, ok = env["LANG"] if !ok { env["LANG"] = "C.UTF-8" } // Set the default working directory if post.Cwd == "" { post.Cwd = env["HOME"] if post.Cwd == "" { post.Cwd = "/" } } ws := &execWs{} ws.fds = map[int]string{} ws.conns = map[int]*websocket.Conn{} ws.conns[execWSControl] = nil ws.conns[0] = nil // This is used for either TTY or Stdin. if !post.Interactive { ws.conns[execWSStdout] = nil ws.conns[execWSStderr] = nil } ws.requiredConnectedCtx, ws.requiredConnectedDone = context.WithCancel(context.Background()) ws.interactive = post.Interactive for i := range ws.conns { ws.fds[i], err = internalUtil.RandomHexString(32) if err != nil { return response.InternalError(err) } } ws.command = post.Command ws.env = env ws.width = post.Width ws.height = post.Height ws.cwd = post.Cwd ws.uid = post.User ws.gid = post.Group resources := map[string][]api.URL{} op, err := operations.OperationCreate(nil, "", operations.OperationClassWebsocket, operationtype.CommandExec, resources, ws.Metadata(), ws.Do, nil, ws.Connect, r) if err != nil { return response.InternalError(err) } // Link the operation to the agent's event server. 
op.SetEventServer(d.events) return operations.OperationResponse(op) } type execWs struct { command []string env map[string]string conns map[int]*websocket.Conn connsLock sync.Mutex requiredConnectedCtx context.Context requiredConnectedDone func() interactive bool fds map[int]string width int height int uid uint32 gid uint32 cwd string } func (s *execWs) Metadata() any { fds := jmap.Map{} for fd, secret := range s.fds { if fd == execWSControl { fds[api.SecretNameControl] = secret } else { fds[strconv.Itoa(fd)] = secret } } return jmap.Map{ "fds": fds, "command": s.command, "environment": s.env, "interactive": s.interactive, } } func (s *execWs) Connect(op *operations.Operation, r *http.Request, w http.ResponseWriter) error { secret := r.FormValue("secret") if secret == "" { return fmt.Errorf("missing secret") } for fd, fdSecret := range s.fds { if secret == fdSecret { conn, err := ws.Upgrader.Upgrade(w, r, nil) if err != nil { return err } s.connsLock.Lock() defer s.connsLock.Unlock() val, found := s.conns[fd] if found && val == nil { s.conns[fd] = conn for _, c := range s.conns { if c == nil { return nil // Not all required connections connected yet. } } s.requiredConnectedDone() // All required connections now connected. return nil } else if !found { return fmt.Errorf("Unknown websocket number") } else { return fmt.Errorf("Websocket number already connected") } } } /* If we didn't find the right secret, the user provided a bad one, * which 403, not 404, since this Operation actually exists */ return os.ErrPermission } func (s *execWs) Do(op *operations.Operation) error { // Once this function ends ensure that any connected websockets are closed. defer func() { s.connsLock.Lock() for i := range s.conns { if s.conns[i] != nil { _ = s.conns[i].Close() } } s.connsLock.Unlock() }() // As this function only gets called when the exec request has WaitForWS enabled, we expect the client to // connect to all of the required websockets within a short period of time and we won't proceed until then. logger.Debug("Waiting for exec websockets to connect") select { case <-s.requiredConnectedCtx.Done(): break case <-time.After(time.Second * 5): return fmt.Errorf("Timed out waiting for websockets to connect") } var err error var ttys []*os.File var ptys []*os.File var stdin *os.File var stdout *os.File var stderr *os.File if s.interactive { ttys = make([]*os.File, 1) ptys = make([]*os.File, 1) ptys[0], ttys[0], err = linux.OpenPty(int64(s.uid), int64(s.gid)) if err != nil { return err } stdin = ttys[0] stdout = ttys[0] stderr = ttys[0] if s.width > 0 && s.height > 0 { _ = linux.SetPtySize(int(ptys[0].Fd()), s.width, s.height) } } else { ttys = make([]*os.File, 3) ptys = make([]*os.File, 3) for i := 0; i < len(ttys); i++ { ptys[i], ttys[i], err = os.Pipe() if err != nil { return err } } stdin = ptys[execWSStdin] stdout = ttys[execWSStdout] stderr = ttys[execWSStderr] } waitAttachedChildIsDead, markAttachedChildIsDead := context.WithCancel(context.Background()) var wgEOF sync.WaitGroup finisher := func(cmdResult int, cmdErr error) error { // Cancel this before closing the control connection so control handler can detect command ending. markAttachedChildIsDead() for _, tty := range ttys { _ = tty.Close() } s.connsLock.Lock() conn := s.conns[-1] s.connsLock.Unlock() if conn != nil { _ = conn.Close() // Close control connection (will cause control go routine to end). 
} wgEOF.Wait() for _, pty := range ptys { _ = pty.Close() } metadata := jmap.Map{"return": cmdResult} err = op.UpdateMetadata(metadata) if err != nil { return err } return cmdErr } var cmd *exec.Cmd if len(s.command) > 1 { cmd = exec.Command(s.command[0], s.command[1:]...) } else { cmd = exec.Command(s.command[0]) } // Prepare the environment for k, v := range s.env { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) } cmd.Stdin = stdin cmd.Stdout = stdout cmd.Stderr = stderr cmd.SysProcAttr = &syscall.SysProcAttr{ Credential: &syscall.Credential{ Uid: s.uid, Gid: s.gid, }, // Creates a new session if the calling process is not a process group leader. // The calling process is the leader of the new session, the process group leader of // the new process group, and has no controlling terminal. // This is important to allow remote shells to handle ctrl+c. Setsid: true, } // Make the given terminal the controlling terminal of the calling process. // The calling process must be a session leader and not have a controlling terminal already. // This is important as allows ctrl+c to work as expected for non-shell programs. if s.interactive { cmd.SysProcAttr.Setctty = true } cmd.Dir = s.cwd err = cmd.Start() if err != nil { exitStatus := -1 if errors.Is(err, exec.ErrNotFound) || errors.Is(err, fs.ErrNotExist) { exitStatus = 127 } else if errors.Is(err, fs.ErrPermission) { exitStatus = 126 } return finisher(exitStatus, err) } l := logger.AddContext(logger.Ctx{"PID": cmd.Process.Pid, "interactive": s.interactive}) l.Debug("Instance process started") wgEOF.Add(1) go func() { defer wgEOF.Done() l.Debug("Exec control handler started") defer l.Debug("Exec control handler finished") s.connsLock.Lock() conn := s.conns[-1] s.connsLock.Unlock() for { mt, r, err := conn.NextReader() if err != nil || mt == websocket.CloseMessage { // Check if command process has finished normally, if so, no need to kill it. if waitAttachedChildIsDead.Err() != nil { return } if mt == websocket.CloseMessage { l.Warn("Got exec control websocket close message, killing command") } else { l.Warn("Failed getting exec control websocket reader, killing command", logger.Ctx{"err": err}) } err := unix.Kill(cmd.Process.Pid, unix.SIGKILL) if err != nil { l.Error("Failed to send SIGKILL") } else { l.Info("Sent SIGKILL") } return } buf, err := io.ReadAll(r) if err != nil { // Check if command process has finished normally, if so, no need to kill it. 
if waitAttachedChildIsDead.Err() != nil { return } l.Warn("Failed reading control websocket message, killing command", logger.Ctx{"err": err}) return } command := api.InstanceExecControl{} err = json.Unmarshal(buf, &command) if err != nil { l.Debug("Failed to unmarshal control socket command", logger.Ctx{"err": err}) continue } if command.Command == "window-resize" && s.interactive { winchWidth, err := strconv.Atoi(command.Args["width"]) if err != nil { l.Debug("Unable to extract window width", logger.Ctx{"err": err}) continue } winchHeight, err := strconv.Atoi(command.Args["height"]) if err != nil { l.Debug("Unable to extract window height", logger.Ctx{"err": err}) continue } err = linux.SetPtySize(int(ptys[0].Fd()), winchWidth, winchHeight) if err != nil { l.Debug("Failed to set window size", logger.Ctx{"err": err, "width": winchWidth, "height": winchHeight}) continue } } else if command.Command == "signal" { err := unix.Kill(cmd.Process.Pid, unix.Signal(command.Signal)) if err != nil { l.Debug("Failed forwarding signal", logger.Ctx{"err": err, "signal": command.Signal}) continue } l.Info("Forwarded signal", logger.Ctx{"signal": command.Signal}) } } }() if s.interactive { wgEOF.Add(1) go func() { defer wgEOF.Done() l.Debug("Exec mirror websocket started", logger.Ctx{"number": 0}) defer l.Debug("Exec mirror websocket finished", logger.Ctx{"number": 0}) s.connsLock.Lock() conn := s.conns[0] s.connsLock.Unlock() readDone, writeDone := ws.Mirror(conn, linux.NewExecWrapper(waitAttachedChildIsDead, ptys[0])) <-readDone <-writeDone _ = conn.Close() }() } else { wgEOF.Add(len(ttys) - 1) for i := 0; i < len(ttys); i++ { go func(i int) { l.Debug("Exec mirror websocket started", logger.Ctx{"number": i}) defer l.Debug("Exec mirror websocket finished", logger.Ctx{"number": i}) if i == 0 { s.connsLock.Lock() conn := s.conns[i] s.connsLock.Unlock() <-ws.MirrorWrite(conn, ttys[i]) _ = ttys[i].Close() } else { s.connsLock.Lock() conn := s.conns[i] s.connsLock.Unlock() <-ws.MirrorRead(conn, ptys[i]) _ = ptys[i].Close() wgEOF.Done() } }(i) } } exitStatus, err := linux.ExitStatus(cmd.Wait()) l.Debug("Instance process stopped", logger.Ctx{"err": err, "exitStatus": exitStatus}) return finisher(exitStatus, nil) } incus-6.0.4/cmd/incus-agent/main.go000066400000000000000000000021621477363751000171130ustar00rootroot00000000000000package main import ( "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/version" ) type cmdGlobal struct { flagVersion bool flagHelp bool flagLogVerbose bool flagLogDebug bool } func main() { // agent command (main) agentCmd := cmdAgent{} app := agentCmd.Command() app.SilenceUsage = true app.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true} // Workaround for main command app.Args = cobra.ArbitraryArgs // Global flags globalCmd := cmdGlobal{} agentCmd.global = &globalCmd app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") app.PersistentFlags().BoolVarP(&globalCmd.flagLogVerbose, "verbose", "v", false, "Show all information messages") app.PersistentFlags().BoolVarP(&globalCmd.flagLogDebug, "debug", "d", false, "Show all debug messages") // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // Run the main command and handle errors err := app.Execute() if err != nil { os.Exit(1) } } 
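// Added note (hedged): based on the flags registered above, the agent would
// typically be started inside the VM by its init system as something like
//
//	incus-agent --verbose
//
// with --debug enabling more detailed logging and --version printing the build
// version; the actual invocation is driven by the init scripts mentioned in the
// command help text in main_agent.go.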
incus-6.0.4/cmd/incus-agent/main_agent.go000066400000000000000000000212451477363751000202740ustar00rootroot00000000000000package main import ( "context" "encoding/json" "fmt" "io" "net/http" "os" "os/signal" "slices" "strings" "sync" "time" "github.com/spf13/cobra" "golang.org/x/sys/unix" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/server/instance/instancetype" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/subprocess" "github.com/lxc/incus/v6/shared/util" ) var ( servers = make(map[string]*http.Server, 2) errChan = make(chan error) ) type cmdAgent struct { global *cmdGlobal } func (c *cmdAgent) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "incus-agent [--debug]" cmd.Short = "Incus virtual machine agent" cmd.Long = `Description: Incus virtual machine agent This daemon is to be run inside virtual machines managed by Incus. It will normally be started through init scripts present or injected into the virtual machine. ` cmd.RunE = c.Run return cmd } func (c *cmdAgent) Run(cmd *cobra.Command, args []string) error { // Setup logger. err := logger.InitLogger("", "", c.global.flagLogVerbose, c.global.flagLogDebug, nil) if err != nil { os.Exit(1) } logger.Info("Starting") defer logger.Info("Stopped") // Apply the templated files. files, err := templatesApply("files/") if err != nil { return err } // Sync the hostname. if util.PathExists("/proc/sys/kernel/hostname") && slices.Contains(files, "/etc/hostname") { // Open the two files. src, err := os.Open("/etc/hostname") if err != nil { return err } dst, err := os.Create("/proc/sys/kernel/hostname") if err != nil { return err } // Copy the data. _, err = io.Copy(dst, src) if err != nil { return err } // Close the files. _ = src.Close() err = dst.Close() if err != nil { return err } } // Run cloud-init. if util.PathExists("/etc/cloud") && slices.Contains(files, "/var/lib/cloud/seed/nocloud-net/meta-data") { logger.Info("Seeding cloud-init") cloudInitPath := "/run/cloud-init" if util.PathExists(cloudInitPath) { logger.Info(fmt.Sprintf("Removing %q", cloudInitPath)) err = os.RemoveAll(cloudInitPath) if err != nil { return err } } logger.Info("Rebooting") _, _ = subprocess.RunCommand("reboot") // Wait up to 5min for the reboot to actually happen, if it doesn't, then move on to allowing connections. time.Sleep(300 * time.Second) } reconfigureNetworkInterfaces() // Load the kernel driver. if !util.PathExists("/dev/vsock") { logger.Info("Loading vsock module") err = linux.LoadModule("vsock") if err != nil { return fmt.Errorf("Unable to load the vsock kernel module: %w", err) } // Wait for vsock device to appear. for i := 0; i < 5; i++ { if !util.PathExists("/dev/vsock") { time.Sleep(1 * time.Second) } } } // Mount shares from host. c.mountHostShares() d := newDaemon(c.global.flagLogDebug, c.global.flagLogVerbose) // Start the server. err = startHTTPServer(d, c.global.flagLogDebug) if err != nil { return fmt.Errorf("Failed to start HTTP server: %w", err) } // Check whether we should start the DevIncus server in the early setup. This way, /dev/incus/sock // will be available for any systemd services starting after the agent. if util.PathExists("agent.conf") { f, err := os.Open("agent.conf") if err != nil { return err } err = setConnectionInfo(d, f) if err != nil { _ = f.Close() return err } _ = f.Close() if d.DevIncusEnabled { err = startDevIncusServer(d) if err != nil { return err } } } // Create a cancellation context. 
ctx, cancelFunc := context.WithCancel(context.Background()) // Start status notifier in background. cancelStatusNotifier := c.startStatusNotifier(ctx, d.chConnected) // Done with early setup, tell systemd to continue boot. // Allows a service that needs a file that's generated by the agent to be able to declare After=incus-agent // and know the file will have been created by the time the service is started. if os.Getenv("NOTIFY_SOCKET") != "" { _, err := subprocess.RunCommand("systemd-notify", "READY=1") if err != nil { cancelStatusNotifier() // Ensure STOPPED status is written to QEMU status ringbuffer. cancelFunc() return fmt.Errorf("Failed to notify systemd of readiness: %w", err) } } // Cancel context when SIGTEM is received. chSignal := make(chan os.Signal, 1) signal.Notify(chSignal, unix.SIGTERM) exitStatus := 0 select { case <-chSignal: case err := <-errChan: fmt.Fprintln(os.Stderr, err) exitStatus = 1 } cancelStatusNotifier() // Ensure STOPPED status is written to QEMU status ringbuffer. cancelFunc() os.Exit(exitStatus) return nil } // startStatusNotifier sends status of agent to vserial ring buffer every 10s or when context is done. // Returns a function that can be used to update the running status to STOPPED in the ring buffer. func (c *cmdAgent) startStatusNotifier(ctx context.Context, chConnected <-chan struct{}) context.CancelFunc { // Write initial started status. _ = c.writeStatus("STARTED") wg := sync.WaitGroup{} exitCtx, exit := context.WithCancel(ctx) // Allows manual synchronous cancellation via cancel function. cancel := func() { exit() // Signal for the go routine to end. wg.Wait() // Wait for the go routine to actually finish. } wg.Add(1) go func() { defer wg.Done() // Signal to cancel function that we are done. ticker := time.NewTicker(time.Duration(time.Second) * 5) defer ticker.Stop() for { select { case <-chConnected: _ = c.writeStatus("CONNECTED") // Indicate we were able to connect. case <-ticker.C: _ = c.writeStatus("STARTED") // Re-populate status periodically in case the daemon restarts. case <-exitCtx.Done(): _ = c.writeStatus("STOPPED") // Indicate we are stopping and exit go routine. return } } }() return cancel } // writeStatus writes a status code to the vserial ring buffer used to detect agent status on host. func (c *cmdAgent) writeStatus(status string) error { if util.PathExists("/dev/virtio-ports/org.linuxcontainers.incus") { vSerial, err := os.OpenFile("/dev/virtio-ports/org.linuxcontainers.incus", os.O_RDWR, 0o600) if err != nil { return err } defer vSerial.Close() _, err = vSerial.Write([]byte(fmt.Sprintf("%s\n", status))) if err != nil { return err } } return nil } // mountHostShares reads the agent-mounts.json file from config share and mounts the shares requested. 
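// Added illustration (field names inferred from the struct usage below; the exact
// JSON tags of instancetype.VMAgentMount are an assumption): an agent-mounts.json
// provided through the config share might look roughly like
//
//	[
//	  {"source": "config", "target": "/run/incus_agent", "fstype": "9p",
//	   "options": ["trans=virtio", "msize=104857600"]}
//	]
//
// Only the "9p" and "virtiofs" filesystem types are accepted; anything else is
// logged and skipped by the loop below.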
func (c *cmdAgent) mountHostShares() { agentMountsFile := "./agent-mounts.json" if !util.PathExists(agentMountsFile) { return } b, err := os.ReadFile(agentMountsFile) if err != nil { logger.Errorf("Failed to load agent mounts file %q: %v", agentMountsFile, err) } var agentMounts []instancetype.VMAgentMount err = json.Unmarshal(b, &agentMounts) if err != nil { logger.Errorf("Failed to parse agent mounts file %q: %v", agentMountsFile, err) return } for _, mount := range agentMounts { if !slices.Contains([]string{"9p", "virtiofs"}, mount.FSType) { logger.Infof("Unsupported mount fstype %q", mount.FSType) continue } err = tryMountShared(mount.Source, mount.Target, mount.FSType, mount.Options) if err != nil { logger.Infof("Failed to mount %q (Type: %q, Options: %v) to %q: %v", mount.Source, "virtiofs", mount.Options, mount.Target, err) continue } logger.Infof("Mounted %q (Type: %q, Options: %v) to %q", mount.Source, mount.FSType, mount.Options, mount.Target) } } func tryMountShared(src string, dst string, fstype string, opts []string) error { // Convert relative mounts to absolute from / otherwise dir creation fails or mount fails. if !strings.HasPrefix(dst, "/") { dst = fmt.Sprintf("/%s", dst) } // Check mount path. if !util.PathExists(dst) { // Create the mount path. err := os.MkdirAll(dst, 0o755) if err != nil { return fmt.Errorf("Failed to create mount target %q", dst) } } else if linux.IsMountPoint(dst) { // Already mounted. return nil } // Prepare the arguments. sharedArgs := []string{} p9Args := []string{} for _, opt := range opts { // transport and msize mount option are specific to 9p. if strings.HasPrefix(opt, "trans=") || strings.HasPrefix(opt, "msize=") { p9Args = append(p9Args, "-o", opt) continue } sharedArgs = append(sharedArgs, "-o", opt) } // Always try virtiofs first. args := []string{"-t", "virtiofs", src, dst} args = append(args, sharedArgs...) _, err := subprocess.RunCommand("mount", args...) if err == nil { return nil } else if fstype == "virtiofs" { return err } // Then fallback to 9p. args = []string{"-t", "9p", src, dst} args = append(args, sharedArgs...) args = append(args, p9Args...) _, err = subprocess.RunCommand("mount", args...) if err != nil { return err } return nil } incus-6.0.4/cmd/incus-agent/metrics.go000066400000000000000000000236421477363751000176430ustar00rootroot00000000000000package main import ( "bufio" "bytes" "fmt" "net/http" "os" "path/filepath" "regexp" "slices" "strconv" "strings" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/server/metrics" "github.com/lxc/incus/v6/internal/server/response" "github.com/lxc/incus/v6/shared/logger" ) // These mountpoints are excluded as they are irrelevant for metrics. 
// /var/lib/docker/* subdirectories are excluded for this reason: https://github.com/prometheus/node_exporter/pull/1003 var ( defMountPointsExcluded = regexp.MustCompile(`^/(?:dev|proc|sys|var/lib/docker/.+)(?:$|/)`) defFSTypesExcluded = []string{ "autofs", "binfmt_misc", "bpf", "cgroup", "cgroup2", "configfs", "debugfs", "devpts", "devtmpfs", "fusectl", "hugetlbfs", "iso9660", "mqueue", "nsfs", "overlay", "proc", "procfs", "pstore", "rpc_pipefs", "securityfs", "selinuxfs", "squashfs", "sysfs", "tracefs", } ) var metricsCmd = APIEndpoint{ Path: "metrics", Get: APIEndpointAction{Handler: metricsGet}, } func metricsGet(d *Daemon, r *http.Request) response.Response { out := metrics.Metrics{} diskStats, err := getDiskMetrics(d) if err != nil { logger.Warn("Failed to get disk metrics", logger.Ctx{"err": err}) } else { out.Disk = diskStats } filesystemStats, err := getFilesystemMetrics(d) if err != nil { logger.Warn("Failed to get filesystem metrics", logger.Ctx{"err": err}) } else { out.Filesystem = filesystemStats } memStats, err := getMemoryMetrics(d) if err != nil { logger.Warn("Failed to get memory metrics", logger.Ctx{"err": err}) } else { out.Memory = memStats } netStats, err := getNetworkMetrics(d) if err != nil { logger.Warn("Failed to get network metrics", logger.Ctx{"err": err}) } else { out.Network = netStats } out.ProcessesTotal, err = getTotalProcesses(d) if err != nil { logger.Warn("Failed to get total processes", logger.Ctx{"err": err}) } cpuStats, err := getCPUMetrics(d) if err != nil { logger.Warn("Failed to get CPU metrics", logger.Ctx{"err": err}) } else { out.CPU = cpuStats } return response.SyncResponse(true, &out) } func getCPUMetrics(d *Daemon) ([]metrics.CPUMetrics, error) { stats, err := os.ReadFile("/proc/stat") if err != nil { return nil, fmt.Errorf("Failed to read /proc/stat: %w", err) } out := []metrics.CPUMetrics{} scanner := bufio.NewScanner(bytes.NewReader(stats)) for scanner.Scan() { line := scanner.Text() fields := strings.Fields(line) // Only consider CPU info, skip everything else. Skip aggregated CPU stats since there will // be stats for each individual CPU. if !strings.HasPrefix(fields[0], "cpu") || fields[0] == "cpu" { continue } // Validate the number of fields only for lines starting with "cpu". 
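// Added context: each per-CPU line in /proc/stat has the form
//
//	cpu0 user nice system idle iowait irq softirq steal guest guest_nice
//
// with the values expressed in USER_HZ ticks (normally 100 per second), which is
// why every field parsed below is divided by 100 to convert it to seconds. The
// length check guards against truncated lines before the individual fields are read.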
if len(fields) < 9 { return nil, fmt.Errorf("Invalid /proc/stat content: %q", line) } stats := metrics.CPUMetrics{} stats.SecondsUser, err = strconv.ParseFloat(fields[1], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[1], err) } stats.SecondsUser /= 100 stats.SecondsNice, err = strconv.ParseFloat(fields[2], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[2], err) } stats.SecondsNice /= 100 stats.SecondsSystem, err = strconv.ParseFloat(fields[3], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[3], err) } stats.SecondsSystem /= 100 stats.SecondsIdle, err = strconv.ParseFloat(fields[4], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[4], err) } stats.SecondsIdle /= 100 stats.SecondsIOWait, err = strconv.ParseFloat(fields[5], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[5], err) } stats.SecondsIOWait /= 100 stats.SecondsIRQ, err = strconv.ParseFloat(fields[6], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[6], err) } stats.SecondsIRQ /= 100 stats.SecondsSoftIRQ, err = strconv.ParseFloat(fields[7], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[7], err) } stats.SecondsSoftIRQ /= 100 stats.SecondsSteal, err = strconv.ParseFloat(fields[8], 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[8], err) } stats.SecondsSteal /= 100 stats.CPU = fields[0] out = append(out, stats) } return out, nil } func getTotalProcesses(d *Daemon) (uint64, error) { entries, err := os.ReadDir("/proc") if err != nil { return 0, fmt.Errorf("Failed to read dir %q: %w", "/proc", err) } pidCount := uint64(0) for _, entry := range entries { // Skip everything which isn't a directory if !entry.IsDir() { continue } name := entry.Name() // Skip all non-PID directories _, err := strconv.ParseUint(name, 10, 64) if err != nil { continue } cmdlinePath := filepath.Join("/proc", name, "cmdline") cmdline, err := os.ReadFile(cmdlinePath) if err != nil { continue } if string(cmdline) == "" { continue } pidCount++ } return pidCount, nil } func getDiskMetrics(d *Daemon) ([]metrics.DiskMetrics, error) { diskStats, err := os.ReadFile("/proc/diskstats") if err != nil { return nil, fmt.Errorf("Failed to read /proc/diskstats: %w", err) } out := []metrics.DiskMetrics{} scanner := bufio.NewScanner(bytes.NewReader(diskStats)) for scanner.Scan() { line := scanner.Text() if line == "" { continue } fields := strings.Fields(line) if len(fields) < 10 { return nil, fmt.Errorf("Invalid /proc/diskstats content: %q", line) } stats := metrics.DiskMetrics{} stats.ReadsCompleted, err = strconv.ParseUint(fields[3], 10, 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[3], err) } sectorsRead, err := strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[3], err) } stats.ReadBytes = sectorsRead * 512 stats.WritesCompleted, err = strconv.ParseUint(fields[7], 10, 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[3], err) } sectorsWritten, err := strconv.ParseUint(fields[9], 10, 64) if err != nil { return nil, fmt.Errorf("Failed to parse %q: %w", fields[3], err) } stats.WrittenBytes = sectorsWritten * 512 stats.Device = fields[2] out = append(out, stats) } return out, nil } func getFilesystemMetrics(d *Daemon) ([]metrics.FilesystemMetrics, error) { mounts, err := os.ReadFile("/proc/mounts") if err != nil { return nil, fmt.Errorf("Failed 
to read /proc/mounts: %w", err) } out := []metrics.FilesystemMetrics{} scanner := bufio.NewScanner(bytes.NewReader(mounts)) for scanner.Scan() { line := scanner.Text() fields := strings.Fields(line) if len(fields) < 3 { return nil, fmt.Errorf("Invalid /proc/mounts content: %q", line) } // Skip uninteresting mounts if slices.Contains(defFSTypesExcluded, fields[2]) || defMountPointsExcluded.MatchString(fields[1]) { continue } stats := metrics.FilesystemMetrics{} stats.Mountpoint = fields[1] statfs, err := linux.StatVFS(stats.Mountpoint) if err != nil { return nil, fmt.Errorf("Failed to stat %s: %w", stats.Mountpoint, err) } fsType, err := linux.FSTypeToName(int32(statfs.Type)) if err == nil { stats.FSType = fsType } stats.AvailableBytes = statfs.Bavail * uint64(statfs.Bsize) stats.FreeBytes = statfs.Bfree * uint64(statfs.Bsize) stats.SizeBytes = statfs.Blocks * uint64(statfs.Bsize) stats.Device = fields[0] out = append(out, stats) } return out, nil } func getMemoryMetrics(d *Daemon) (metrics.MemoryMetrics, error) { content, err := os.ReadFile("/proc/meminfo") if err != nil { return metrics.MemoryMetrics{}, fmt.Errorf("Failed to read /proc/meminfo: %w", err) } out := metrics.MemoryMetrics{} scanner := bufio.NewScanner(bytes.NewReader(content)) for scanner.Scan() { line := scanner.Text() fields := strings.Fields(line) if len(fields) < 2 { return metrics.MemoryMetrics{}, fmt.Errorf("Invalid /proc/meminfo content: %q", line) } fields[0] = strings.TrimRight(fields[0], ":") value, err := strconv.ParseUint(fields[1], 10, 64) if err != nil { return metrics.MemoryMetrics{}, fmt.Errorf("Failed to parse %q: %w", fields[1], err) } // Multiply suffix (kB) if len(fields) == 3 { value *= 1024 } // FIXME: Missing RSS switch fields[0] { case "Active": out.ActiveBytes = value case "Active(anon)": out.ActiveAnonBytes = value case "Active(file)": out.ActiveFileBytes = value case "Cached": out.CachedBytes = value case "Dirty": out.DirtyBytes = value case "HugePages_Free": out.HugepagesFreeBytes = value case "HugePages_Total": out.HugepagesTotalBytes = value case "Inactive": out.InactiveBytes = value case "Inactive(anon)": out.InactiveAnonBytes = value case "Inactive(file)": out.InactiveFileBytes = value case "Mapped": out.MappedBytes = value case "MemAvailable": out.MemAvailableBytes = value case "MemFree": out.MemFreeBytes = value case "MemTotal": out.MemTotalBytes = value case "Shmem": out.ShmemBytes = value case "SwapCached": out.SwapBytes = value case "Unevictable": out.UnevictableBytes = value case "Writeback": out.WritebackBytes = value } } return out, nil } func getNetworkMetrics(d *Daemon) ([]metrics.NetworkMetrics, error) { out := []metrics.NetworkMetrics{} for dev, state := range networkState() { stats := metrics.NetworkMetrics{} stats.ReceiveBytes = uint64(state.Counters.BytesReceived) stats.ReceiveDrop = uint64(state.Counters.PacketsDroppedInbound) stats.ReceiveErrors = uint64(state.Counters.ErrorsReceived) stats.ReceivePackets = uint64(state.Counters.PacketsReceived) stats.TransmitBytes = uint64(state.Counters.BytesSent) stats.TransmitDrop = uint64(state.Counters.PacketsDroppedOutbound) stats.TransmitErrors = uint64(state.Counters.ErrorsSent) stats.TransmitPackets = uint64(state.Counters.PacketsSent) stats.Device = dev out = append(out, stats) } return out, nil } incus-6.0.4/cmd/incus-agent/network.go000066400000000000000000000110751477363751000176630ustar00rootroot00000000000000package main import ( "crypto/tls" "encoding/json" "errors" "io/fs" "net" "os" "path/filepath" "sync" 
"github.com/lxc/incus/v6/internal/linux" deviceConfig "github.com/lxc/incus/v6/internal/server/device/config" "github.com/lxc/incus/v6/internal/server/ip" "github.com/lxc/incus/v6/internal/server/util" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/revert" localtls "github.com/lxc/incus/v6/shared/tls" ) // A variation of the standard tls.Listener that supports atomically swapping // the underlying TLS configuration. Requests served before the swap will // continue using the old configuration. type networkListener struct { net.Listener mu sync.RWMutex config *tls.Config } func networkTLSListener(inner net.Listener, config *tls.Config) *networkListener { listener := &networkListener{ Listener: inner, config: config, } return listener } // Accept waits for and returns the next incoming TLS connection then use the // current TLS configuration to handle it. func (l *networkListener) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { return nil, err } l.mu.RLock() defer l.mu.RUnlock() return tls.Server(c, l.config), nil } func serverTLSConfig() (*tls.Config, error) { certInfo, err := localtls.KeyPairAndCA(".", "agent", localtls.CertServer, false) if err != nil { return nil, err } tlsConfig := util.ServerTLSConfig(certInfo) return tlsConfig, nil } // reconfigureNetworkInterfaces checks for the existence of files under NICConfigDir in the config share. // Each file is named .json and contains the Device Name, NIC Name, MTU and MAC address. func reconfigureNetworkInterfaces() { nicDirEntries, err := os.ReadDir(deviceConfig.NICConfigDir) if err != nil { // Abort if configuration folder does not exist (nothing to do), otherwise log and return. if errors.Is(err, fs.ErrNotExist) { return } logger.Error("Could not read network interface configuration directory", logger.Ctx{"err": err}) return } // Attempt to load the virtio_net driver in case it's not be loaded yet. _ = linux.LoadModule("virtio_net") // nicData is a map of MAC address to NICConfig. nicData := make(map[string]deviceConfig.NICConfig, len(nicDirEntries)) for _, f := range nicDirEntries { nicBytes, err := os.ReadFile(filepath.Join(deviceConfig.NICConfigDir, f.Name())) if err != nil { logger.Error("Could not read network interface configuration file", logger.Ctx{"err": err}) } var conf deviceConfig.NICConfig err = json.Unmarshal(nicBytes, &conf) if err != nil { logger.Error("Could not parse network interface configuration file", logger.Ctx{"err": err}) return } if conf.MACAddress != "" { nicData[conf.MACAddress] = conf } } // configureNIC applies any config specified for the interface based on its current MAC address. configureNIC := func(currentNIC net.Interface) error { revert := revert.New() defer revert.Fail() // Look for a NIC config entry for this interface based on its MAC address. nic, ok := nicData[currentNIC.HardwareAddr.String()] if !ok { return nil } var changeName, changeMTU bool if nic.NICName != "" && currentNIC.Name != nic.NICName { changeName = true } if nic.MTU > 0 && currentNIC.MTU != int(nic.MTU) { changeMTU = true } if !changeName && !changeMTU { return nil // Nothing to do. } link := ip.Link{ Name: currentNIC.Name, MTU: uint32(currentNIC.MTU), } err := link.SetDown() if err != nil { return err } revert.Add(func() { _ = link.SetUp() }) // Apply the name from the NIC config if needed. 
if changeName { err = link.SetName(nic.NICName) if err != nil { return err } revert.Add(func() { err := link.SetName(currentNIC.Name) if err != nil { return } link.Name = currentNIC.Name }) link.Name = nic.NICName } // Apply the MTU from the NIC config if needed. if changeMTU { err = link.SetMTU(nic.MTU) if err != nil { return err } link.MTU = nic.MTU revert.Add(func() { err := link.SetMTU(uint32(currentNIC.MTU)) if err != nil { return } link.MTU = uint32(currentNIC.MTU) }) } err = link.SetUp() if err != nil { return err } revert.Success() return nil } ifaces, err := net.Interfaces() if err != nil { logger.Error("Unable to read network interfaces", logger.Ctx{"err": err}) } for _, iface := range ifaces { err = configureNIC(iface) if err != nil { logger.Error("Unable to reconfigure network interface", logger.Ctx{"interface": iface.Name, "err": err}) } } } incus-6.0.4/cmd/incus-agent/operations.go000066400000000000000000000110701477363751000203500ustar00rootroot00000000000000package main import ( "context" "fmt" "log" "net/http" "net/url" "strconv" "strings" "time" "github.com/gorilla/mux" "github.com/lxc/incus/v6/internal/jmap" "github.com/lxc/incus/v6/internal/server/operations" "github.com/lxc/incus/v6/internal/server/response" localUtil "github.com/lxc/incus/v6/internal/server/util" "github.com/lxc/incus/v6/shared/api" ) var operationCmd = APIEndpoint{ Path: "operations/{id}", Delete: APIEndpointAction{Handler: operationDelete}, Get: APIEndpointAction{Handler: operationGet}, } var operationsCmd = APIEndpoint{ Path: "operations", Get: APIEndpointAction{Handler: operationsGet}, } var operationWebsocket = APIEndpoint{ Path: "operations/{id}/websocket", Get: APIEndpointAction{Handler: operationWebsocketGet}, } var operationWait = APIEndpoint{ Path: "operations/{id}/wait", Get: APIEndpointAction{Handler: operationWaitGet}, } func operationDelete(d *Daemon, r *http.Request) response.Response { id, err := url.PathUnescape(mux.Vars(r)["id"]) if err != nil { return response.SmartError(err) } // First check if the query is for a local operation from this node op, err := operations.OperationGetInternal(id) if err != nil { return response.SmartError(err) } _, err = op.Cancel() if err != nil { return response.BadRequest(err) } return response.EmptySyncResponse } func operationGet(d *Daemon, r *http.Request) response.Response { id, err := url.PathUnescape(mux.Vars(r)["id"]) if err != nil { return response.SmartError(err) } var body *api.Operation // First check if the query is for a local operation from this node op, err := operations.OperationGetInternal(id) if err != nil { return response.SmartError(err) } _, body, err = op.Render() if err != nil { log.Println(fmt.Errorf("Failed to handle operations request: %w", err)) } return response.SyncResponse(true, body) } func operationsGet(d *Daemon, r *http.Request) response.Response { recursion := localUtil.IsRecursionRequest(r) localOperationURLs := func() (jmap.Map, error) { // Get all the operations ops := operations.Clone() // Build a list of URLs body := jmap.Map{} for _, v := range ops { status := strings.ToLower(v.Status().String()) _, ok := body[status] if !ok { body[status] = make([]string, 0) } body[status] = append(body[status].([]string), v.URL()) } return body, nil } localOperations := func() (jmap.Map, error) { // Get all the operations ops := operations.Clone() // Build a list of operations body := jmap.Map{} for _, v := range ops { status := strings.ToLower(v.Status().String()) _, ok := body[status] if !ok { body[status] = 
make([]*api.Operation, 0) } _, op, err := v.Render() if err != nil { return nil, err } body[status] = append(body[status].([]*api.Operation), op) } return body, nil } // Start with local operations var md jmap.Map var err error if recursion { md, err = localOperations() if err != nil { return response.InternalError(err) } } else { md, err = localOperationURLs() if err != nil { return response.InternalError(err) } } return response.SyncResponse(true, md) } func operationWebsocketGet(d *Daemon, r *http.Request) response.Response { id, err := url.PathUnescape(mux.Vars(r)["id"]) if err != nil { return response.SmartError(err) } // First check if the query is for a local operation from this node op, err := operations.OperationGetInternal(id) if err != nil { return response.SmartError(err) } return operations.OperationWebSocket(r, op) } func operationWaitGet(d *Daemon, r *http.Request) response.Response { id, err := url.PathUnescape(mux.Vars(r)["id"]) if err != nil { return response.InternalError(fmt.Errorf("Failed to extract operation ID from URL: %w", err)) } timeoutSecs := -1 if r.FormValue("timeout") != "" { timeoutSecs, err = strconv.Atoi(r.FormValue("timeout")) if err != nil { return response.InternalError(fmt.Errorf("Failed to extract operation wait timeout from URL: %w", err)) } } var ctx context.Context var cancel context.CancelFunc if timeoutSecs > -1 { ctx, cancel = context.WithDeadline(r.Context(), time.Now().Add(time.Second*time.Duration(timeoutSecs))) } else { ctx, cancel = context.WithCancel(r.Context()) } defer cancel() op, err := operations.OperationGetInternal(id) if err != nil { return response.NotFound(err) } err = op.Wait(ctx) if err != nil { return response.SmartError(err) } _, opAPI, err := op.Render() if err != nil { return response.SmartError(err) } return response.SyncResponse(true, opAPI) } incus-6.0.4/cmd/incus-agent/response.go000066400000000000000000000012171477363751000200250ustar00rootroot00000000000000package main import ( "net/http" "github.com/lxc/incus/v6/shared/api" ) type devIncusResponse struct { content any code int ctype string } func errorResponse(code int, msg string) *devIncusResponse { return &devIncusResponse{msg, code, "raw"} } func okResponse(ct any, ctype string) *devIncusResponse { return &devIncusResponse{ct, http.StatusOK, ctype} } func smartResponse(err error) *devIncusResponse { if err == nil { return okResponse(nil, "") } statusCode, found := api.StatusErrorMatch(err) if found { return errorResponse(statusCode, err.Error()) } return errorResponse(http.StatusInternalServerError, err.Error()) } incus-6.0.4/cmd/incus-agent/server.go000066400000000000000000000062601477363751000175000ustar00rootroot00000000000000package main import ( "bytes" "crypto/tls" "crypto/x509" "fmt" "io" "net/http" "github.com/gorilla/mux" internalIO "github.com/lxc/incus/v6/internal/io" "github.com/lxc/incus/v6/internal/server/response" localUtil "github.com/lxc/incus/v6/internal/server/util" "github.com/lxc/incus/v6/shared/logger" ) func restServer(tlsConfig *tls.Config, cert *x509.Certificate, debug bool, d *Daemon) *http.Server { mux := mux.NewRouter() mux.StrictSlash(false) // Don't redirect to URL with trailing slash. mux.UseEncodedPath() // Allow encoded values in path segments. 
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _ = response.SyncResponse(true, []string{"/1.0"}).Render(w) }) for _, c := range api10 { createCmd(mux, "1.0", c, cert, debug, d) } return &http.Server{Handler: mux, TLSConfig: tlsConfig} } func createCmd(restAPI *mux.Router, version string, c APIEndpoint, cert *x509.Certificate, debug bool, d *Daemon) { var uri string if c.Path == "" { uri = fmt.Sprintf("/%s", version) } else { uri = fmt.Sprintf("/%s/%s", version, c.Path) } route := restAPI.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") if !authenticate(r, cert) { logger.Error("Not authorized") _ = response.InternalError(fmt.Errorf("Not authorized")).Render(w) return } // Dump full request JSON when in debug mode if r.Method != "GET" && localUtil.IsJSONRequest(r) { newBody := &bytes.Buffer{} captured := &bytes.Buffer{} multiW := io.MultiWriter(newBody, captured) _, err := io.Copy(multiW, r.Body) if err != nil { _ = response.InternalError(err).Render(w) return } r.Body = internalIO.BytesReadCloser{Buf: newBody} localUtil.DebugJSON("API Request", captured, logger.Log) } // Actually process the request var resp response.Response handleRequest := func(action APIEndpointAction) response.Response { if action.Handler == nil { return response.NotImplemented(nil) } return action.Handler(d, r) } switch r.Method { case "GET": resp = handleRequest(c.Get) case "PUT": resp = handleRequest(c.Put) case "POST": resp = handleRequest(c.Post) case "DELETE": resp = handleRequest(c.Delete) case "PATCH": resp = handleRequest(c.Patch) default: resp = response.NotFound(fmt.Errorf("Method %q not found", r.Method)) } // Handle errors err := resp.Render(w) if err != nil { writeErr := response.InternalError(err).Render(w) if writeErr != nil { logger.Error("Failed writing error for HTTP response", logger.Ctx{"url": uri, "error": err, "writeErr": writeErr}) } } }) // If the endpoint has a canonical name then record it so it can be used to build URLS // and accessed in the context of the request by the handler function. if c.Name != "" { route.Name(c.Name) } } func authenticate(r *http.Request, cert *x509.Certificate) bool { clientCerts := map[string]x509.Certificate{"0": *cert} for _, cert := range r.TLS.PeerCertificates { trusted, _ := localUtil.CheckTrustState(*cert, clientCerts, nil, false) if trusted { return true } } return false } incus-6.0.4/cmd/incus-agent/sftp.go000066400000000000000000000025521477363751000171460ustar00rootroot00000000000000package main import ( "fmt" "net/http" "github.com/pkg/sftp" "github.com/lxc/incus/v6/internal/server/response" ) var sftpCmd = APIEndpoint{ Name: "sftp", Path: "sftp", Get: APIEndpointAction{Handler: sftpHandler}, } func sftpHandler(d *Daemon, r *http.Request) response.Response { return &sftpServe{d, r} } type sftpServe struct { d *Daemon r *http.Request } func (r *sftpServe) String() string { return "sftp handler" } // Code returns the HTTP code. func (r *sftpServe) Code() int { return http.StatusOK } func (r *sftpServe) Render(w http.ResponseWriter) error { // Upgrade to sftp. 
if r.r.Header.Get("Upgrade") != "sftp" { http.Error(w, "Missing or invalid upgrade header", http.StatusBadRequest) return nil } hijacker, ok := w.(http.Hijacker) if !ok { http.Error(w, "Webserver doesn't support hijacking", http.StatusInternalServerError) return nil } conn, _, err := hijacker.Hijack() if err != nil { http.Error(w, fmt.Errorf("Failed to hijack connection: %w", err).Error(), http.StatusInternalServerError) return nil } defer func() { _ = conn.Close() }() err = response.Upgrade(conn, "sftp") if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return nil } // Start sftp server. server, err := sftp.NewServer(conn, sftp.WithAllocator()) if err != nil { return nil } return server.Serve() } incus-6.0.4/cmd/incus-agent/state.go000066400000000000000000000153131477363751000173110ustar00rootroot00000000000000package main import ( "bufio" "bytes" "context" "fmt" "net" "net/http" "os" "strconv" "strings" "time" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/server/response" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/logger" "github.com/lxc/incus/v6/shared/osarch" "github.com/lxc/incus/v6/shared/util" ) var stateCmd = APIEndpoint{ Name: "state", Path: "state", Get: APIEndpointAction{Handler: stateGet}, Put: APIEndpointAction{Handler: statePut}, } func stateGet(d *Daemon, r *http.Request) response.Response { return response.SyncResponse(true, renderState()) } func statePut(d *Daemon, r *http.Request) response.Response { return response.NotImplemented(nil) } func renderState() *api.InstanceState { return &api.InstanceState{ CPU: cpuState(), Memory: memoryState(), Network: networkState(), Pid: 1, Processes: processesState(), OSInfo: osState(), } } func cpuState() api.InstanceStateCPU { var value []byte var err error cpu := api.InstanceStateCPU{} if util.PathExists("/sys/fs/cgroup/cpuacct/cpuacct.usage") { // CPU usage in seconds value, err = os.ReadFile("/sys/fs/cgroup/cpuacct/cpuacct.usage") if err != nil { cpu.Usage = -1 return cpu } valueInt, err := strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err != nil { cpu.Usage = -1 return cpu } cpu.Usage = valueInt return cpu } else if util.PathExists("/sys/fs/cgroup/cpu.stat") { stats, err := os.ReadFile("/sys/fs/cgroup/cpu.stat") if err != nil { cpu.Usage = -1 return cpu } scanner := bufio.NewScanner(bytes.NewReader(stats)) for scanner.Scan() { fields := strings.Fields(scanner.Text()) if fields[0] == "usage_usec" { valueInt, err := strconv.ParseInt(fields[1], 10, 64) if err != nil { cpu.Usage = -1 return cpu } // usec -> nsec cpu.Usage = valueInt * 1000 return cpu } } } cpu.Usage = -1 return cpu } func memoryState() api.InstanceStateMemory { memory := api.InstanceStateMemory{} stats, err := getMemoryMetrics(nil) if err != nil { return memory } memory.Usage = int64(stats.MemTotalBytes) - int64(stats.MemFreeBytes) memory.Total = int64(stats.MemTotalBytes) // Memory peak in bytes value, err := os.ReadFile("/sys/fs/cgroup/memory/memory.max_usage_in_bytes") valueInt, err1 := strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err == nil && err1 == nil { memory.UsagePeak = valueInt } return memory } func networkState() map[string]api.InstanceStateNetwork { result := map[string]api.InstanceStateNetwork{} ifs, err := linux.NetlinkInterfaces() if err != nil { logger.Errorf("Failed to retrieve network interfaces: %v", err) return result } for _, iface := range ifs { network := api.InstanceStateNetwork{ Addresses: []api.InstanceStateNetworkAddress{}, 
Counters: api.InstanceStateNetworkCounters{}, } network.Hwaddr = iface.HardwareAddr.String() network.Mtu = iface.MTU if iface.Flags&net.FlagUp != 0 { network.State = "up" } else { network.State = "down" } if iface.Flags&net.FlagBroadcast != 0 { network.Type = "broadcast" } else if iface.Flags&net.FlagLoopback != 0 { network.Type = "loopback" } else if iface.Flags&net.FlagPointToPoint != 0 { network.Type = "point-to-point" } else { network.Type = "unknown" } // Counters value, err := os.ReadFile(fmt.Sprintf("/sys/class/net/%s/statistics/tx_bytes", iface.Name)) valueInt, err1 := strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err == nil && err1 == nil { network.Counters.BytesSent = valueInt } value, err = os.ReadFile(fmt.Sprintf("/sys/class/net/%s/statistics/rx_bytes", iface.Name)) valueInt, err1 = strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err == nil && err1 == nil { network.Counters.BytesReceived = valueInt } value, err = os.ReadFile(fmt.Sprintf("/sys/class/net/%s/statistics/tx_packets", iface.Name)) valueInt, err1 = strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err == nil && err1 == nil { network.Counters.PacketsSent = valueInt } value, err = os.ReadFile(fmt.Sprintf("/sys/class/net/%s/statistics/rx_packets", iface.Name)) valueInt, err1 = strconv.ParseInt(strings.TrimSpace(string(value)), 10, 64) if err == nil && err1 == nil { network.Counters.PacketsReceived = valueInt } // Addresses for _, addr := range iface.Addresses { addressFields := strings.Split(addr.String(), "/") networkAddress := api.InstanceStateNetworkAddress{ Address: addressFields[0], Netmask: addressFields[1], } scope := "global" if strings.HasPrefix(addressFields[0], "127") { scope = "local" } if addressFields[0] == "::1" { scope = "local" } if strings.HasPrefix(addressFields[0], "169.254") { scope = "link" } if strings.HasPrefix(addressFields[0], "fe80:") { scope = "link" } networkAddress.Scope = scope if strings.Contains(addressFields[0], ":") { networkAddress.Family = "inet6" } else { networkAddress.Family = "inet" } network.Addresses = append(network.Addresses, networkAddress) } result[iface.Name] = network } return result } func processesState() int64 { pids := []int64{1} // Go through the pid list, adding new pids at the end so we go through them all for i := 0; i < len(pids); i++ { fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i]) fcont, err := os.ReadFile(fname) if err != nil { // the process terminated during execution of this loop continue } content := strings.Split(string(fcont), " ") for j := 0; j < len(content); j++ { pid, err := strconv.ParseInt(content[j], 10, 64) if err == nil { pids = append(pids, pid) } } } return int64(len(pids)) } func osState() *api.InstanceStateOSInfo { osInfo := &api.InstanceStateOSInfo{} // Get information about the OS. lsbRelease, err := osarch.GetOSRelease() if err == nil { osInfo.OS = lsbRelease["NAME"] osInfo.OSVersion = lsbRelease["VERSION_ID"] } // Get information about the kernel version. uname, err := linux.Uname() if err == nil { osInfo.KernelVersion = uname.Release } // Get the hostname. hostname, err := os.Hostname() if err == nil { osInfo.Hostname = hostname } // Get the FQDN. To avoid needing to run `hostname -f`, do a reverse host lookup for 127.0.1.1, and if found, return the first hostname as the FQDN. 
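// Added context (hedged): on many distributions the installer maps 127.0.1.1 to
// "<fqdn> <hostname>" in /etc/hosts, so a reverse lookup of a loopback address
// through the local resolver can return the machine's fully qualified name without
// shelling out to `hostname -f`. The short timeout below keeps this best-effort:
// if nothing resolves, the FQDN field is simply left empty.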
ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) defer cancel() var r net.Resolver fqdn, err := r.LookupAddr(ctx, "127.0.0.1") if err == nil && len(fqdn) > 0 { // Take the first returned hostname and trim the trailing dot. osInfo.FQDN = strings.TrimSuffix(fqdn[0], ".") } return osInfo } incus-6.0.4/cmd/incus-agent/templates.go000066400000000000000000000056141477363751000201720ustar00rootroot00000000000000package main import ( "fmt" "io" "io/fs" "os" "path/filepath" "strconv" "gopkg.in/yaml.v2" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/util" ) func templatesApply(path string) ([]string, error) { metaName := filepath.Join(path, "metadata.yaml") if !util.PathExists(metaName) { return nil, nil } // Parse the metadata. content, err := os.ReadFile(metaName) if err != nil { return nil, fmt.Errorf("Failed to read metadata: %w", err) } metadata := new(api.ImageMetadata) err = yaml.Unmarshal(content, &metadata) if err != nil { return nil, fmt.Errorf("Could not parse metadata.yaml: %w", err) } // Go through the files and copy them into place. files := []string{} for tplPath, tpl := range metadata.Templates { err = func(tplPath string, tpl *api.ImageMetadataTemplate) error { filePath := filepath.Join(path, fmt.Sprintf("%s.out", tpl.Template)) if !util.PathExists(filePath) { return nil } var w *os.File if util.PathExists(tplPath) { if tpl.CreateOnly { return nil } // Open the existing file. w, err = os.Create(tplPath) if err != nil { return fmt.Errorf("Failed to create template file: %w", err) } } else { // UID and GID fileUID := int64(0) fileGID := int64(0) if tpl.UID != "" { id, err := strconv.ParseInt(tpl.UID, 10, 64) if err != nil { return fmt.Errorf("Bad file UID %q for %q: %w", tpl.UID, tplPath, err) } fileUID = id } if tpl.GID != "" { id, err := strconv.ParseInt(tpl.GID, 10, 64) if err != nil { return fmt.Errorf("Bad file GID %q for %q: %w", tpl.GID, tplPath, err) } fileGID = id } // Mode fileMode := fs.FileMode(0o644) if tpl.Mode != "" { if len(tpl.Mode) == 3 { tpl.Mode = fmt.Sprintf("0%s", tpl.Mode) } mode, err := strconv.ParseInt(tpl.Mode, 0, 0) if err != nil { return fmt.Errorf("Bad mode %q for %q: %w", tpl.Mode, tplPath, err) } fileMode = os.FileMode(mode) & os.ModePerm } // Create the directories leading to the file. err := os.MkdirAll(filepath.Dir(tplPath), 0o755) if err != nil { return err } // Create the file itself. w, err = os.Create(tplPath) if err != nil { return err } // Fix ownership. err = w.Chown(int(fileUID), int(fileGID)) if err != nil { return err } // Fix mode. err = w.Chmod(fileMode) if err != nil { return err } } defer func() { _ = w.Close() }() // Do the copy. src, err := os.Open(filePath) if err != nil { return err } defer func() { _ = src.Close() }() _, err = io.Copy(w, src) if err != nil { return err } err = w.Close() if err != nil { return err } files = append(files, tplPath) return nil }(tplPath, tpl) if err != nil { return nil, err } } return files, nil } incus-6.0.4/cmd/incus-benchmark/000077500000000000000000000000001477363751000164735ustar00rootroot00000000000000incus-6.0.4/cmd/incus-benchmark/benchmark.go000066400000000000000000000153231477363751000207600ustar00rootroot00000000000000package main import ( "fmt" "strings" "sync" "time" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" config "github.com/lxc/incus/v6/shared/cliconfig" ) const userConfigKey = "user.incus-benchmark" // PrintServerInfo prints out information about the server. 
func PrintServerInfo(c incus.InstanceServer) error { server, _, err := c.GetServer() if err != nil { return err } env := server.Environment fmt.Println("Test environment:") fmt.Println(" Server backend:", env.Server) fmt.Println(" Server version:", env.ServerVersion) fmt.Println(" Kernel:", env.Kernel) fmt.Println(" Kernel architecture:", env.KernelArchitecture) fmt.Println(" Kernel version:", env.KernelVersion) fmt.Println(" Storage backend:", env.Storage) fmt.Println(" Storage version:", env.StorageVersion) fmt.Println(" Container backend:", env.Driver) fmt.Println(" Container version:", env.DriverVersion) fmt.Println("") return nil } // LaunchContainers launches a set of containers. func LaunchContainers(c incus.InstanceServer, count int, parallel int, image string, privileged bool, start bool, freeze bool) (time.Duration, error) { var duration time.Duration batchSize, err := getBatchSize(parallel) if err != nil { return duration, err } printTestConfig(count, batchSize, image, privileged, freeze) fingerprint, err := ensureImage(c, image) if err != nil { return duration, err } batchStart := func(index int, wg *sync.WaitGroup) { defer wg.Done() name := getContainerName(count, index) err := createContainer(c, fingerprint, name, privileged) if err != nil { logf("Failed to launch container '%s': %s", name, err) return } if start { err := startContainer(c, name) if err != nil { logf("Failed to start container '%s': %s", name, err) return } if freeze { err := freezeContainer(c, name) if err != nil { logf("Failed to freeze container '%s': %s", name, err) return } } } } duration = processBatch(count, batchSize, batchStart) return duration, nil } // CreateContainers creates the specified number of containers. func CreateContainers(c incus.InstanceServer, count int, parallel int, fingerprint string, privileged bool) (time.Duration, error) { var duration time.Duration batchSize, err := getBatchSize(parallel) if err != nil { return duration, err } batchCreate := func(index int, wg *sync.WaitGroup) { defer wg.Done() name := getContainerName(count, index) err := createContainer(c, fingerprint, name, privileged) if err != nil { logf("Failed to launch container '%s': %s", name, err) return } } duration = processBatch(count, batchSize, batchCreate) return duration, nil } // GetContainers returns containers created by the benchmark. func GetContainers(c incus.InstanceServer) ([]api.Instance, error) { containers := []api.Instance{} allContainers, err := c.GetInstances(api.InstanceTypeContainer) if err != nil { return containers, err } for _, container := range allContainers { if container.Config[userConfigKey] == "true" { containers = append(containers, container) } } return containers, nil } // StartContainers starts containers created by the benchmark. func StartContainers(c incus.InstanceServer, containers []api.Instance, parallel int) (time.Duration, error) { var duration time.Duration batchSize, err := getBatchSize(parallel) if err != nil { return duration, err } count := len(containers) logf("Starting %d containers", count) batchStart := func(index int, wg *sync.WaitGroup) { defer wg.Done() container := containers[index] if !container.IsActive() { err := startContainer(c, container.Name) if err != nil { logf("Failed to start container '%s': %s", container.Name, err) return } } } duration = processBatch(count, batchSize, batchStart) return duration, nil } // StopContainers stops containers created by the benchmark.
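// Like the helpers above, it relies on the shared batching pattern: a
// per-index closure is handed to processBatch, which runs it in WaitGroup
// batches of batchSize goroutines. A minimal sketch of that pattern (names
// are illustrative only):
//
//     batchStop := func(index int, wg *sync.WaitGroup) {
//         defer wg.Done()
//         // act on containers[index] here
//     }
//     duration = processBatch(len(containers), batchSize, batchStop)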
func StopContainers(c incus.InstanceServer, containers []api.Instance, parallel int) (time.Duration, error) { var duration time.Duration batchSize, err := getBatchSize(parallel) if err != nil { return duration, err } count := len(containers) logf("Stopping %d containers", count) batchStop := func(index int, wg *sync.WaitGroup) { defer wg.Done() container := containers[index] if container.IsActive() { err := stopContainer(c, container.Name) if err != nil { logf("Failed to stop container '%s': %s", container.Name, err) return } } } duration = processBatch(count, batchSize, batchStop) return duration, nil } // DeleteContainers removes containers created by the benchmark. func DeleteContainers(c incus.InstanceServer, containers []api.Instance, parallel int) (time.Duration, error) { var duration time.Duration batchSize, err := getBatchSize(parallel) if err != nil { return duration, err } count := len(containers) logf("Deleting %d containers", count) batchDelete := func(index int, wg *sync.WaitGroup) { defer wg.Done() container := containers[index] name := container.Name if container.IsActive() { err := stopContainer(c, name) if err != nil { logf("Failed to stop container '%s': %s", name, err) return } } err = deleteContainer(c, name) if err != nil { logf("Failed to delete container: %s", name) return } } duration = processBatch(count, batchSize, batchDelete) return duration, nil } func ensureImage(c incus.InstanceServer, image string) (string, error) { var fingerprint string if strings.Contains(image, ":") { defaultConfig := config.NewConfig("", true) defaultConfig.UserAgent = version.UserAgent remote, fp, err := defaultConfig.ParseRemote(image) if err != nil { return "", err } fingerprint = fp imageServer, err := defaultConfig.GetImageServer(remote) if err != nil { return "", err } if fingerprint == "" { fingerprint = "default" } alias, _, err := imageServer.GetImageAlias(fingerprint) if err == nil { fingerprint = alias.Target } _, _, err = c.GetImage(fingerprint) if err != nil { logf("Importing image into local store: %s", fingerprint) image, _, err := imageServer.GetImage(fingerprint) if err != nil { logf("Failed to import image: %s", err) return "", err } err = copyImage(c, imageServer, *image) if err != nil { logf("Failed to import image: %s", err) return "", err } } } else { fingerprint = image alias, _, err := c.GetImageAlias(image) if err == nil { fingerprint = alias.Target } else { _, _, err = c.GetImage(image) } if err != nil { logf("Image not found in local store: %s", image) return "", err } } logf("Found image in local store: %s", fingerprint) return fingerprint, nil } incus-6.0.4/cmd/incus-benchmark/benchmark_batch.go000066400000000000000000000023041477363751000221140ustar00rootroot00000000000000package main import ( "os" "sync" "time" ) func getBatchSize(parallel int) (int, error) { batchSize := parallel if batchSize < 1 { // Detect the number of parallel actions cpus, err := os.ReadDir("/sys/bus/cpu/devices") if err != nil { return -1, err } batchSize = len(cpus) } return batchSize, nil } func processBatch(count int, batchSize int, process func(index int, wg *sync.WaitGroup)) time.Duration { batches := count / batchSize remainder := count % batchSize processed := 0 wg := sync.WaitGroup{} nextStat := batchSize logf("Batch processing start") timeStart := time.Now() for i := 0; i < batches; i++ { for j := 0; j < batchSize; j++ { wg.Add(1) go process(processed, &wg) processed++ } wg.Wait() if processed >= nextStat { interval := time.Since(timeStart).Seconds() logf("Processed %d 
containers in %.3fs (%.3f/s)", processed, interval, float64(processed)/interval) nextStat = nextStat * 2 } } for k := 0; k < remainder; k++ { wg.Add(1) go process(processed, &wg) processed++ } wg.Wait() timeEnd := time.Now() duration := timeEnd.Sub(timeStart) logf("Batch processing completed in %.3fs", duration.Seconds()) return duration } incus-6.0.4/cmd/incus-benchmark/benchmark_operation.go000066400000000000000000000030561477363751000230400ustar00rootroot00000000000000package main import ( incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/shared/api" ) func createContainer(c incus.InstanceServer, fingerprint string, name string, privileged bool) error { config := map[string]string{} if privileged { config["security.privileged"] = "true" } config[userConfigKey] = "true" req := api.InstancesPost{ Name: name, Source: api.InstanceSource{ Type: "image", Fingerprint: fingerprint, }, } req.Config = config op, err := c.CreateInstance(req) if err != nil { return err } return op.Wait() } func startContainer(c incus.InstanceServer, name string) error { op, err := c.UpdateInstanceState( name, api.InstanceStatePut{Action: "start", Timeout: -1}, "") if err != nil { return err } return op.Wait() } func stopContainer(c incus.InstanceServer, name string) error { op, err := c.UpdateInstanceState( name, api.InstanceStatePut{Action: "stop", Timeout: -1, Force: true}, "") if err != nil { return err } return op.Wait() } func freezeContainer(c incus.InstanceServer, name string) error { op, err := c.UpdateInstanceState( name, api.InstanceStatePut{Action: "freeze", Timeout: -1}, "") if err != nil { return err } return op.Wait() } func deleteContainer(c incus.InstanceServer, name string) error { op, err := c.DeleteInstance(name) if err != nil { return err } return op.Wait() } func copyImage(c incus.InstanceServer, s incus.ImageServer, image api.Image) error { op, err := c.CopyImage(s, image, nil) if err != nil { return err } return op.Wait() } incus-6.0.4/cmd/incus-benchmark/benchmark_report.go000066400000000000000000000040511477363751000223470ustar00rootroot00000000000000package main import ( "encoding/csv" "fmt" "io" "os" "time" ) // Subset of JMeter CSV log format that are required by Jenkins performance // plugin // (see http://jmeter.apache.org/usermanual/listeners.html#csvlogformat) var csvFields = []string{ "timeStamp", // in milliseconds since 1/1/1970 "elapsed", // in milliseconds "label", "responseCode", "success", // "true" or "false" } // CSVReport reads/writes a CSV report file. type CSVReport struct { Filename string records [][]string } // Load reads current content of the filename and loads records. func (r *CSVReport) Load() error { file, err := os.Open(r.Filename) if err != nil { return err } defer func() { _ = file.Close() }() reader := csv.NewReader(file) for line := 1; err != io.EOF; line++ { record, err := reader.Read() if err == io.EOF { break } else if err != nil { return err } err = r.addRecord(record) if err != nil { return err } } logf("Loaded report file %s", r.Filename) return nil } // Write writes current records to file. func (r *CSVReport) Write() error { file, err := os.OpenFile(r.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o640) if err != nil { return err } defer func() { _ = file.Close() }() writer := csv.NewWriter(file) err = writer.WriteAll(r.records) if err != nil { return err } logf("Written report file %s", r.Filename) return file.Close() } // AddRecord adds a record to the report. 
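// The report produced this way is a minimal JMeter-style CSV, for example
// (hypothetical values for two runs of the "launch" and "delete"
// sub-commands):
//
//     timeStamp,elapsed,label,responseCode,success
//     1712000000000,15234,launch,,true
//     1712000200000,4120,delete,,true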
func (r *CSVReport) AddRecord(label string, elapsed time.Duration) error { if len(r.records) == 0 { err := r.addRecord(csvFields) if err != nil { return err } } record := []string{ fmt.Sprintf("%d", time.Now().UnixNano()/int64(time.Millisecond)), // timestamp fmt.Sprintf("%d", elapsed/time.Millisecond), label, "", // responseCode is not used "true", // success" } return r.addRecord(record) } func (r *CSVReport) addRecord(record []string) error { if len(record) != len(csvFields) { return fmt.Errorf("Invalid number of fields : %q", record) } r.records = append(r.records, record) return nil } incus-6.0.4/cmd/incus-benchmark/benchmark_util.go000066400000000000000000000017471477363751000220220ustar00rootroot00000000000000package main import ( "fmt" "time" ) func getContainerName(count int, index int) string { nameFormat := "benchmark-%." + fmt.Sprintf("%d", len(fmt.Sprintf("%d", count))) + "d" return fmt.Sprintf(nameFormat, index+1) } func logf(format string, args ...any) { fmt.Printf(fmt.Sprintf("[%s] %s\n", time.Now().Format(time.StampMilli), format), args...) } func printTestConfig(count int, batchSize int, image string, privileged bool, freeze bool) { privilegedStr := "unprivileged" if privileged { privilegedStr = "privileged" } mode := "normal startup" if freeze { mode = "start and freeze" } batches := count / batchSize remainder := count % batchSize fmt.Println("Test variables:") fmt.Println(" Container count:", count) fmt.Println(" Container mode:", privilegedStr) fmt.Println(" Startup mode:", mode) fmt.Println(" Image:", image) fmt.Println(" Batches:", batches) fmt.Println(" Batch size:", batchSize) fmt.Println(" Remainder:", remainder) fmt.Println("") } incus-6.0.4/cmd/incus-benchmark/main.go000066400000000000000000000073501477363751000177530ustar00rootroot00000000000000package main import ( "os" "time" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/util" ) type cmdGlobal struct { flagHelp bool flagParallel int flagProject string flagReportFile string flagReportLabel string flagVersion bool srv incus.InstanceServer report *CSVReport reportDuration time.Duration } func (c *cmdGlobal) Run(cmd *cobra.Command, args []string) error { // Connect to the daemon srv, err := incus.ConnectIncusUnix("", nil) if err != nil { return err } c.srv = srv.UseProject(c.flagProject) // Print the initial header err = PrintServerInfo(srv) if err != nil { return err } // Setup report handling if c.flagReportFile != "" { c.report = &CSVReport{Filename: c.flagReportFile} if util.PathExists(c.flagReportFile) { err := c.report.Load() if err != nil { return err } } } return nil } func (c *cmdGlobal) Teardown(cmd *cobra.Command, args []string) error { // Nothing to do with not reporting if c.report == nil { return nil } label := cmd.Name() if c.flagReportLabel != "" { label = c.flagReportLabel } err := c.report.AddRecord(label, c.reportDuration) if err != nil { return err } err = c.report.Write() if err != nil { return err } return nil } func main() { app := &cobra.Command{} app.Use = "incus-benchmark" app.Short = "Benchmark performance of Incus" app.Long = `Description: Benchmark performance of Incus This tool lets you benchmark various actions on a local Incus daemon. It can be used just to check how fast a given host is, to compare performance on different servers or for performance tracking when doing changes to the codebase. 
A CSV report can be produced to be consumed by graphing software. ` app.Example = ` # Spawn 20 containers in batches of 4 incus-benchmark launch --count 20 --parallel 4 # Create 50 Alpine containers in batches of 10 incus-benchmark init --count 50 --parallel 10 images:alpine/edge # Delete all test containers using dynamic batch size incus-benchmark delete` app.SilenceUsage = true app.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true} // Global flags globalCmd := cmdGlobal{} app.PersistentPreRunE = globalCmd.Run app.PersistentPostRunE = globalCmd.Teardown app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") app.PersistentFlags().IntVarP(&globalCmd.flagParallel, "parallel", "P", -1, "Number of threads to use"+"``") app.PersistentFlags().StringVar(&globalCmd.flagReportFile, "report-file", "", "Path to the CSV report file"+"``") app.PersistentFlags().StringVar(&globalCmd.flagReportLabel, "report-label", "", "Label for the new entry in the report [default=ACTION]"+"``") app.PersistentFlags().StringVar(&globalCmd.flagProject, "project", api.ProjectDefaultName, "Project to use") // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // init sub-command initCmd := cmdInit{global: &globalCmd} app.AddCommand(initCmd.Command()) // launch sub-command launchCmd := cmdLaunch{global: &globalCmd, init: &initCmd} app.AddCommand(launchCmd.Command()) // start sub-command startCmd := cmdStart{global: &globalCmd} app.AddCommand(startCmd.Command()) // stop sub-command stopCmd := cmdStop{global: &globalCmd} app.AddCommand(stopCmd.Command()) // delete sub-command deleteCmd := cmdDelete{global: &globalCmd} app.AddCommand(deleteCmd.Command()) // Run the main command and handle errors err := app.Execute() if err != nil { os.Exit(1) } } incus-6.0.4/cmd/incus-benchmark/main_delete.go000066400000000000000000000011471477363751000212730ustar00rootroot00000000000000package main import ( "github.com/spf13/cobra" ) type cmdDelete struct { global *cmdGlobal } func (c *cmdDelete) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "delete" cmd.Short = "Delete containers" cmd.RunE = c.Run return cmd } func (c *cmdDelete) Run(cmd *cobra.Command, args []string) error { // Get the containers containers, err := GetContainers(c.global.srv) if err != nil { return err } // Run the test duration, err := DeleteContainers(c.global.srv, containers, c.global.flagParallel) if err != nil { return err } c.global.reportDuration = duration return nil } incus-6.0.4/cmd/incus-benchmark/main_init.go000066400000000000000000000015611477363751000207740ustar00rootroot00000000000000package main import ( "github.com/spf13/cobra" ) type cmdInit struct { global *cmdGlobal flagCount int flagPrivileged bool } func (c *cmdInit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "init [[:]]" cmd.Short = "Create containers" cmd.RunE = c.Run cmd.Flags().IntVarP(&c.flagCount, "count", "C", 1, "Number of containers to create"+"``") cmd.Flags().BoolVar(&c.flagPrivileged, "privileged", false, "Use privileged containers") return cmd } func (c *cmdInit) Run(cmd *cobra.Command, args []string) error { // Choose the image image := "images:ubuntu/22.04" if len(args) > 0 { image = args[0] } // Run the test duration, err := LaunchContainers(c.global.srv, c.flagCount, c.global.flagParallel, image, c.flagPrivileged, false, false) if err != nil { return err } 
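// The duration stored below is picked up by the global Teardown hook, which
// appends one CSV row per run (labelled with the sub-command name, or with
// --report-label when given) whenever --report-file is set, e.g.:
//
//     incus-benchmark init --count 50 --parallel 10 --report-file report.csv images:alpine/edge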
c.global.reportDuration = duration return nil } incus-6.0.4/cmd/incus-benchmark/main_launch.go000066400000000000000000000015561477363751000213070ustar00rootroot00000000000000package main import ( "github.com/spf13/cobra" ) type cmdLaunch struct { global *cmdGlobal init *cmdInit flagFreeze bool } func (c *cmdLaunch) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "launch [[:]]" cmd.Short = "Create and start containers" cmd.RunE = c.Run cmd.Flags().AddFlagSet(c.init.Command().Flags()) cmd.Flags().BoolVarP(&c.flagFreeze, "freeze", "F", false, "Freeze the container right after start") return cmd } func (c *cmdLaunch) Run(cmd *cobra.Command, args []string) error { // Choose the image image := "images:ubuntu/22.04" if len(args) > 0 { image = args[0] } // Run the test duration, err := LaunchContainers(c.global.srv, c.init.flagCount, c.global.flagParallel, image, c.init.flagPrivileged, true, c.flagFreeze) if err != nil { return err } c.global.reportDuration = duration return nil } incus-6.0.4/cmd/incus-benchmark/main_start.go000066400000000000000000000011411477363751000211600ustar00rootroot00000000000000package main import ( "github.com/spf13/cobra" ) type cmdStart struct { global *cmdGlobal } func (c *cmdStart) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "start" cmd.Short = "Start containers" cmd.RunE = c.Run return cmd } func (c *cmdStart) Run(cmd *cobra.Command, args []string) error { // Get the containers containers, err := GetContainers(c.global.srv) if err != nil { return err } // Run the test duration, err := StartContainers(c.global.srv, containers, c.global.flagParallel) if err != nil { return err } c.global.reportDuration = duration return nil } incus-6.0.4/cmd/incus-benchmark/main_stop.go000066400000000000000000000011331477363751000210110ustar00rootroot00000000000000package main import ( "github.com/spf13/cobra" ) type cmdStop struct { global *cmdGlobal } func (c *cmdStop) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "stop" cmd.Short = "Stop containers" cmd.RunE = c.Run return cmd } func (c *cmdStop) Run(cmd *cobra.Command, args []string) error { // Get the containers containers, err := GetContainers(c.global.srv) if err != nil { return err } // Run the test duration, err := StopContainers(c.global.srv, containers, c.global.flagParallel) if err != nil { return err } c.global.reportDuration = duration return nil } incus-6.0.4/cmd/incus-migrate/000077500000000000000000000000001477363751000161715ustar00rootroot00000000000000incus-6.0.4/cmd/incus-migrate/cgo.go000066400000000000000000000006401477363751000172700ustar00rootroot00000000000000// build +linux,cgo package main // #cgo CFLAGS: -std=gnu11 -Wvla -Werror -fvisibility=hidden -Winit-self // #cgo CFLAGS: -Wformat=2 -Wshadow -Wendif-labels -fasynchronous-unwind-tables // #cgo CFLAGS: -pipe --param=ssp-buffer-size=4 -g -Wunused // #cgo CFLAGS: -Werror=implicit-function-declaration // #cgo CFLAGS: -Werror=return-type -Wendif-labels -Werror=overflow // #cgo CFLAGS: -Wnested-externs -fexceptions incus-6.0.4/cmd/incus-migrate/main.go000066400000000000000000000021151477363751000174430ustar00rootroot00000000000000package main import ( "bufio" "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/ask" ) type cmdGlobal struct { asker ask.Asker flagVersion bool flagHelp bool } func main() { // migrate command (main) migrateCmd := cmdMigrate{} app := migrateCmd.Command() app.SilenceUsage = true app.CompletionOptions = 
cobra.CompletionOptions{DisableDefaultCmd: true} // Workaround for main command app.Args = cobra.ArbitraryArgs // Global flags globalCmd := cmdGlobal{asker: ask.NewAsker(bufio.NewReader(os.Stdin))} migrateCmd.global = &globalCmd app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // netcat sub-command netcatCmd := cmdNetcat{global: &globalCmd} app.AddCommand(netcatCmd.Command()) // Run the main command and handle errors err := app.Execute() if err != nil { os.Exit(1) } } incus-6.0.4/cmd/incus-migrate/main_migrate.go000066400000000000000000000475551477363751000211740ustar00rootroot00000000000000package main import ( "bufio" "context" "errors" "fmt" "os" "os/exec" "os/signal" "path/filepath" "runtime" "slices" "sort" "strings" "github.com/spf13/cobra" "golang.org/x/sys/unix" "gopkg.in/yaml.v2" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/archive" "github.com/lxc/incus/v6/shared/osarch" "github.com/lxc/incus/v6/shared/revert" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/units" "github.com/lxc/incus/v6/shared/util" ) type cmdMigrate struct { global *cmdGlobal flagRsyncArgs string } func (c *cmdMigrate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "incus-migrate" cmd.Short = "Physical to instance migration tool" cmd.Long = `Description: Physical to instance migration tool This tool lets you turn any Linux filesystem (including your current one) into an instance on a remote host. It will setup a clean mount tree made of the root filesystem and any additional mount you list, then transfer this through the migration API to create a new instance from it. The same set of options as ` + "`incus launch`" + ` are also supported. ` cmd.RunE = c.Run cmd.Flags().StringVar(&c.flagRsyncArgs, "rsync-args", "", "Extra arguments to pass to rsync"+"``") return cmd } type cmdMigrateData struct { SourcePath string SourceFormat string Mounts []string InstanceArgs api.InstancesPost Project string } func (c *cmdMigrateData) Render() string { data := struct { Name string `yaml:"Name"` Project string `yaml:"Project"` Type api.InstanceType `yaml:"Type"` Source string `yaml:"Source"` SourceFormat string `yaml:"Source format,omitempty"` Mounts []string `yaml:"Mounts,omitempty"` Profiles []string `yaml:"Profiles,omitempty"` StoragePool string `yaml:"Storage pool,omitempty"` StorageSize string `yaml:"Storage pool size,omitempty"` Network string `yaml:"Network name,omitempty"` Config map[string]string `yaml:"Config,omitempty"` }{ c.InstanceArgs.Name, c.Project, c.InstanceArgs.Type, c.SourcePath, c.SourceFormat, c.Mounts, c.InstanceArgs.Profiles, "", "", "", c.InstanceArgs.Config, } disk, ok := c.InstanceArgs.Devices["root"] if ok { data.StoragePool = disk["pool"] size, ok := disk["size"] if ok { data.StorageSize = size } } network, ok := c.InstanceArgs.Devices["eth0"] if ok { data.Network = network["parent"] } out, err := yaml.Marshal(&data) if err != nil { return "" } return string(out) } func (c *cmdMigrate) askServer() (incus.InstanceServer, string, error) { // Detect local server. 
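// Detecting the local server is the first step of the interactive flow: once
// a target is selected, RunInteractive gathers the instance details and then
// prints a summary along these lines before offering the override menu
// (hypothetical values):
//
//     Name: foo
//     Project: default
//     Type: container
//     Source: /
//     Profiles:
//     - default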
local, err := c.connectLocal() if err == nil { useLocal, err := c.global.asker.AskBool("The local Incus server is the target [default=yes]: ", "yes") if err != nil { return nil, "", err } if useLocal { return local, "", nil } } // Server address serverURL, err := c.global.asker.AskString("Please provide Incus server URL: ", "", nil) if err != nil { return nil, "", err } serverURL, err = parseURL(serverURL) if err != nil { return nil, "", err } args := incus.ConnectionArgs{ UserAgent: fmt.Sprintf("LXC-MIGRATE %s", version.Version), } // Attempt to connect server, err := incus.ConnectIncus(serverURL, &args) if err != nil { // Failed to connect using the system CA, so retrieve the remote certificate. certificate, err := localtls.GetRemoteCertificate(serverURL, args.UserAgent) if err != nil { return nil, "", fmt.Errorf("Failed to get remote certificate: %w", err) } digest := localtls.CertFingerprint(certificate) fmt.Println("Certificate fingerprint:", digest) fmt.Print("ok (y/n)? ") buf := bufio.NewReader(os.Stdin) line, _, err := buf.ReadLine() if err != nil { return nil, "", err } if len(line) < 1 || line[0] != 'y' && line[0] != 'Y' { return nil, "", fmt.Errorf("Server certificate rejected by user") } args.InsecureSkipVerify = true server, err = incus.ConnectIncus(serverURL, &args) if err != nil { return nil, "", fmt.Errorf("Failed to connect to server: %w", err) } } apiServer, _, err := server.GetServer() if err != nil { return nil, "", fmt.Errorf("Failed to get server: %w", err) } fmt.Println("") type AuthMethod int const ( authMethodTLSCertificate AuthMethod = iota authMethodTLSTemporaryCertificate authMethodTLSCertificateToken ) // TLS is always available var availableAuthMethods []AuthMethod var authMethod AuthMethod i := 1 if slices.Contains(apiServer.AuthMethods, api.AuthenticationMethodTLS) { fmt.Printf("%d) Use a certificate token\n", i) availableAuthMethods = append(availableAuthMethods, authMethodTLSCertificateToken) i++ fmt.Printf("%d) Use an existing TLS authentication certificate\n", i) availableAuthMethods = append(availableAuthMethods, authMethodTLSCertificate) i++ fmt.Printf("%d) Generate a temporary TLS authentication certificate\n", i) availableAuthMethods = append(availableAuthMethods, authMethodTLSTemporaryCertificate) } if len(apiServer.AuthMethods) > 1 || slices.Contains(apiServer.AuthMethods, api.AuthenticationMethodTLS) { authMethodInt, err := c.global.asker.AskInt("Please pick an authentication mechanism above: ", 1, int64(i), "", nil) if err != nil { return nil, "", err } authMethod = availableAuthMethods[authMethodInt-1] } var certPath string var keyPath string var token string if authMethod == authMethodTLSCertificate { certPath, err = c.global.asker.AskString("Please provide the certificate path: ", "", func(path string) error { if !util.PathExists(path) { return errors.New("File does not exist") } return nil }) if err != nil { return nil, "", err } keyPath, err = c.global.asker.AskString("Please provide the keyfile path: ", "", func(path string) error { if !util.PathExists(path) { return errors.New("File does not exist") } return nil }) if err != nil { return nil, "", err } } else if authMethod == authMethodTLSCertificateToken { token, err = c.global.asker.AskString("Please provide the certificate token: ", "", func(token string) error { _, err := localtls.CertificateTokenDecode(token) if err != nil { return err } return nil }) if err != nil { return nil, "", err } } var authType string switch authMethod { case authMethodTLSCertificate, 
authMethodTLSTemporaryCertificate, authMethodTLSCertificateToken: authType = api.AuthenticationMethodTLS } return c.connectTarget(serverURL, certPath, keyPath, authType, token) } func (c *cmdMigrate) RunInteractive(server incus.InstanceServer) (cmdMigrateData, error) { var err error config := cmdMigrateData{} config.InstanceArgs = api.InstancesPost{ Source: api.InstanceSource{ Type: "migration", Mode: "push", }, } config.InstanceArgs.Config = map[string]string{} config.InstanceArgs.Devices = map[string]map[string]string{} // Provide instance type instanceType, err := c.global.asker.AskInt("Would you like to create a container (1) or virtual-machine (2)?: ", 1, 2, "1", nil) if err != nil { return cmdMigrateData{}, err } if instanceType == 1 { config.InstanceArgs.Type = api.InstanceTypeContainer } else if instanceType == 2 { config.InstanceArgs.Type = api.InstanceTypeVM } // Project projectNames, err := server.GetProjectNames() if err != nil { return cmdMigrateData{}, err } if len(projectNames) > 1 { project, err := c.global.asker.AskChoice("Project to create the instance in [default=default]: ", projectNames, api.ProjectDefaultName) if err != nil { return cmdMigrateData{}, err } config.Project = project server = server.UseProject(config.Project) } else { config.Project = api.ProjectDefaultName } // Instance name instanceNames, err := server.GetInstanceNames(api.InstanceTypeAny) if err != nil { return cmdMigrateData{}, err } for { instanceName, err := c.global.asker.AskString("Name of the new instance: ", "", nil) if err != nil { return cmdMigrateData{}, err } if slices.Contains(instanceNames, instanceName) { fmt.Printf("Instance %q already exists\n", instanceName) continue } config.InstanceArgs.Name = instanceName break } var question string // Provide source path if config.InstanceArgs.Type == api.InstanceTypeVM { question = "Please provide the path to a disk, partition, or qcow2/raw/vmdk image file: " } else { question = "Please provide the path to a root filesystem: " } config.SourcePath, err = c.global.asker.AskString(question, "", func(s string) error { if !util.PathExists(s) { return errors.New("Path does not exist") } _, err := os.Stat(s) if err != nil { return err } // When migrating a VM, report the detected source format if config.InstanceArgs.Type == api.InstanceTypeVM { if linux.IsBlockdevPath(s) { config.SourceFormat = "Block device" } else if _, ext, _, _ := archive.DetectCompression(s); ext == ".qcow2" { config.SourceFormat = "qcow2" } else if _, ext, _, _ := archive.DetectCompression(s); ext == ".vmdk" { config.SourceFormat = "vmdk" } else { // If the input isn't a block device or qcow2/vmdk image, assume it's raw. // Positively identifying a raw image depends on parsing MBR/GPT partition tables. config.SourceFormat = "raw" } } return nil }) if err != nil { return cmdMigrateData{}, err } if config.InstanceArgs.Type == api.InstanceTypeVM { architectureName, _ := osarch.ArchitectureGetLocal() if slices.Contains([]string{"x86_64", "aarch64"}, architectureName) { hasUEFI, err := c.global.asker.AskBool("Does the VM support UEFI booting? [default=yes]: ", "yes") if err != nil { return cmdMigrateData{}, err } if hasUEFI { hasSecureBoot, err := c.global.asker.AskBool("Does the VM support UEFI Secure Boot? 
[default=yes]: ", "yes") if err != nil { return cmdMigrateData{}, err } if !hasSecureBoot { config.InstanceArgs.Config["security.secureboot"] = "false" } } else { config.InstanceArgs.Config["security.csm"] = "true" config.InstanceArgs.Config["security.secureboot"] = "false" } } } var mounts []string // Additional mounts for containers if config.InstanceArgs.Type == api.InstanceTypeContainer { addMounts, err := c.global.asker.AskBool("Do you want to add additional filesystem mounts? [default=no]: ", "no") if err != nil { return cmdMigrateData{}, err } if addMounts { for { path, err := c.global.asker.AskString("Please provide a path the filesystem mount path [empty value to continue]: ", "", func(s string) error { if s != "" { if util.PathExists(s) { return nil } return errors.New("Path does not exist") } return nil }) if err != nil { return cmdMigrateData{}, err } if path == "" { break } mounts = append(mounts, path) } config.Mounts = append(config.Mounts, mounts...) } } for { fmt.Println("\nInstance to be created:") scanner := bufio.NewScanner(strings.NewReader(config.Render())) for scanner.Scan() { fmt.Printf(" %s\n", scanner.Text()) } fmt.Print(` Additional overrides can be applied at this stage: 1) Begin the migration with the above configuration 2) Override profile list 3) Set additional configuration options 4) Change instance storage pool or volume size 5) Change instance network `) choice, err := c.global.asker.AskInt("Please pick one of the options above [default=1]: ", 1, 5, "1", nil) if err != nil { return cmdMigrateData{}, err } switch choice { case 1: return config, nil case 2: err = c.askProfiles(server, &config) case 3: err = c.askConfig(&config) case 4: err = c.askStorage(server, &config) case 5: err = c.askNetwork(server, &config) } if err != nil { fmt.Println(err) } } } func (c *cmdMigrate) Run(cmd *cobra.Command, args []string) error { // Quick checks. if os.Geteuid() != 0 { return fmt.Errorf("This tool must be run as root") } _, err := exec.LookPath("rsync") if err != nil { return err } // Server server, clientFingerprint, err := c.askServer() if err != nil { return err } sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, os.Interrupt) ctx, cancel := context.WithCancel(context.Background()) go func() { <-sigChan if clientFingerprint != "" { _ = server.DeleteCertificate(clientFingerprint) } cancel() // The following nolint directive ignores the "deep-exit" rule of the revive linter. // We should be exiting cleanly by passing the above context into each invoked method and checking for // cancellation. Unfortunately our client methods do not accept a context argument. 
os.Exit(1) //nolint:revive }() if clientFingerprint != "" { defer func() { _ = server.DeleteCertificate(clientFingerprint) }() } config, err := c.RunInteractive(server) if err != nil { return err } if config.Project != "" { server = server.UseProject(config.Project) } config.Mounts = append(config.Mounts, config.SourcePath) // Get and sort the mounts sort.Strings(config.Mounts) // Create the mount namespace and ensure we're not moved around runtime.LockOSThread() defer runtime.UnlockOSThread() // Unshare a new mntns so our mounts don't leak err = unix.Unshare(unix.CLONE_NEWNS) if err != nil { return fmt.Errorf("Failed to unshare mount namespace: %w", err) } // Prevent mount propagation back to initial namespace err = unix.Mount("", "/", "", unix.MS_REC|unix.MS_PRIVATE, "") if err != nil { return fmt.Errorf("Failed to disable mount propagation: %w", err) } // Create the temporary directory to be used for the mounts path, err := os.MkdirTemp("", "incus-migrate_mount_") if err != nil { return err } // Automatically clean-up the temporary path on exit defer func(path string) { // Unmount the path if it's a mountpoint. _ = unix.Unmount(path, unix.MNT_DETACH) _ = unix.Unmount(filepath.Join(path, "root.img"), unix.MNT_DETACH) // Cleanup VM image files. _ = os.Remove(filepath.Join(path, "converted-raw-image.img")) _ = os.Remove(filepath.Join(path, "root.img")) // Remove the directory itself. _ = os.Remove(path) }(path) var fullPath string if config.InstanceArgs.Type == api.InstanceTypeContainer { // Create the rootfs directory fullPath = fmt.Sprintf("%s/rootfs", path) err = os.Mkdir(fullPath, 0o755) if err != nil { return err } // Setup the source (mounts) err = setupSource(fullPath, config.Mounts) if err != nil { return fmt.Errorf("Failed to setup the source: %w", err) } } else { _, ext, convCmd, _ := archive.DetectCompression(config.SourcePath) if ext == ".qcow2" || ext == ".vmdk" { destImg := filepath.Join(path, "converted-raw-image.img") cmd := []string{ "nice", "-n19", // Run with low priority to reduce CPU impact on other processes. } cmd = append(cmd, convCmd...) cmd = append(cmd, "-p", "-t", "writeback") // Check for Direct I/O support. from, err := os.OpenFile(config.SourcePath, unix.O_DIRECT|unix.O_RDONLY, 0) if err == nil { cmd = append(cmd, "-T", "none") _ = from.Close() } to, err := os.OpenFile(destImg, unix.O_DIRECT|unix.O_RDONLY, 0) if err == nil { cmd = append(cmd, "-t", "none") _ = to.Close() } cmd = append(cmd, config.SourcePath, destImg) fmt.Printf("Converting image %q to raw format before importing\n", config.SourcePath) c := exec.Command(cmd[0], cmd[1:]...) 
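// The assembled command line ends up roughly as follows, assuming convCmd
// resolves to a qemu-img style converter for qcow2/vmdk sources ("..." stands
// for the format flags carried in convCmd, paths are illustrative):
//
//     nice -n19 qemu-img convert ... -p -t writeback -T none /src/disk.qcow2 converted-raw-image.img
//
// The cache-mode flags ("-T none" for the source, "-t none" for the
// destination) are only appended when the respective file can be opened with
// O_DIRECT.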
err = c.Run() if err != nil { return fmt.Errorf("Failed to convert image %q for importing: %w", config.SourcePath, err) } config.SourcePath = destImg } fullPath = path target := filepath.Join(path, "root.img") err := os.WriteFile(target, nil, 0o644) if err != nil { return fmt.Errorf("Failed to create %q: %w", target, err) } // Mount the path err = unix.Mount(config.SourcePath, target, "none", unix.MS_BIND, "") if err != nil { return fmt.Errorf("Failed to mount %s: %w", config.SourcePath, err) } // Make it read-only err = unix.Mount("", target, "none", unix.MS_BIND|unix.MS_RDONLY|unix.MS_REMOUNT, "") if err != nil { return fmt.Errorf("Failed to make %s read-only: %w", config.SourcePath, err) } } // System architecture architectureName, err := osarch.ArchitectureGetLocal() if err != nil { return err } config.InstanceArgs.Architecture = architectureName revert := revert.New() defer revert.Fail() // Create the instance op, err := server.CreateInstance(config.InstanceArgs) if err != nil { return err } revert.Add(func() { _, _ = server.DeleteInstance(config.InstanceArgs.Name) }) progress := cli.ProgressRenderer{Format: "Transferring instance: %s"} _, err = op.AddHandler(progress.UpdateOp) if err != nil { progress.Done("") return err } err = transferRootfs(ctx, server, op, fullPath, c.flagRsyncArgs, config.InstanceArgs.Type) if err != nil { return err } progress.Done(fmt.Sprintf("Instance %s successfully created", config.InstanceArgs.Name)) revert.Success() return nil } func (c *cmdMigrate) askProfiles(server incus.InstanceServer, config *cmdMigrateData) error { profileNames, err := server.GetProfileNames() if err != nil { return err } profiles, err := c.global.asker.AskString("Which profiles do you want to apply to the instance? (space separated) [default=default, \"-\" for none]: ", "default", func(s string) error { // This indicates that no profiles should be applied. if s == "-" { return nil } profiles := strings.Split(s, " ") for _, profile := range profiles { if !slices.Contains(profileNames, profile) { return fmt.Errorf("Unknown profile %q", profile) } } return nil }) if err != nil { return err } if profiles != "-" { config.InstanceArgs.Profiles = strings.Split(profiles, " ") } return nil } func (c *cmdMigrate) askConfig(config *cmdMigrateData) error { configs, err := c.global.asker.AskString("Please specify config keys and values (key=value ...): ", "", func(s string) error { if s == "" { return nil } for _, entry := range strings.Split(s, " ") { if !strings.Contains(entry, "=") { return fmt.Errorf("Bad key=value configuration: %v", entry) } } return nil }) if err != nil { return err } for _, entry := range strings.Split(configs, " ") { key, value, _ := strings.Cut(entry, "=") config.InstanceArgs.Config[key] = value } return nil } func (c *cmdMigrate) askStorage(server incus.InstanceServer, config *cmdMigrateData) error { storagePools, err := server.GetStoragePoolNames() if err != nil { return err } if len(storagePools) == 0 { return fmt.Errorf("No storage pools available") } storagePool, err := c.global.asker.AskChoice("Please provide the storage pool to use: ", storagePools, "") if err != nil { return err } config.InstanceArgs.Devices["root"] = map[string]string{ "type": "disk", "pool": storagePool, "path": "/", } changeStorageSize, err := c.global.asker.AskBool("Do you want to change the storage size? 
[default=no]: ", "no") if err != nil { return err } if changeStorageSize { size, err := c.global.asker.AskString("Please specify the storage size: ", "", func(s string) error { _, err := units.ParseByteSizeString(s) return err }) if err != nil { return err } config.InstanceArgs.Devices["root"]["size"] = size } return nil } func (c *cmdMigrate) askNetwork(server incus.InstanceServer, config *cmdMigrateData) error { networks, err := server.GetNetworkNames() if err != nil { return err } network, err := c.global.asker.AskChoice("Please specify the network to use for the instance: ", networks, "") if err != nil { return err } config.InstanceArgs.Devices["eth0"] = map[string]string{ "type": "nic", "nictype": "bridged", "parent": network, "name": "eth0", } return nil } incus-6.0.4/cmd/incus-migrate/main_netcat.go000066400000000000000000000022651477363751000210070ustar00rootroot00000000000000package main import ( "fmt" "io" "net" "os" "sync" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/eagain" ) type cmdNetcat struct { global *cmdGlobal } func (c *cmdNetcat) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "netcat
" cmd.Short = "Sends stdin data to a unix socket" cmd.RunE = c.Run cmd.Hidden = true return cmd } func (c *cmdNetcat) Run(cmd *cobra.Command, args []string) error { // Help and usage if len(args) == 0 { _ = cmd.Help() return nil } // Handle mandatory arguments if len(args) != 1 { _ = cmd.Help() return fmt.Errorf("Missing required argument") } // Connect to the provided address uAddr, err := net.ResolveUnixAddr("unix", args[0]) if err != nil { return err } conn, err := net.DialUnix("unix", nil, uAddr) if err != nil { return err } // We'll wait until we're done reading from the socket wg := sync.WaitGroup{} wg.Add(1) go func() { _, err = io.Copy(eagain.Writer{Writer: os.Stdout}, eagain.Reader{Reader: conn}) _ = conn.Close() wg.Done() }() go func() { _, _ = io.Copy(eagain.Writer{Writer: conn}, eagain.Reader{Reader: os.Stdin}) }() // Wait wg.Wait() return err } incus-6.0.4/cmd/incus-migrate/transfer.go000066400000000000000000000060601477363751000203460ustar00rootroot00000000000000package main import ( "context" "fmt" "io" "net" "os" "os/exec" "strings" "github.com/google/uuid" "github.com/gorilla/websocket" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/migration" "github.com/lxc/incus/v6/internal/rsync" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/util" "github.com/lxc/incus/v6/shared/ws" ) // Send an rsync stream of a path over a websocket. func rsyncSend(ctx context.Context, conn *websocket.Conn, path string, rsyncArgs string, instanceType api.InstanceType) error { cmd, dataSocket, stderr, err := rsyncSendSetup(ctx, path, rsyncArgs, instanceType) if err != nil { return err } if dataSocket != nil { defer func() { _ = dataSocket.Close() }() } readDone, writeDone := ws.Mirror(conn, dataSocket) <-writeDone _ = dataSocket.Close() output, err := io.ReadAll(stderr) if err != nil { _ = cmd.Process.Kill() _ = cmd.Wait() return fmt.Errorf("Failed to rsync: %v\n%s", err, output) } err = cmd.Wait() <-readDone if err != nil { return fmt.Errorf("Failed to rsync: %v\n%s", err, output) } return nil } // Spawn the rsync process. func rsyncSendSetup(ctx context.Context, path string, rsyncArgs string, instanceType api.InstanceType) (*exec.Cmd, net.Conn, io.ReadCloser, error) { auds := fmt.Sprintf("@incus-migrate/%s", uuid.New().String()) if len(auds) > linux.ABSTRACT_UNIX_SOCK_LEN-1 { auds = auds[:linux.ABSTRACT_UNIX_SOCK_LEN-1] } l, err := net.Listen("unix", auds) if err != nil { return nil, nil, nil, err } execPath, err := os.Readlink("/proc/self/exe") if err != nil { return nil, nil, nil, err } if !util.PathExists(execPath) { execPath = os.Args[0] } rsyncCmd := fmt.Sprintf("sh -c \"%s netcat %s\"", execPath, auds) args := []string{ "-ar", "--devices", "--numeric-ids", "--partial", "--sparse", } if instanceType == api.InstanceTypeContainer { args = append(args, "--xattrs", "--delete", "--compress", "--compress-level=2") } if instanceType == api.InstanceTypeVM { args = append(args, "--exclude", "*.img") } if rsync.AtLeast("3.1.3") { args = append(args, "--filter=-x security.selinux") } if rsync.AtLeast("3.1.0") { args = append(args, "--ignore-missing-args") } if rsyncArgs != "" { args = append(args, strings.Split(rsyncArgs, " ")...) } args = append(args, []string{path, "localhost:/tmp/foo"}...) args = append(args, []string{"-e", rsyncCmd}...) cmd := exec.CommandContext(ctx, "rsync", args...) 
cmd.Stdout = os.Stderr stderr, err := cmd.StderrPipe() if err != nil { return nil, nil, nil, err } err = cmd.Start() if err != nil { return nil, nil, nil, err } conn, err := l.Accept() if err != nil { _ = cmd.Process.Kill() _ = cmd.Wait() return nil, nil, nil, err } _ = l.Close() return cmd, conn, stderr, nil } func protoSendError(ws *websocket.Conn, err error) { migration.ProtoSendControl(ws, err) if err != nil { closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") _ = ws.WriteMessage(websocket.CloseMessage, closeMsg) _ = ws.Close() } } incus-6.0.4/cmd/incus-migrate/utils.go000066400000000000000000000175311477363751000176670ustar00rootroot00000000000000package main import ( "bufio" "context" "crypto/x509" "encoding/pem" "fmt" "io" "net/url" "os" "path/filepath" "reflect" "strings" "golang.org/x/sys/unix" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/migration" "github.com/lxc/incus/v6/internal/ports" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/ws" ) func transferRootfs(ctx context.Context, dst incus.InstanceServer, op incus.Operation, rootfs string, rsyncArgs string, instanceType api.InstanceType) error { opAPI := op.Get() // Connect to the websockets wsControl, err := op.GetWebsocket(opAPI.Metadata[api.SecretNameControl].(string)) if err != nil { return err } abort := func(err error) error { protoSendError(wsControl, err) return err } wsFs, err := op.GetWebsocket(opAPI.Metadata[api.SecretNameFilesystem].(string)) if err != nil { return abort(err) } // Setup control struct var fs migration.MigrationFSType var rsyncHasFeature bool if instanceType == api.InstanceTypeVM { fs = migration.MigrationFSType_BLOCK_AND_RSYNC rsyncHasFeature = false } else { fs = migration.MigrationFSType_RSYNC rsyncHasFeature = true } offerHeader := migration.MigrationHeader{ RsyncFeatures: &migration.RsyncFeatures{ Xattrs: &rsyncHasFeature, Delete: &rsyncHasFeature, Compress: &rsyncHasFeature, }, Fs: &fs, } if instanceType == api.InstanceTypeVM { stat, err := os.Stat(filepath.Join(rootfs, "root.img")) if err != nil { return abort(err) } size := stat.Size() offerHeader.VolumeSize = &size rootfs = internalUtil.AddSlash(rootfs) } err = migration.ProtoSend(wsControl, &offerHeader) if err != nil { return abort(err) } var respHeader migration.MigrationHeader err = migration.ProtoRecv(wsControl, &respHeader) if err != nil { return abort(err) } rsyncFeaturesOffered := offerHeader.GetRsyncFeaturesSlice() rsyncFeaturesResponse := respHeader.GetRsyncFeaturesSlice() if !reflect.DeepEqual(rsyncFeaturesOffered, rsyncFeaturesResponse) { return abort(fmt.Errorf("Offered rsync features (%v) differ from those in the migration response (%v)", rsyncFeaturesOffered, rsyncFeaturesResponse)) } // Send the filesystem err = rsyncSend(ctx, wsFs, rootfs, rsyncArgs, instanceType) if err != nil { return abort(fmt.Errorf("Failed sending filesystem volume: %w", err)) } // Send block volume if instanceType == api.InstanceTypeVM { f, err := os.Open(filepath.Join(rootfs, "root.img")) if err != nil { return abort(err) } defer func() { _ = f.Close() }() conn := ws.NewWrapper(wsFs) go func() { <-ctx.Done() _ = conn.Close() _ = f.Close() }() _, err = io.Copy(conn, f) if err != nil { return abort(fmt.Errorf("Failed sending block volume: %w", err)) } err = conn.Close() if err != nil { return abort(err) } } // Check the result msg := 
migration.MigrationControl{} err = migration.ProtoRecv(wsControl, &msg) if err != nil { _ = wsControl.Close() return err } if !msg.GetSuccess() { return fmt.Errorf(msg.GetMessage()) } return nil } func (m *cmdMigrate) connectLocal() (incus.InstanceServer, error) { args := incus.ConnectionArgs{} args.UserAgent = fmt.Sprintf("LXC-MIGRATE %s", version.Version) return incus.ConnectIncusUnix("", &args) } func (m *cmdMigrate) connectTarget(url string, certPath string, keyPath string, authType string, token string) (incus.InstanceServer, string, error) { args := incus.ConnectionArgs{ AuthType: authType, } clientFingerprint := "" if authType == api.AuthenticationMethodTLS { var clientCrt []byte var clientKey []byte // Generate a new client certificate for this if certPath == "" || keyPath == "" { var err error clientCrt, clientKey, err = localtls.GenerateMemCert(true, false) if err != nil { return nil, "", err } clientFingerprint, err = localtls.CertFingerprintStr(string(clientCrt)) if err != nil { return nil, "", err } // When using certificate add tokens, there's no need to show the temporary certificate. if token == "" { fmt.Printf("\nYour temporary certificate is:\n%s\n", string(clientCrt)) } } else { var err error clientCrt, err = os.ReadFile(certPath) if err != nil { return nil, "", fmt.Errorf("Failed to read client certificate: %w", err) } clientKey, err = os.ReadFile(keyPath) if err != nil { return nil, "", fmt.Errorf("Failed to read client key: %w", err) } } args.TLSClientCert = string(clientCrt) args.TLSClientKey = string(clientKey) } // Attempt to connect using the system CA args.UserAgent = fmt.Sprintf("LXC-MIGRATE %s", version.Version) c, err := incus.ConnectIncus(url, &args) var certificate *x509.Certificate if err != nil { // Failed to connect using the system CA, so retrieve the remote certificate certificate, err = localtls.GetRemoteCertificate(url, args.UserAgent) if err != nil { return nil, "", err } } // Handle certificate prompt if certificate != nil { serverCrt := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certificate.Raw}) args.TLSServerCert = string(serverCrt) // Setup a new connection, this time with the remote certificate c, err = incus.ConnectIncus(url, &args) if err != nil { return nil, "", err } } // Get server information srv, _, err := c.GetServer() if err != nil { return nil, "", err } // Check if our cert is already trusted if srv.Auth == "trusted" { fmt.Printf("\nRemote server:\n Hostname: %s\n Version: %s\n\n", srv.Environment.ServerName, srv.Environment.ServerVersion) return c, "", nil } if authType == api.AuthenticationMethodTLS { if token != "" { req := api.CertificatesPost{ TrustToken: token, } err = c.CreateCertificate(req) if err != nil { return nil, "", fmt.Errorf("Failed to create certificate: %w", err) } } else { fmt.Println("A temporary client certificate was generated, use `incus config trust add` on the target server.") fmt.Println("") fmt.Print("Press ENTER after the certificate was added to the remote server: ") _, err = bufio.NewReader(os.Stdin).ReadString('\n') if err != nil { return nil, "", err } } } else { c.RequireAuthenticated(true) } // Get full server information srv, _, err = c.GetServer() if err != nil { if clientFingerprint != "" { _ = c.DeleteCertificate(clientFingerprint) } return nil, "", err } if srv.Auth == "untrusted" { return nil, "", fmt.Errorf("Server doesn't trust us after authentication") } fmt.Printf("\nRemote server:\n Hostname: %s\n Version: %s\n\n", srv.Environment.ServerName, srv.Environment.ServerVersion) 
return c, clientFingerprint, nil } func setupSource(path string, mounts []string) error { prefix := "/" if len(mounts) > 0 { prefix = mounts[0] } // Mount everything for _, mount := range mounts { target := fmt.Sprintf("%s/%s", path, strings.TrimPrefix(mount, prefix)) // Mount the path err := unix.Mount(mount, target, "none", unix.MS_BIND, "") if err != nil { return fmt.Errorf("Failed to mount %s: %w", mount, err) } // Make it read-only err = unix.Mount("", target, "none", unix.MS_BIND|unix.MS_RDONLY|unix.MS_REMOUNT, "") if err != nil { return fmt.Errorf("Failed to make %s read-only: %w", mount, err) } } return nil } func parseURL(URL string) (string, error) { u, err := url.Parse(URL) if err != nil { return "", err } // Create a URL with scheme and hostname since it wasn't provided if u.Scheme == "" && u.Host == "" && u.Path != "" { u, err = url.Parse(fmt.Sprintf("https://%s", u.Path)) if err != nil { return "", err } } // If no port was provided, use default port if u.Port() == "" { u.Host = fmt.Sprintf("%s:%d", u.Hostname(), ports.HTTPSDefaultPort) } return u.String(), nil } incus-6.0.4/cmd/incus-simplestreams/000077500000000000000000000000001477363751000174315ustar00rootroot00000000000000incus-6.0.4/cmd/incus-simplestreams/main.go000066400000000000000000000042321477363751000207050ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/version" ) type cmdGlobal struct { flagHelp bool flagVersion bool } func main() { app := &cobra.Command{} app.Use = "incus-simplestreams" app.Short = "Maintain and Incus-compatible simplestreams tree" app.Long = `Description: Maintain an Incus-compatible simplestreams tree This tool makes it easy to manage the files on a static image server using simplestreams index files as the publishing mechanism. ` app.SilenceUsage = true app.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true} // Global flags. globalCmd := cmdGlobal{} app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") // Help handling. app.SetHelpCommand(&cobra.Command{ Use: "no-help", Hidden: true, }) // Version handling. app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // add sub-command. addCmd := cmdAdd{global: &globalCmd} app.AddCommand(addCmd.Command()) // generate-metadata sub-command. generateMetadataCmd := cmdGenerateMetadata{global: &globalCmd} app.AddCommand(generateMetadataCmd.Command()) // list sub-command. listCmd := cmdList{global: &globalCmd} app.AddCommand(listCmd.Command()) // remove sub-command. removeCmd := cmdRemove{global: &globalCmd} app.AddCommand(removeCmd.Command()) // verify sub-command. verifyCmd := cmdVerify{global: &globalCmd} app.AddCommand(verifyCmd.Command()) pruneCmd := cmdPrune{global: &globalCmd} app.AddCommand(pruneCmd.Command()) // Run the main command and handle errors. err := app.Execute() if err != nil { os.Exit(1) } } // CheckArgs validates the number of arguments passed to the function and shows the help if incorrect. 
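// A typical caller mirrors the Run methods of the sub-commands below:
//
//     exit, err := c.global.CheckArgs(cmd, args, 1, 2)
//     if exit {
//         return err
//     }
//
// A nil error together with exit=true means the help was shown (no arguments
// were given) and the command should stop without reporting a failure.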
func (c *cmdGlobal) CheckArgs(cmd *cobra.Command, args []string, minArgs int, maxArgs int) (bool, error) { if len(args) < minArgs || (maxArgs != -1 && len(args) > maxArgs) { _ = cmd.Help() if len(args) == 0 { return true, nil } return true, fmt.Errorf("Invalid number of arguments") } return false, nil } incus-6.0.4/cmd/incus-simplestreams/main_add.go000066400000000000000000000237051477363751000215230ustar00rootroot00000000000000package main import ( "archive/tar" "context" "crypto/sha256" "encoding/json" "errors" "fmt" "io" "io/fs" "os" "strings" "time" "github.com/spf13/cobra" "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/archive" "github.com/lxc/incus/v6/shared/osarch" "github.com/lxc/incus/v6/shared/simplestreams" ) type cmdAdd struct { global *cmdGlobal flagAliases []string flagNoDefaultAlias bool } // Command generates the command definition. func (c *cmdAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "add []" cmd.Short = "Add an image" cmd.Long = cli.FormatSection("Description", `Add an image to the server This command parses the metadata tarball to retrieve the following fields from its metadata.yaml: - architecture - creation_date - properties["description"] - properties["os"] - properties["release"] - properties["variant"] - properties["architecture"] It then check computes the hash for the new image, confirm it's not already on the image server and finally adds it to the index. Unless "--no-default-alias" is specified, it generates a default "{os}/{release}/{variant}" alias. If one argument is specified, it is assumed to be a unified image, with both the metadata and rootfs in a single tarball. Otherwise, it is a split image (separate files for metadata and rootfs/disk). `) cmd.RunE = c.Run cmd.Flags().StringArrayVar(&c.flagAliases, "alias", nil, "Add alias") cmd.Flags().BoolVar(&c.flagNoDefaultAlias, "no-default-alias", false, "Do not add the default alias") return cmd } // dataItem - holds information about the image data file. // used if different from the metadata file. type dataItem struct { Path string FileType string Size int64 Sha256 string Extension string combinedSha256 string } // parseImage parses the metadata and data, filling the dataItem struct. func (c *cmdAdd) parseImage(metaFile *os.File, dataFile *os.File) (*dataItem, error) { item := dataItem{ Path: dataFile.Name(), } // Read the header. _, extension, _, err := archive.DetectCompressionFile(dataFile) if err != nil { return nil, err } item.Extension = extension if item.Extension == ".squashfs" { item.FileType = "squashfs" } else if item.Extension == ".qcow2" { item.FileType = "disk-kvm.img" } else { return nil, fmt.Errorf("Unsupported data type %q", item.Extension) } // Get the size. dataStat, err := dataFile.Stat() if err != nil { return nil, err } item.Size = dataStat.Size() // Get the sha256. _, err = dataFile.Seek(0, 0) if err != nil { return nil, err } hash256 := sha256.New() _, err = io.Copy(hash256, dataFile) if err != nil { return nil, err } item.Sha256 = fmt.Sprintf("%x", hash256.Sum(nil)) // Get the combined sha256. 
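// The combined hash is the SHA-256 of the metadata tarball immediately
// followed by the data file, equivalent to (hypothetical file names):
//
//     cat meta.incus.tar.xz rootfs.squashfs | sha256sum
//
// and is what lets clients verify the two files of a split image as a pair.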
_, err = metaFile.Seek(0, 0) if err != nil { return nil, err } _, err = dataFile.Seek(0, 0) if err != nil { return nil, err } hash256 = sha256.New() _, err = io.Copy(hash256, metaFile) if err != nil { return nil, err } _, err = io.Copy(hash256, dataFile) if err != nil { return nil, err } item.combinedSha256 = fmt.Sprintf("%x", hash256.Sum(nil)) return &item, nil } // Run runs the actual command logic. func (c *cmdAdd) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) if exit { return err } isUnifiedTarball := (len(args) == 1) // Open the metadata. metaFile, err := os.Open(args[0]) if err != nil { return err } defer metaFile.Close() // Read the header. _, _, unpacker, err := archive.DetectCompressionFile(metaFile) if err != nil { return err } // Get the size. metaStat, err := metaFile.Stat() if err != nil { return err } metaSize := metaStat.Size() // Get the sha256. _, err = metaFile.Seek(0, 0) if err != nil { return err } hash256 := sha256.New() _, err = io.Copy(hash256, metaFile) if err != nil { return err } metaSha256 := fmt.Sprintf("%x", hash256.Sum(nil)) // Set the metadata paths. metaPath := args[0] // Go through the tarball. _, err = metaFile.Seek(0, 0) if err != nil { return err } metaTar, metaTarCancel, err := archive.CompressedTarReader(context.Background(), metaFile, unpacker, "") if err != nil { return err } defer metaTarCancel() var hdr *tar.Header for { hdr, err = metaTar.Next() if err != nil { if err == io.EOF { break } return err } if hdr.Name == "metadata.yaml" { break } } if hdr == nil || hdr.Name != "metadata.yaml" { return fmt.Errorf("Couldn't find metadata.yaml in metadata tarball") } // Parse the metadata. metadata := api.ImageMetadata{} body, err := io.ReadAll(metaTar) if err != nil { return err } err = yaml.Unmarshal(body, &metadata) if err != nil { return err } // Validate the metadata. _, err = osarch.ArchitectureId(metadata.Architecture) if err != nil { return fmt.Errorf("Invalid architecture in metadata.yaml: %w", err) } if metadata.CreationDate == 0 { return fmt.Errorf("Missing creation date in metadata.yaml") } for _, prop := range []string{"os", "release", "variant", "architecture", "description"} { _, ok := metadata.Properties[prop] if !ok { return fmt.Errorf("Missing property %q in metadata.yaml", prop) } } var data *dataItem if !isUnifiedTarball { // Open the data. dataFile, err := os.Open(args[1]) if err != nil { return err } defer dataFile.Close() // Parse the content. data, err = c.parseImage(metaFile, dataFile) if err != nil { return err } } // Create the paths if missing. err = os.MkdirAll("images", 0o755) if err != nil && !os.IsExist(err) { return err } err = os.MkdirAll("streams/v1", 0o755) if err != nil && !os.IsExist(err) { return err } // Load the images file. products := simplestreams.Products{} body, err = os.ReadFile("streams/v1/images.json") if err != nil { if !errors.Is(err, fs.ErrNotExist) { return err } // Create a blank images file. products = simplestreams.Products{ ContentID: "images", DataType: "image-downloads", Format: "products:1.0", Products: map[string]simplestreams.Product{}, } } else { // Parse the existing images file. err = json.Unmarshal(body, &products) if err != nil { return err } } // Check if the product already exists. 
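// Product keys follow the "{os}:{release}:{variant}:{architecture}" convention, for example
// "debian:12:cloud:amd64" (hypothetical values, shown for illustration).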
productName := fmt.Sprintf("%s:%s:%s:%s", metadata.Properties["os"], metadata.Properties["release"], metadata.Properties["variant"], metadata.Properties["architecture"]) product, ok := products.Products[productName] if !ok { var aliases []string if !c.flagNoDefaultAlias { // Generate a default alias aliases = append(aliases, fmt.Sprintf("%s/%s/%s", metadata.Properties["os"], metadata.Properties["release"], metadata.Properties["variant"])) } aliases = append(aliases, c.flagAliases...) // Create a new product. product = simplestreams.Product{ Aliases: strings.Join(aliases, ","), Architecture: metadata.Properties["architecture"], OperatingSystem: metadata.Properties["os"], Release: metadata.Properties["release"], ReleaseTitle: metadata.Properties["release"], Variant: metadata.Properties["variant"], Versions: map[string]simplestreams.ProductVersion{}, } } var fileType, fileKey, metaTargetPath string if !isUnifiedTarball { fileKey = "incus.tar.xz" fileType = "incus.tar.xz" metaTargetPath = fmt.Sprintf("images/%s.incus.tar.xz", metaSha256) } else { fileKey = "incus_combined.tar.gz" fileType = "incus_combined.tar.gz" metaTargetPath = fmt.Sprintf("images/%s.incus_combined.tar.gz", metaSha256) } // Check if a version already exists. versionName := time.Unix(metadata.CreationDate, 0).Format("200601021504") version, ok := product.Versions[versionName] if !ok { // Create a new version. version = simplestreams.ProductVersion{ Items: map[string]simplestreams.ProductVersionItem{ fileKey: { FileType: fileType, HashSha256: metaSha256, Size: metaSize, Path: metaTargetPath, }, }, } } else { // Check that we're dealing with the same metadata. _, ok := version.Items[fileKey] if !ok { // No fileKey found, add it. version.Items[fileKey] = simplestreams.ProductVersionItem{ FileType: fileType, HashSha256: metaSha256, Size: metaSize, Path: metaTargetPath, } } } // Copy the metadata file if missing. err = internalUtil.FileCopy(metaPath, metaTargetPath) if err != nil && !os.IsExist(err) { return err } if !isUnifiedTarball { // Check that the data file isn't already in. _, ok = version.Items[data.FileType] if ok { return fmt.Errorf("Already have a %q file for this image", data.FileType) } dataTargetPath := fmt.Sprintf("images/%s%s", metaSha256, data.Extension) // Add the file entry. version.Items[data.FileType] = simplestreams.ProductVersionItem{ FileType: data.FileType, HashSha256: data.Sha256, Size: data.Size, Path: dataTargetPath, } // Add the combined hash. metaItem := version.Items["incus.tar.xz"] if data.FileType == "squashfs" { metaItem.CombinedSha256SquashFs = data.combinedSha256 } else if data.FileType == "disk-kvm.img" { metaItem.CombinedSha256DiskKvmImg = data.combinedSha256 } version.Items["incus.tar.xz"] = metaItem // Copy the data file if missing. err = internalUtil.FileCopy(data.Path, dataTargetPath) if err != nil && !os.IsExist(err) { return err } } // Update the version. product.Versions[versionName] = version // Update the product. products.Products[productName] = product // Write back the images file. body, err = json.Marshal(&products) if err != nil { return err } err = os.WriteFile("streams/v1/images.json", body, 0o644) if err != nil { return err } // Re-generate the index. 
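// writeIndex (see utils.go below) regenerates streams/v1/index.json so that it points at
// streams/v1/images.json and lists the current set of product names.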
err = writeIndex(&products) if err != nil { return err } return nil } incus-6.0.4/cmd/incus-simplestreams/main_generate_metadata.go000066400000000000000000000073141477363751000244230ustar00rootroot00000000000000package main import ( "archive/tar" "bufio" "fmt" "io" "os" "os/exec" "time" "github.com/spf13/cobra" yaml "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/ask" "github.com/lxc/incus/v6/shared/osarch" ) type cmdGenerateMetadata struct { global *cmdGlobal } // Command generates the command definition. func (c *cmdGenerateMetadata) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "generate-metadata " cmd.Short = "Generate a metadata tarball" cmd.Long = cli.FormatSection("Description", `Generate a metadata tarball This command produces an incus.tar.xz tarball for use with an existing QCOW2 or squashfs disk image. This command will prompt for all of the metadata tarball fields: - Operating system name - Release - Variant - Architecture - Description `) cmd.RunE = c.Run return cmd } // Run runs the actual command logic. func (c *cmdGenerateMetadata) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Setup asker. asker := ask.NewAsker(bufio.NewReader(os.Stdin)) // Create the tarball. metaFile, err := os.Create(args[0]) if err != nil { return err } defer metaFile.Close() // Generate the metadata. timestamp := time.Now().UTC() metadata := api.ImageMetadata{ Properties: map[string]string{}, CreationDate: timestamp.Unix(), } // Question - os metaOS, err := asker.AskString("Operating system name: ", "", nil) if err != nil { return err } metadata.Properties["os"] = metaOS // Question - release metaRelease, err := asker.AskString("Release name: ", "", nil) if err != nil { return err } metadata.Properties["release"] = metaRelease // Question - variant metaVariant, err := asker.AskString("Variant name [default=\"default\"]: ", "default", nil) if err != nil { return err } metadata.Properties["variant"] = metaVariant // Question - architecture var incusArch string metaArchitecture, err := asker.AskString("Architecture name: ", "", func(value string) error { id, err := osarch.ArchitectureId(value) if err != nil { return err } incusArch, err = osarch.ArchitectureName(id) if err != nil { return err } return nil }) if err != nil { return err } metadata.Properties["architecture"] = metaArchitecture metadata.Architecture = incusArch // Question - description defaultDescription := fmt.Sprintf("%s %s (%s) (%s) (%s)", metaOS, metaRelease, metaVariant, metaArchitecture, timestamp.Format("200601021504")) metaDescription, err := asker.AskString(fmt.Sprintf("Description [default=\"%s\"]: ", defaultDescription), defaultDescription, nil) if err != nil { return err } metadata.Properties["description"] = metaDescription // Generate YAML. body, err := yaml.Marshal(&metadata) if err != nil { return err } // Prepare the tarball. tarPipeReader, tarPipeWriter := io.Pipe() tarWriter := tar.NewWriter(tarPipeWriter) // Compress the tarball. chDone := make(chan error) go func() { cmd := exec.Command("xz", "-9", "-c") cmd.Stdin = tarPipeReader cmd.Stdout = metaFile err := cmd.Run() chDone <- err }() // Add metadata.yaml. 
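// The resulting metadata.yaml carries the architecture, creation_date and the collected
// properties, roughly like this (hypothetical values):
//
//	architecture: x86_64
//	creation_date: 1700000000
//	properties:
//	  os: Debian
//	  release: "12"
//	  variant: default
//	  architecture: x86_64
//	  description: Debian 12 (default) (x86_64) (202311141200)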
hdr := &tar.Header{ Name: "metadata.yaml", Size: int64(len(body)), Mode: 0o644, Uname: "root", Gname: "root", ModTime: time.Now(), } err = tarWriter.WriteHeader(hdr) if err != nil { return err } _, err = tarWriter.Write(body) if err != nil { return err } // Close the tarball. err = tarWriter.Close() if err != nil { return err } err = tarPipeWriter.Close() if err != nil { return err } err = <-chDone if err != nil { return err } return nil } incus-6.0.4/cmd/incus-simplestreams/main_list.go000066400000000000000000000035301477363751000217400ustar00rootroot00000000000000package main import ( "os" "sort" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/shared/simplestreams" ) type cmdList struct { global *cmdGlobal flagFormat string } // Command generates the command definition. func (c *cmdList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" cmd.Short = "List all images on the server" cmd.Long = cli.FormatSection("Description", `List all images on the server This renders a table with all images currently published on the server. `) cmd.RunE = c.Run cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", `Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable it if missing, e.g. csv,header`+"``") cmd.PreRunE = func(cmd *cobra.Command, _ []string) error { return cli.ValidateFlagFormatForListOutput(cmd.Flag("format").Value.String()) } return cmd } // Run runs the actual command logic. func (c *cmdList) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 0) if exit { return err } // Get a simplestreams client. ss := simplestreams.NewLocalClient("") // Get all the images. images, err := ss.ListImages() if err != nil { return err } // Generate the table. data := [][]string{} for _, image := range images { data = append(data, []string{image.Fingerprint, image.Properties["description"], image.Properties["os"], image.Properties["release"], image.Properties["variant"], image.Architecture, image.Type, image.CreatedAt.Format("2006/01/02 15:04 MST")}) } sort.Sort(cli.SortColumnsNaturally(data)) header := []string{ "FINGERPRINT", "DESCRIPTION", "OS", "RELEASE", "VARIANT", "ARCHITECTURE", "TYPE", "CREATED", } return cli.RenderTable(os.Stdout, c.flagFormat, header, data, images) } incus-6.0.4/cmd/incus-simplestreams/main_prune.go000066400000000000000000000114701477363751000221200ustar00rootroot00000000000000package main import ( "encoding/json" "errors" "fmt" "io/fs" "os" "path/filepath" "slices" "sort" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/shared/simplestreams" ) type cmdPrune struct { global *cmdGlobal flagDryRun bool flagRetention int flagVerbose bool } // Command generates the command definition. func (c *cmdPrune) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "prune" cmd.Short = "Clean up obsolete files and data" cmd.Long = cli.FormatSection("Description", `Cleans up obsolete tarball files and removes outdated versions of a product The prune command scans the project directory for tarball files that do not have corresponding references in the 'images.json' file. Any tarball file that is not listed in images.json is considered orphaned and will be deleted.
Additionally this command will delete older images, keeping a configurable number of older images per product.`) cmd.RunE = c.Run cmd.Flags().BoolVarP(&c.flagDryRun, "dry-run", "d", false, "Preview changes without executing actual operations") cmd.Flags().IntVarP(&c.flagRetention, "retention", "r", 2, "Number of older versions of the product to preserve"+"``") cmd.Flags().BoolVarP(&c.flagVerbose, "verbose", "v", false, "Show all information messages") return cmd } // Run runs the actual command logic. func (c *cmdPrune) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 0) if exit { return err } if c.flagDryRun { c.flagVerbose = true } err = c.prune() if err != nil { return err } return nil } func (c *cmdPrune) pruneFiles(products *simplestreams.Products, filesToPreserve []string) error { deletedFiles := []string{} err := filepath.WalkDir("./images", func(path string, d fs.DirEntry, err error) error { if err != nil { return err } // Omit the path if it is a directory or if it exists in the images.json file. if d.IsDir() || slices.Contains(filesToPreserve, path) { return nil } if c.flagVerbose { deletedFiles = append(deletedFiles, path) } if !c.flagDryRun { e := os.Remove(path) if e != nil { return e } } return nil }) if err != nil { return err } if c.flagVerbose && len(deletedFiles) > 0 { fmt.Printf("Following files were removed:\n") for _, file := range deletedFiles { fmt.Println(file) } } return nil } func (c *cmdPrune) prune() error { body, err := os.ReadFile("streams/v1/images.json") if err != nil { return err } products := simplestreams.Products{} err = json.Unmarshal(body, &products) if err != nil { return err } filesToPreserve := []string{} deletedItems := []string{} deletedVersions := []string{} for kProduct, product := range products.Products { versionNames := []string{} for kVersion, version := range product.Versions { for kItem, item := range version.Items { _, err := os.Stat(item.Path) if err != nil { if !errors.Is(err, os.ErrNotExist) { return err } if c.flagVerbose { deletedItems = append(deletedItems, fmt.Sprintf("%s:%s:%s", kProduct, kVersion, item.Path)) } // Corresponding file doesn't exist on disk. Remove item from products. delete(version.Items, kItem) } filesToPreserve = append(filesToPreserve, item.Path) } if len(version.Items) == 0 { delete(product.Versions, kVersion) continue } versionNames = append(versionNames, kVersion) } if len(product.Versions) == 0 { delete(products.Products, kProduct) continue } sort.Strings(versionNames) updatedVersions := map[string]simplestreams.ProductVersion{} iteration := 0 for i := len(versionNames) - 1; i >= 0; i-- { version := versionNames[i] if iteration <= c.flagRetention { updatedVersions[version] = product.Versions[version] } else if c.flagVerbose { deletedVersions = append(deletedVersions, fmt.Sprintf("%s:%s", kProduct, version)) } iteration += 1 } p := products.Products[kProduct] p.Versions = updatedVersions products.Products[kProduct] = p } if c.flagVerbose { if len(deletedItems) > 0 { fmt.Printf("Following items were removed from images.json:\n") for _, item := range deletedItems { fmt.Println(item) } } if len(deletedVersions) > 0 { fmt.Printf("Following versions were removed:\n") for _, version := range deletedVersions { fmt.Println(version) } } } if !c.flagDryRun { // Write back the images file. 
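// As in the add sub-command, images.json is rewritten first and index.json is regenerated from
// it; with --dry-run both writes are skipped, as are the file removals in pruneFiles.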
body, err = json.Marshal(&products) if err != nil { return err } err = os.WriteFile("streams/v1/images.json", body, 0o644) if err != nil { return err } // Re-generate the index. err = writeIndex(&products) if err != nil { return err } } err = c.pruneFiles(&products, filesToPreserve) if err != nil { return err } return nil } incus-6.0.4/cmd/incus-simplestreams/main_remove.go000066400000000000000000000072021477363751000222620ustar00rootroot00000000000000package main import ( "encoding/json" "errors" "fmt" "io/fs" "os" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/shared/simplestreams" ) type cmdRemove struct { global *cmdGlobal flagVerbose bool } // Command generates the command definition. func (c *cmdRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "remove " cmd.Short = "Remove an image" cmd.Long = cli.FormatSection("Description", `Remove an image from the server This command locates the image from its fingerprint and removes it from the index. `) cmd.RunE = c.Run cmd.Flags().BoolVarP(&c.flagVerbose, "verbose", "v", false, "Show all information messages") return cmd } func (c *cmdRemove) remove(path string) error { if c.flagVerbose { fmt.Printf("deleting: %s\n", path) } err := os.Remove(path) if err != nil && !errors.Is(err, fs.ErrNotExist) { return err } return nil } // Run runs the actual command logic. func (c *cmdRemove) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Get a simplestreams client. ss := simplestreams.NewLocalClient("") // Get the image. image, err := ss.GetImage(args[0]) if err != nil { return err } // Load the images file. body, err := os.ReadFile("streams/v1/images.json") if err != nil { return err } products := simplestreams.Products{} err = json.Unmarshal(body, &products) if err != nil { return err } // Delete the image entry. for kProduct, product := range products.Products { if product.OperatingSystem != image.Properties["os"] || product.Release != image.Properties["release"] || product.Variant != image.Properties["variant"] || product.Architecture != image.Properties["architecture"] { continue } for kVersion, version := range product.Versions { // Get the metadata entry. metaEntry, ok := version.Items["incus.tar.xz"] if ok { if metaEntry.CombinedSha256DiskKvmImg == image.Fingerprint { // Deleting a VM image. err = c.remove(version.Items["disk-kvm.img"].Path) if err != nil { return err } delete(version.Items, "disk-kvm.img") metaEntry.CombinedSha256DiskKvmImg = "" } else if metaEntry.CombinedSha256SquashFs == image.Fingerprint { // Deleting a container image. err = c.remove(version.Items["squashfs"].Path) if err != nil && !errors.Is(err, fs.ErrNotExist) { return err } delete(version.Items, "squashfs") metaEntry.CombinedSha256SquashFs = "" } else { continue } // Update the metadata entry. version.Items["incus.tar.xz"] = metaEntry // Delete the version if it's now empty. if len(version.Items) == 1 { err = c.remove(metaEntry.Path) if err != nil { return err } delete(product.Versions, kVersion) } } metaEntry, ok = version.Items["incus_combined.tar.gz"] if ok { if metaEntry.HashSha256 == image.Fingerprint { err = c.remove(metaEntry.Path) if err != nil { return err } delete(version.Items, "incus_combined.tar.gz") } // Delete the version if it's now empty. 
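// "Empty" here means no items remain at all: for unified images the incus_combined.tar.gz entry
// was the only item, unlike the split-image case above where the incus.tar.xz metadata entry
// remains (hence the len(version.Items) == 1 check there).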
if len(version.Items) == 0 { delete(product.Versions, kVersion) } } } if len(product.Versions) == 0 { delete(products.Products, kProduct) } break } // Write back the images file. body, err = json.Marshal(&products) if err != nil { return err } err = os.WriteFile("streams/v1/images.json", body, 0o644) if err != nil { return err } // Re-generate the index. err = writeIndex(&products) if err != nil { return err } return nil } incus-6.0.4/cmd/incus-simplestreams/main_verify.go000066400000000000000000000044571477363751000223020ustar00rootroot00000000000000package main import ( "crypto/sha256" "encoding/json" "errors" "fmt" "io" "io/fs" "os" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/shared/simplestreams" ) type cmdVerify struct { global *cmdGlobal } // Command generates the command definition. func (c *cmdVerify) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "verify" cmd.Short = "Verify the integrity of the server" cmd.Long = cli.FormatSection("Description", `Verify the integrity of the server This command will analyze the image index and for every image and file in the index, will validate that the files on disk exist and are of the correct size and content. `) cmd.RunE = c.Run return cmd } // Run runs the actual command logic. func (c *cmdVerify) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 0) if exit { return err } // Load the images file. products := simplestreams.Products{} body, err := os.ReadFile("streams/v1/images.json") if err != nil { if errors.Is(err, fs.ErrNotExist) { return nil } return err } // Parse the existing images file. err = json.Unmarshal(body, &products) if err != nil { return err } // Go over all the files. for _, product := range products.Products { for _, version := range product.Versions { for _, item := range version.Items { // Open the data. dataFile, err := os.Open(item.Path) if err != nil { if errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("Missing image file %q", item.Path) } return err } // Get the size. dataStat, err := dataFile.Stat() if err != nil { return err } if item.Size != dataStat.Size() { return fmt.Errorf("File %q has a different size than listed in the index", item.Path) } // Get the sha256. _, err = dataFile.Seek(0, 0) if err != nil { return err } hash256 := sha256.New() _, err = io.Copy(hash256, dataFile) if err != nil { return err } dataSha256 := fmt.Sprintf("%x", hash256.Sum(nil)) if item.HashSha256 != dataSha256 { return fmt.Errorf("File %q has a different SHA256 hash than listed in the index", item.Path) } // Done with this file. dataFile.Close() } } } return nil } incus-6.0.4/cmd/incus-simplestreams/utils.go000066400000000000000000000014371477363751000211250ustar00rootroot00000000000000package main import ( "encoding/json" "os" "github.com/lxc/incus/v6/shared/simplestreams" ) func writeIndex(products *simplestreams.Products) error { // Update the product list. productNames := make([]string, 0, len(products.Products)) for name := range products.Products { productNames = append(productNames, name) } // Write a new index file. 
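// The resulting streams/v1/index.json is a standard simplestreams index; assuming the usual
// field names, it looks roughly like:
//
//	{"format": "index:1.0", "index": {"images": {"datatype": "image-downloads",
//	  "path": "streams/v1/images.json", "format": "products:1.0", "products": [...]}}}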
stream := simplestreams.Stream{ Format: "index:1.0", Index: map[string]simplestreams.StreamIndex{ "images": { DataType: "image-downloads", Path: "streams/v1/images.json", Format: "products:1.0", Products: productNames, }, }, } body, err := json.Marshal(&stream) if err != nil { return err } err = os.WriteFile("streams/v1/index.json", body, 0o644) if err != nil { return err } return nil } incus-6.0.4/cmd/incus-user/000077500000000000000000000000001477363751000155175ustar00rootroot00000000000000incus-6.0.4/cmd/incus-user/main.go000066400000000000000000000032321477363751000167720ustar00rootroot00000000000000package main import ( "fmt" "os" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/logger" ) type cmdGlobal struct { flagHelp bool flagVersion bool flagLogVerbose bool flagLogDebug bool } // PreRun runs immediately prior to the main Run function. func (c *cmdGlobal) PreRun(cmd *cobra.Command, args []string) error { return logger.InitLogger("", "", c.flagLogVerbose, c.flagLogDebug, nil) } func run() error { // daemon command (main) daemonCmd := cmdDaemon{} app := daemonCmd.Command() app.Use = "incus-user" app.Short = "Incus user project daemon" app.Long = `Description: Incus user project daemon This daemon is used to allow users that aren't considered to be Incus administrators access to a personal project with suitable restrictions. ` app.SilenceUsage = true app.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true} // Global flags globalCmd := cmdGlobal{} app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVarP(&globalCmd.flagHelp, "help", "h", false, "Print help") app.PersistentFlags().BoolVarP(&globalCmd.flagLogVerbose, "verbose", "v", false, "Show all information messages") app.PersistentFlags().BoolVarP(&globalCmd.flagLogDebug, "debug", "d", false, "Show debug messages") app.PersistentPreRunE = globalCmd.PreRun // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // Run the main command and handle errors return app.Execute() } func main() { err := run() if err != nil { fmt.Fprintf(os.Stderr, "Error: %v", err) os.Exit(1) } } incus-6.0.4/cmd/incus-user/main_daemon.go000066400000000000000000000104171477363751000203200ustar00rootroot00000000000000package main import ( "errors" "fmt" "net" "os" "os/user" "strconv" "sync" "time" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/linux" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/shared/logger" ) var ( mu sync.RWMutex connections uint64 transactions uint64 ) var projectNames []string type cmdDaemon struct { flagGroup string } func (c *cmdDaemon) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "incus-user" cmd.RunE = c.Run cmd.Flags().StringVar(&c.flagGroup, "group", "", "The group of users that will be allowed to talk to incus-user"+"``") return cmd } func (c *cmdDaemon) Run(cmd *cobra.Command, args []string) error { // Only root should run this. if os.Geteuid() != 0 { return fmt.Errorf("This must be run as root") } // Create storage. err := os.MkdirAll(internalUtil.VarPath("users"), 0o700) if err != nil && !os.IsExist(err) { return fmt.Errorf("Couldn't create storage: %w", err) } // Connect. 
logger.Debug("Connecting to the daemon") client, err := incus.ConnectIncusUnix("", nil) if err != nil { return fmt.Errorf("Unable to connect to the daemon: %w", err) } cinfo, err := client.GetConnectionInfo() if err != nil { return fmt.Errorf("Failed to obtain connection info: %w", err) } // Keep track of the socket path we used to successfully connect to the server serverUnixPath := cinfo.SocketPath // Validate the configuration. ok, err := serverIsConfigured(client) if err != nil { return fmt.Errorf("Failed to check the configuration: %w", err) } if !ok { logger.Info("Performing initial configuration") err = serverInitialConfiguration(client) if err != nil { return fmt.Errorf("Failed to apply initial configuration: %w", err) } } // Pull the list of projects. projectNames, err = client.GetProjectNames() if err != nil { return fmt.Errorf("Failed to pull project list: %w", err) } // Disconnect. client.Disconnect() // Setup the unix socket. listeners := linux.GetSystemdListeners(linux.SystemdListenFDsStart) if len(listeners) > 1 { return fmt.Errorf("More than one socket-activation FD received") } var listener *net.UnixListener if len(listeners) == 1 { // Handle socket activation. unixListener, ok := listeners[0].(*net.UnixListener) if !ok { return fmt.Errorf("Socket-activation FD isn't a unix socket") } listener = unixListener // Automatically shutdown after inactivity. go func() { for { time.Sleep(30 * time.Second) // Check for active connections. mu.RLock() if connections > 0 { mu.RUnlock() continue } // Look for recent activity oldCount := transactions mu.RUnlock() time.Sleep(5 * time.Second) mu.RLock() if oldCount == transactions { mu.RUnlock() // Daemon has been inactive for 10s, exit. os.Exit(0) } mu.RUnlock() } }() } else { // Create our own socket. unixPath := internalUtil.VarPath("unix.socket.user") err := os.Remove(unixPath) if err != nil && !errors.Is(err, os.ErrNotExist) { return fmt.Errorf("Failed to delete pre-existing unix socket: %w", err) } unixAddr, err := net.ResolveUnixAddr("unix", unixPath) if err != nil { return fmt.Errorf("Unable to resolve unix socket: %w", err) } server, err := net.ListenUnix("unix", unixAddr) if err != nil { return fmt.Errorf("Unable to setup unix socket: %w", err) } err = os.Chmod(unixPath, 0o660) if err != nil { return fmt.Errorf("Unable to set socket permissions: %w", err) } if c.flagGroup != "" { g, err := user.LookupGroup(c.flagGroup) if err != nil { return fmt.Errorf("Cannot get group ID of '%s': %w", c.flagGroup, err) } gid, err := strconv.Atoi(g.Gid) if err != nil { return err } err = os.Chown(unixPath, os.Getuid(), gid) if err != nil { return fmt.Errorf("Cannot change ownership on local socket: %w", err) } } server.SetUnlinkOnClose(true) listener = server } // Start accepting requests. logger.Info("Starting up the server") for { // Accept new connection. conn, err := listener.AcceptUnix() if err != nil { logger.Errorf("Failed to accept new connection: %v", err) continue } go proxyConnection(conn, serverUnixPath) } } incus-6.0.4/cmd/incus-user/proxy.go000066400000000000000000000062131477363751000172310ustar00rootroot00000000000000package main import ( "crypto/tls" "fmt" "io" "net" "os" "slices" log "github.com/sirupsen/logrus" "github.com/lxc/incus/v6/internal/linux" internalUtil "github.com/lxc/incus/v6/internal/util" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/util" ) func tlsConfig(uid uint32) (*tls.Config, error) { // Load the client certificate. 
content, err := os.ReadFile(internalUtil.VarPath("users", fmt.Sprintf("%d", uid), "client.crt")) if err != nil { return nil, fmt.Errorf("Unable to open client certificate: %w", err) } tlsClientCert := string(content) // Load the client key. content, err = os.ReadFile(internalUtil.VarPath("users", fmt.Sprintf("%d", uid), "client.key")) if err != nil { return nil, fmt.Errorf("Unable to open client key: %w", err) } tlsClientKey := string(content) // Load the server certificate. certPath := internalUtil.VarPath("cluster.crt") if !util.PathExists(certPath) { certPath = internalUtil.VarPath("server.crt") } content, err = os.ReadFile(certPath) if err != nil { return nil, fmt.Errorf("Unable to open server certificate: %w", err) } tlsServerCert := string(content) return localtls.GetTLSConfigMem(tlsClientCert, tlsClientKey, "", tlsServerCert, false) } func proxyConnection(conn *net.UnixConn, serverUnixPath string) { defer func() { _ = conn.Close() mu.Lock() connections -= 1 mu.Unlock() }() // Increase counters. mu.Lock() transactions += 1 connections += 1 mu.Unlock() // Get credentials. creds, err := linux.GetUcred(conn) if err != nil { log.Errorf("Unable to get user credentials: %s", err) return } // Setup logging context. logger := log.WithFields(log.Fields{ "uid": creds.Uid, "gid": creds.Gid, "pid": creds.Pid, }) logger.Debug("Connected") defer logger.Debug("Disconnected") // Check if the user was setup. if !util.PathExists(internalUtil.VarPath("users", fmt.Sprintf("%d", creds.Uid))) || !slices.Contains(projectNames, fmt.Sprintf("user-%d", creds.Uid)) { log.Infof("Setting up for uid %d", creds.Uid) err := serverSetupUser(creds.Uid) if err != nil { log.Errorf("Failed to setup new user: %v", err) return } } // Connect to the daemon. unixAddr, err := net.ResolveUnixAddr("unix", serverUnixPath) if err != nil { log.Errorf("Unable to resolve the target server: %v", err) return } client, err := net.DialUnix("unix", nil, unixAddr) if err != nil { log.Errorf("Unable to connect to target server: %v", err) return } defer func() { _ = client.Close() }() // Get the TLS configuration tlsConfig, err := tlsConfig(creds.Uid) if err != nil { log.Errorf("Failed to load TLS connection settings: %v", err) return } // Setup TLS. _, err = client.Write([]byte("STARTTLS\n")) if err != nil { log.Errorf("Failed to setup TLS connection to target server: %v", err) return } tlsClient := tls.Client(client, tlsConfig) // Establish the TLS handshake. err = tlsClient.Handshake() if err != nil { _ = conn.Close() log.Errorf("Failed TLS handshake with target server: %v", err) return } // Start proxying. go func() { _, _ = io.Copy(conn, tlsClient) }() _, _ = io.Copy(tlsClient, conn) } incus-6.0.4/cmd/incus-user/server.go000066400000000000000000000213621477363751000173600ustar00rootroot00000000000000package main import ( "encoding/base64" "fmt" "net/http" "os" "path/filepath" "slices" "strconv" "strings" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/linux" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/idmap" "github.com/lxc/incus/v6/shared/revert" "github.com/lxc/incus/v6/shared/subprocess" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/util" ) func serverIsConfigured(client incus.InstanceServer) (bool, error) { // Look for networks. 
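// A server counts as configured once both the "incusbr0" managed bridge and a "default" storage
// pool exist; otherwise the caller runs serverInitialConfiguration to create them.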
networks, err := client.GetNetworkNames() if err != nil { return false, fmt.Errorf("Failed to list networks: %w", err) } if !slices.Contains(networks, "incusbr0") { // Couldn't find incusbr0. return false, nil } // Look for storage pools. pools, err := client.GetStoragePoolNames() if err != nil { return false, fmt.Errorf("Failed to list storage pools: %w", err) } if !slices.Contains(pools, "default") { // No storage pool found. return false, nil } return true, nil } func serverInitialConfiguration(client incus.InstanceServer) error { // Load current server config. info, _, err := client.GetServer() if err != nil { return fmt.Errorf("Failed to get server info: %w", err) } availableBackends := linux.AvailableStorageDrivers(internalUtil.VarPath(), info.Environment.StorageSupportedDrivers, internalUtil.PoolTypeLocal) // Load the default profile. var profileNeedsUpdate bool profile, profileEtag, err := client.GetProfile("default") if err != nil { return fmt.Errorf("Failed to load default profile: %w", err) } // Look for storage pools. pools, err := client.GetStoragePools() if err != nil { return fmt.Errorf("Failed to list storage pools: %w", err) } if len(pools) == 0 { pool := api.StoragePoolsPost{} pool.Config = map[string]string{} pool.Name = "default" // Check if ZFS supported. if slices.Contains(availableBackends, "zfs") { pool.Driver = "zfs" // Check if zsys. poolName, _ := subprocess.RunCommand("zpool", "get", "-H", "-o", "value", "name", "rpool") if strings.TrimSpace(poolName) == "rpool" { pool.Config["source"] = "rpool/incus" } } else { // Fallback to dir backend. pool.Driver = "dir" } // Create the storage pool. err := client.CreateStoragePool(pool) if err != nil { return fmt.Errorf("Failed to create storage pool: %w", err) } // Add to default profile in default project. profile.Devices["root"] = map[string]string{ "type": "disk", "pool": "default", "path": "/", } profileNeedsUpdate = true } // Look for networks. networks, err := client.GetNetworks() if err != nil { return fmt.Errorf("Failed to list networks: %w", err) } found := false for _, network := range networks { if network.Managed { found = true break } } if !found { // Create incusbr0. network := api.NetworksPost{} network.Config = map[string]string{} network.Type = "bridge" network.Name = "incusbr0" err := client.CreateNetwork(network) if err != nil { return fmt.Errorf("Failed to create network: %w", err) } // Add to default profile in default project. profile.Devices["eth0"] = map[string]string{ "type": "nic", "network": "incusbr0", "name": "eth0", } profileNeedsUpdate = true } // Update the default profile. if profileNeedsUpdate { err = client.UpdateProfile("default", profile.Writable(), profileEtag) if err != nil { return fmt.Errorf("Failed to update default profile: %w", err) } } return nil } func serverSetupUser(uid uint32) error { projectName := fmt.Sprintf("user-%d", uid) networkName := fmt.Sprintf("incusbr-%d", uid) if len(networkName) > 15 { // For long UIDs, use a shorter slightly less descriptive interface name. networkName = fmt.Sprintf("user-%d", uid) } userPath := internalUtil.VarPath("users", fmt.Sprintf("%d", uid)) // User account. out, err := subprocess.RunCommand("getent", "passwd", fmt.Sprintf("%d", uid)) if err != nil { return fmt.Errorf("Failed to retrieve user information: %w", err) } pw := strings.Split(out, ":") if len(pw) != 7 { return fmt.Errorf("Invalid user entry") } // Setup reverter. revert := revert.New() defer revert.Fail() // Create certificate directory. 
err = os.MkdirAll(userPath, 0o700) if err != nil { return fmt.Errorf("Failed to create user directory: %w", err) } revert.Add(func() { _ = os.RemoveAll(userPath) }) // Generate certificate. if !util.PathExists(filepath.Join(userPath, "client.crt")) || !util.PathExists(filepath.Join(userPath, "client.key")) { err = localtls.FindOrGenCert(filepath.Join(userPath, "client.crt"), filepath.Join(userPath, "client.key"), true, false) if err != nil { return fmt.Errorf("Failed to generate user certificate: %w", err) } } // Connect to the daemon. client, err := incus.ConnectIncusUnix("", nil) if err != nil { return fmt.Errorf("Unable to connect to the daemon: %w", err) } _, _, _ = client.GetServer() if !slices.Contains(projectNames, projectName) { // Create the project. err := client.CreateProject(api.ProjectsPost{ Name: projectName, ProjectPut: api.ProjectPut{ Description: fmt.Sprintf("User restricted project for %q (%s)", pw[0], pw[2]), Config: map[string]string{ "features.images": "true", "features.networks": "false", "features.networks.zones": "true", "features.profiles": "true", "features.storage.volumes": "true", "features.storage.buckets": "true", "restricted": "true", "restricted.containers.nesting": "allow", "restricted.devices.disk": "allow", "restricted.devices.disk.paths": pw[5], "restricted.devices.gpu": "allow", "restricted.idmap.uid": pw[2], "restricted.idmap.gid": pw[3], "restricted.networks.access": networkName, }, }, }) if err != nil { return fmt.Errorf("Unable to create project: %w", err) } revert.Add(func() { _ = client.DeleteProject(projectName) }) // Create user-specific bridge. network := api.NetworksPost{} network.Config = map[string]string{} network.Type = "bridge" network.Name = networkName network.Description = fmt.Sprintf("Network for user restricted project %s", projectName) err = client.CreateNetwork(network) if err != nil && !api.StatusErrorCheck(err, http.StatusConflict) { return fmt.Errorf("Failed to create network: %w", err) } // Setup default profile. req := api.ProfilePut{ Description: "Default Incus profile", Devices: map[string]map[string]string{ "root": { "type": "disk", "path": "/", "pool": "default", }, "eth0": { "type": "nic", "name": "eth0", "network": networkName, }, }, } // Add uid/gid map if possible. pwUID, err := strconv.ParseInt(pw[2], 10, 64) if err != nil { return err } pwGID, err := strconv.ParseInt(pw[3], 10, 64) if err != nil { return err } idmapset, err := idmap.NewSetFromSystem("", "root") if err != nil && err != idmap.ErrSubidUnsupported { return fmt.Errorf("Failed to load system idmap: %w", err) } idmapAllowed := true if idmapset != nil { entries := []idmap.Entry{ {IsUID: true, HostID: pwUID, MapRange: 1}, {IsGID: true, HostID: pwGID, MapRange: 1}, } if !idmapset.Includes(&idmap.Set{Entries: entries}) { idmapAllowed = false } } if idmapAllowed { req.Config = map[string]string{ "raw.idmap": fmt.Sprintf("uid %d %d\ngid %d %d", pwUID, pwUID, pwGID, pwGID), } } err = client.UseProject(projectName).UpdateProfile("default", req, "") if err != nil { return fmt.Errorf("Unable to update the default profile: %w", err) } } // Parse the certificate. x509Cert, err := localtls.ReadCert(filepath.Join(userPath, "client.crt")) if err != nil { return fmt.Errorf("Unable to read user certificate: %w", err) } // Delete the certificate from the trust store if it already exists. fingerprint := localtls.CertFingerprint(x509Cert) _ = client.DeleteCertificate(fingerprint) // Add the certificate to the trust store. 
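// The certificate is registered as a restricted client certificate that is only valid for the
// user's personal "user-<uid>" project.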
err = client.CreateCertificate(api.CertificatesPost{ CertificatePut: api.CertificatePut{ Name: fmt.Sprintf("incus-user-%d", uid), Type: "client", Restricted: true, Projects: []string{projectName}, Certificate: base64.StdEncoding.EncodeToString(x509Cert.Raw), }, }) if err != nil { return fmt.Errorf("Unable to add user certificate: %w", err) } revert.Add(func() { _ = client.DeleteCertificate(localtls.CertFingerprint(x509Cert)) }) // Add the new project to our list. if !slices.Contains(projectNames, projectName) { projectNames = append(projectNames, projectName) } revert.Success() return nil } incus-6.0.4/cmd/incus/000077500000000000000000000000001477363751000145435ustar00rootroot00000000000000incus-6.0.4/cmd/incus/action.go000066400000000000000000000302441477363751000163520ustar00rootroot00000000000000package main import ( "fmt" "os" "slices" "strings" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" config "github.com/lxc/incus/v6/shared/cliconfig" ) // Start. type cmdStart struct { global *cmdGlobal action *cmdAction } // The function Command() returns a cobra.Command object representing the "start" command. // It is used to start one or more instances specified by the user. func (c *cmdStart) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("start") cmd.Use = usage("start", i18n.G("[:] [[:]...]")) cmd.Short = i18n.G("Start instances") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Start instances`)) cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return c.global.cmpInstances(toComplete) } return cmd } // Pause. type cmdPause struct { global *cmdGlobal action *cmdAction } // The function Command() returns a cobra.Command object representing the "pause" command. // It is used to pause (or freeze) one or more instances specified by the user. This command is hidden and has an alias "freeze". func (c *cmdPause) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("pause") cmd.Use = usage("pause", i18n.G("[:] [[:]...]")) cmd.Short = i18n.G("Pause instances") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Pause instances`)) cmd.Aliases = []string{"freeze"} cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return c.global.cmpInstances(toComplete) } return cmd } // Resume. type cmdResume struct { global *cmdGlobal action *cmdAction } // Command returns a cobra.Command object representing the "resume" command. // It is used to resume (or unfreeze) one or more instances specified by the user. func (c *cmdResume) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("resume") cmd.Use = usage("resume", i18n.G("[:] [[:]...]")) cmd.Short = i18n.G("Resume instances") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Resume instances`)) cmd.Aliases = []string{"unfreeze"} cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return c.global.cmpInstances(toComplete) } return cmd } // Restart. type cmdRestart struct { global *cmdGlobal action *cmdAction } // The function Command() returns a cobra.Command object representing the "restart" command. 
// It is used to restart one or more instances specified by the user. This command restarts the instances, which is the opposite of the "pause" command. func (c *cmdRestart) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("restart") cmd.Use = usage("restart", i18n.G("[:] [[:]...]")) cmd.Short = i18n.G("Restart instances") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Restart instances`)) cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return c.global.cmpInstances(toComplete) } return cmd } // Stop. type cmdStop struct { global *cmdGlobal action *cmdAction } // The function Command() returns a cobra.Command object representing the "stop" command. // It is used to stop one or more instances specified by the user. This command stops the instances, effectively shutting them down. func (c *cmdStop) Command() *cobra.Command { cmdAction := cmdAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("stop") cmd.Use = usage("stop", i18n.G("[:] [[:]...]")) cmd.Short = i18n.G("Stop instances") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Stop instances`)) cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return c.global.cmpInstances(toComplete) } return cmd } type cmdAction struct { global *cmdGlobal flagAll bool flagConsole string flagForce bool flagStateful bool flagStateless bool flagTimeout int } // Command is a method of the cmdAction structure which constructs and configures a cobra Command object. // It creates a command with a specific action, defines flags based on that action, and assigns appropriate help text. func (c *cmdAction) Command(action string) *cobra.Command { cmd := &cobra.Command{} cmd.RunE = c.Run cmd.Flags().BoolVar(&c.flagAll, "all", false, i18n.G("Run against all instances")) if action == "stop" { cmd.Flags().BoolVar(&c.flagStateful, "stateful", false, i18n.G("Store the instance state")) } else if action == "start" { cmd.Flags().BoolVar(&c.flagStateless, "stateless", false, i18n.G("Ignore the instance state")) } if slices.Contains([]string{"start", "restart", "stop"}, action) { cmd.Flags().StringVar(&c.flagConsole, "console", "", i18n.G("Immediately attach to the console")+"``") cmd.Flags().Lookup("console").NoOptDefVal = "console" } if slices.Contains([]string{"restart", "stop"}, action) { cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, i18n.G("Force the instance to stop")) cmd.Flags().IntVar(&c.flagTimeout, "timeout", -1, i18n.G("Time to wait for the instance to shutdown cleanly")+"``") } return cmd } // doActionAll is a method of the cmdAction structure. It performs a specified action on all instances of a remote resource. // It ensures that flags and parameters are appropriately set, and handles any errors that may occur during the process. func (c *cmdAction) doActionAll(action string, resource remoteResource) error { if resource.name != "" { // both --all and instance name given. return fmt.Errorf(i18n.G("Both --all and instance name given")) } remote := resource.remote d, err := c.global.conf.GetInstanceServer(remote) if err != nil { return err } // Pause is called freeze, resume is called unfreeze. if action == "pause" { action = "freeze" } else if action == "resume" { action = "unfreeze" } // Only store state if asked to. 
state := false if action == "stop" && c.flagStateful { state = true } req := api.InstancesPut{ State: &api.InstanceStatePut{ Action: action, Timeout: c.flagTimeout, Force: c.flagForce, Stateful: state, }, } // Update all instances. op, err := d.UpdateInstances(req, "") if err != nil { return err } progress := cli.ProgressRenderer{ Quiet: c.global.flagQuiet, } _, err = op.AddHandler(progress.UpdateOp) if err != nil { progress.Done("") return err } err = cli.CancelableWait(op, &progress) if err != nil { progress.Done("") return err } progress.Done("") return nil } // doAction is a method of the cmdAction structure. It carries out a specified action on an instance, // using a given config and instance name. It manages state changes, flag checks, error handling and console attachment. func (c *cmdAction) doAction(action string, conf *config.Config, nameArg string) error { state := false // Pause is called freeze if action == "pause" { action = "freeze" } // Resume is called unfreeze if action == "resume" { action = "unfreeze" } // Only store state if asked to if action == "stop" && c.flagStateful { state = true } if action == "stop" && c.flagForce && c.flagConsole != "" { return fmt.Errorf(i18n.G("--console can't be used while forcing instance shutdown")) } remote, name, err := conf.ParseRemote(nameArg) if err != nil { return err } d, err := conf.GetInstanceServer(remote) if err != nil { return err } if name == "" { return fmt.Errorf(i18n.G("Must supply instance name for: ")+"\"%s\"", nameArg) } if action == "start" { current, _, err := d.GetInstance(name) if err != nil { return err } // "start" for a frozen instance means "unfreeze" if current.StatusCode == api.Frozen { action = "unfreeze" } // Always restore state (if present) unless asked not to if action == "start" && current.Stateful && !c.flagStateless { state = true } } req := api.InstanceStatePut{ Action: action, Timeout: c.flagTimeout, Force: c.flagForce, Stateful: state, } op, err := d.UpdateInstanceState(name, req, "") if err != nil { return err } if action == "stop" && c.flagConsole != "" { // Handle console attach console := cmdConsole{} console.global = c.global console.flagType = c.flagConsole return console.console(d, name) } progress := cli.ProgressRenderer{ Quiet: c.global.flagQuiet, } _, err = op.AddHandler(progress.UpdateOp) if err != nil { progress.Done("") return err } // Wait for operation to finish err = cli.CancelableWait(op, &progress) if err != nil { progress.Done("") projectArg := "" if conf.ProjectOverride != "" && conf.ProjectOverride != api.ProjectDefaultName { projectArg = " --project " + conf.ProjectOverride } return fmt.Errorf("%s\n"+i18n.G("Try `incus info --show-log %s%s` for more info"), err, nameArg, projectArg) } progress.Done("") // Handle console attach if c.flagConsole != "" { console := cmdConsole{} console.global = c.global console.flagType = c.flagConsole consoleErr := console.console(d, name) if consoleErr != nil { // Check if still running. state, _, err := d.GetInstanceState(name) if err != nil { return err } if state.StatusCode != api.Stopped { return consoleErr } console.flagShowLog = true return console.console(d, name) } } return nil } // Run is a method of the cmdAction structure that implements the execution logic for the given Cobra command. // It handles actions on instances (single or all) and manages error handling, console flag restrictions, and batch operations. 
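// Typical invocations handled here include, for example, "incus start c1 c2" or
// "incus stop --all" (instance names are hypothetical).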
func (c *cmdAction) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf var names []string if c.flagAll { // If no server passed, use current default. if len(args) == 0 { args = []string{fmt.Sprintf("%s:", conf.DefaultRemote)} } // Get all the servers. resources, err := c.global.ParseServers(args...) if err != nil { return err } for _, resource := range resources { // We don't allow instance names with --all. if resource.name != "" { return fmt.Errorf(i18n.G("Both --all and instance name given")) } // See if we can use the bulk API. if resource.server.HasExtension("instance_bulk_state_change") { err = c.doActionAll(cmd.Name(), resource) if err != nil { return fmt.Errorf("%s: %w", resource.remote, err) } continue } ctslist, err := resource.server.GetInstances(api.InstanceTypeAny) if err != nil { return err } for _, ct := range ctslist { switch cmd.Name() { case "start": if ct.StatusCode == api.Running { continue } case "stop": if ct.StatusCode == api.Stopped { continue } } names = append(names, fmt.Sprintf("%s:%s", resource.remote, ct.Name)) } } } else { names = args if len(args) == 0 { _ = cmd.Usage() return nil } } if c.flagConsole != "" { if c.flagAll { return fmt.Errorf(i18n.G("--console can't be used with --all")) } if len(names) != 1 { return fmt.Errorf(i18n.G("--console only works with a single instance")) } } // Run the action for every listed instance results := runBatch(names, func(name string) error { return c.doAction(cmd.Name(), conf, name) }) // Single instance is easy if len(results) == 1 { return results[0].err } // Do fancier rendering for batches success := true for _, result := range results { if result.err == nil { continue } success = false msg := fmt.Sprintf(i18n.G("error: %v"), result.err) for _, line := range strings.Split(msg, "\n") { fmt.Fprintf(os.Stderr, "%s: %s\n", result.name, line) } } if !success { fmt.Fprintln(os.Stderr, "") return fmt.Errorf(i18n.G("Some instances failed to %s"), cmd.Name()) } return nil } incus-6.0.4/cmd/incus/admin.go000066400000000000000000000021331477363751000161610ustar00rootroot00000000000000//go:build linux package main import ( "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" ) type cmdAdmin struct { global *cmdGlobal } func (c *cmdAdmin) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("admin") cmd.Short = i18n.G("Manage incus daemon") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage incus daemon`)) // cluster adminClusterCmd := cmdAdminCluster{global: c.global} cmd.AddCommand(adminClusterCmd.Command()) // init adminInitCmd := cmdAdminInit{global: c.global} cmd.AddCommand(adminInitCmd.Command()) // recover sub-command adminRecoverCmd := cmdAdminRecover{global: c.global} cmd.AddCommand(adminRecoverCmd.Command()) // shutdown sub-command shutdownCmd := cmdAdminShutdown{global: c.global} cmd.AddCommand(shutdownCmd.Command()) // sql sub-command sqlCmd := cmdAdminSQL{global: c.global} cmd.AddCommand(sqlCmd.Command()) // waitready sub-command adminWaitreadyCmd := cmdAdminWaitready{global: c.global} cmd.AddCommand(adminWaitreadyCmd.Command()) return cmd } incus-6.0.4/cmd/incus/admin_cluster.go000066400000000000000000000026041477363751000177250ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "os" "os/exec" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/util" ) type cmdAdminCluster struct { global *cmdGlobal } func 
(c *cmdAdminCluster) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("cluster") cmd.Short = i18n.G("Low-level cluster administration commands") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Low level administration tools for inspecting and recovering clusters.`)) cmd.Run = c.Run return cmd } func (c *cmdAdminCluster) Run(cmd *cobra.Command, args []string) { env := getEnviron() path, _ := exec.LookPath("incusd") if path == "" { if util.PathExists("/usr/libexec/incus/incusd") { path = "/usr/libexec/incus/incusd" } else if util.PathExists("/usr/lib/incus/incusd") { path = "/usr/lib/incus/incusd" } else if util.PathExists("/opt/incus/bin/incusd") { path = "/opt/incus/bin/incusd" env = append(env, "LD_LIBRARY_PATH=/opt/incus/lib/") } } if path == "" { fmt.Println(i18n.G(`The "cluster" subcommand requires access to internal server data. To do so, it's actually part of the "incusd" binary rather than "incus". You can invoke it through "incusd cluster".`)) os.Exit(1) } _ = doExec(path, append([]string{"incusd", "admin", "cluster"}, args...), env) } incus-6.0.4/cmd/incus/admin_init.go000066400000000000000000000174131477363751000172130ustar00rootroot00000000000000//go:build linux package main import ( "encoding/pem" "fmt" "os" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/internal/ports" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/util" ) type cmdAdminInit struct { global *cmdGlobal flagAuto bool flagMinimal bool flagPreseed bool flagDump bool flagNetworkAddress string flagNetworkPort int flagStorageBackend string flagStorageDevice string flagStorageLoopSize int flagStoragePool string hostname string } func (c *cmdAdminInit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("init") cmd.Short = i18n.G("Configure the daemon") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Configure the daemon`)) cmd.Example = ` init --minimal init --auto [--network-address=IP] [--network-port=8443] [--storage-backend=dir] [--storage-create-device=DEVICE] [--storage-create-loop=SIZE] [--storage-pool=POOL] init --preseed init --dump ` cmd.RunE = c.Run cmd.Flags().BoolVar(&c.flagAuto, "auto", false, i18n.G("Automatic (non-interactive) mode")) cmd.Flags().BoolVar(&c.flagMinimal, "minimal", false, i18n.G("Minimal configuration (non-interactive)")) cmd.Flags().BoolVar(&c.flagPreseed, "preseed", false, i18n.G("Pre-seed mode, expects YAML config from stdin")) cmd.Flags().BoolVar(&c.flagDump, "dump", false, i18n.G("Dump YAML config to stdout")) cmd.Flags().StringVar(&c.flagNetworkAddress, "network-address", "", i18n.G("Address to bind to (default: none)")+"``") cmd.Flags().IntVar(&c.flagNetworkPort, "network-port", -1, fmt.Sprintf(i18n.G("Port to bind to (default: %d)")+"``", ports.HTTPSDefaultPort)) cmd.Flags().StringVar(&c.flagStorageBackend, "storage-backend", "", i18n.G("Storage backend to use (btrfs, dir, lvm or zfs, default: dir)")+"``") cmd.Flags().StringVar(&c.flagStorageDevice, "storage-create-device", "", i18n.G("Setup device based storage using DEVICE")+"``") cmd.Flags().IntVar(&c.flagStorageLoopSize, "storage-create-loop", -1, i18n.G("Setup loop based storage with SIZE in GiB")+"``") cmd.Flags().StringVar(&c.flagStoragePool, "storage-pool", "", i18n.G("Storage pool 
to use or create")+"``") return cmd } func (c *cmdAdminInit) Run(cmd *cobra.Command, args []string) error { // Quick checks. if c.flagAuto && c.flagPreseed { return fmt.Errorf(i18n.G("Can't use --auto and --preseed together")) } if c.flagMinimal && c.flagPreseed { return fmt.Errorf(i18n.G("Can't use --minimal and --preseed together")) } if c.flagMinimal && c.flagAuto { return fmt.Errorf(i18n.G("Can't use --minimal and --auto together")) } if !c.flagAuto && (c.flagNetworkAddress != "" || c.flagNetworkPort != -1 || c.flagStorageBackend != "" || c.flagStorageDevice != "" || c.flagStorageLoopSize != -1 || c.flagStoragePool != "") { return fmt.Errorf(i18n.G("Configuration flags require --auto")) } if c.flagDump && (c.flagAuto || c.flagMinimal || c.flagPreseed || c.flagNetworkAddress != "" || c.flagNetworkPort != -1 || c.flagStorageBackend != "" || c.flagStorageDevice != "" || c.flagStorageLoopSize != -1 || c.flagStoragePool != "") { return fmt.Errorf(i18n.G("Can't use --dump with other flags")) } // Connect to the daemon d, err := incus.ConnectIncusUnix("", nil) if err != nil { return fmt.Errorf(i18n.G("Failed to connect to local daemon: %w"), err) } server, _, err := d.GetServer() if err != nil { return fmt.Errorf(i18n.G("Failed to connect to get server info: %w"), err) } // Dump mode if c.flagDump { err := c.RunDump(d) if err != nil { return err } return nil } // Prepare the input data var config *api.InitPreseed // Preseed mode if c.flagPreseed { config, err = c.RunPreseed(cmd, args, d) if err != nil { return err } } // Auto mode if c.flagAuto || c.flagMinimal { config, err = c.RunAuto(cmd, args, d, server) if err != nil { return err } } // Interactive mode if !c.flagAuto && !c.flagMinimal && !c.flagPreseed { config, err = c.RunInteractive(cmd, args, d, server) if err != nil { return err } } // Check if the path to the cluster certificate is set // If yes then read cluster certificate from file if config.Cluster != nil && config.Cluster.ClusterCertificatePath != "" { if !util.PathExists(config.Cluster.ClusterCertificatePath) { return fmt.Errorf(i18n.G("Path %s doesn't exist"), config.Cluster.ClusterCertificatePath) } content, err := os.ReadFile(config.Cluster.ClusterCertificatePath) if err != nil { return err } config.Cluster.ClusterCertificate = string(content) } // Check if we got a cluster join token, if so, fill in the config with it. if config.Cluster != nil && config.Cluster.ClusterToken != "" { joinToken, err := internalUtil.JoinTokenDecode(config.Cluster.ClusterToken) if err != nil { return fmt.Errorf(i18n.G("Invalid cluster join token: %w"), err) } // Set server name from join token config.Cluster.ServerName = joinToken.ServerName // Attempt to find a working cluster member to use for joining by retrieving the // cluster certificate from each address in the join token until we succeed. 
for _, clusterAddress := range joinToken.Addresses { // Cluster URL config.Cluster.ClusterAddress = internalUtil.CanonicalNetworkAddress(clusterAddress, ports.HTTPSDefaultPort) // Cluster certificate cert, err := localtls.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent) if err != nil { fmt.Printf(i18n.G("Error connecting to existing cluster member %q: %v")+"\n", clusterAddress, err) continue } certDigest := localtls.CertFingerprint(cert) if joinToken.Fingerprint != certDigest { return fmt.Errorf(i18n.G("Certificate fingerprint mismatch between join token and cluster member %q"), clusterAddress) } config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})) break // We've found a working cluster member. } if config.Cluster.ClusterCertificate == "" { return fmt.Errorf(i18n.G("Unable to connect to any of the cluster members specified in join token")) } } // If clustering is enabled, and no cluster.https_address network address // was specified, we fallback to core.https_address. if config.Cluster != nil && config.Server.Config["core.https_address"] != "" && config.Server.Config["cluster.https_address"] == "" { config.Server.Config["cluster.https_address"] = config.Server.Config["core.https_address"] } // Detect if the user has chosen to join a cluster using the new // cluster join API format, and use the dedicated API if so. if config.Cluster != nil && config.Cluster.ClusterAddress != "" && config.Cluster.ServerAddress != "" { // Ensure the server and cluster addresses are in canonical form. config.Cluster.ServerAddress = internalUtil.CanonicalNetworkAddress(config.Cluster.ServerAddress, ports.HTTPSDefaultPort) config.Cluster.ClusterAddress = internalUtil.CanonicalNetworkAddress(config.Cluster.ClusterAddress, ports.HTTPSDefaultPort) op, err := d.UpdateCluster(config.Cluster.ClusterPut, "") if err != nil { return fmt.Errorf(i18n.G("Failed to join cluster: %w"), err) } err = op.Wait() if err != nil { return fmt.Errorf(i18n.G("Failed to join cluster: %w"), err) } return nil } return d.ApplyServerPreseed(*config) } func (c *cmdAdminInit) defaultHostname() string { if c.hostname != "" { return c.hostname } // Cluster server name hostName, err := os.Hostname() if err != nil { hostName = "incus" } c.hostname = hostName return hostName } incus-6.0.4/cmd/incus/admin_init_auto.go000066400000000000000000000136351477363751000202450ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "slices" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/ports" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/util" ) func (c *cmdAdminInit) RunAuto(cmd *cobra.Command, args []string, d incus.InstanceServer, server *api.Server) (*api.InitPreseed, error) { // Quick checks. 
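// For orientation, a hedged example of the kind of non-interactive invocation
// these checks validate (device path and values are hypothetical):
//
//	incus admin init --auto --storage-backend=zfs \
//	    --storage-create-device=/dev/sdb --storage-pool=default \
//	    --network-address=:: --network-port=8443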
if c.flagStorageBackend != "" && !slices.Contains([]string{"dir", "btrfs", "lvm", "zfs"}, c.flagStorageBackend) { return nil, fmt.Errorf(i18n.G("The requested backend '%s' isn't supported by init"), c.flagStorageBackend) } if c.flagStorageBackend != "" && !slices.Contains(linux.AvailableStorageDrivers(internalUtil.VarPath(), server.Environment.StorageSupportedDrivers, internalUtil.PoolTypeAny), c.flagStorageBackend) { return nil, fmt.Errorf(i18n.G("The requested backend '%s' isn't available on your system (missing tools)"), c.flagStorageBackend) } if c.flagStorageBackend == "dir" || c.flagStorageBackend == "" { if c.flagStorageLoopSize != -1 || c.flagStorageDevice != "" || c.flagStoragePool != "" { return nil, fmt.Errorf(i18n.G("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend")) } } else { if c.flagStorageLoopSize != -1 && c.flagStorageDevice != "" { return nil, fmt.Errorf(i18n.G("Only one of --storage-create-device or --storage-create-loop can be specified")) } } if c.flagNetworkAddress == "" { if c.flagNetworkPort != -1 { return nil, fmt.Errorf(i18n.G("--network-port can't be used without --network-address")) } } storagePools, err := d.GetStoragePoolNames() if err != nil { return nil, fmt.Errorf(i18n.G("Failed to retrieve list of storage pools: %w"), err) } if len(storagePools) > 0 && (c.flagStorageBackend != "" || c.flagStorageDevice != "" || c.flagStorageLoopSize != -1 || c.flagStoragePool != "") { return nil, fmt.Errorf(i18n.G("Storage has already been configured")) } // Detect the backing filesystem. backingFs, err := linux.DetectFilesystem(internalUtil.VarPath()) if err != nil { backingFs = "dir" } // Get the possible local storage drivers. storageDrivers := linux.AvailableStorageDrivers(internalUtil.VarPath(), server.Environment.StorageSupportedDrivers, internalUtil.PoolTypeLocal) // Defaults if c.flagNetworkPort == -1 { c.flagNetworkPort = ports.HTTPSDefaultPort } if c.flagStorageBackend == "" && c.flagStoragePool == "" && backingFs == "btrfs" && slices.Contains(storageDrivers, "btrfs") { // Use btrfs subvol if running on btrfs. 
c.flagStoragePool = internalUtil.VarPath("storage-pools", "default") c.flagStorageBackend = "btrfs" } else if c.flagStorageBackend == "" { c.flagStorageBackend = "dir" } // Fill in the node configuration config := api.InitLocalPreseed{} config.Config = map[string]string{} // Network listening if c.flagNetworkAddress != "" { config.Config["core.https_address"] = internalUtil.CanonicalNetworkAddressFromAddressAndPort(c.flagNetworkAddress, c.flagNetworkPort, ports.HTTPSDefaultPort) } // Storage configuration if len(storagePools) == 0 { // Storage pool pool := api.StoragePoolsPost{ Name: "default", Driver: c.flagStorageBackend, } pool.Config = map[string]string{} if c.flagStorageDevice != "" { pool.Config["source"] = c.flagStorageDevice } else if c.flagStorageLoopSize > 0 { pool.Config["size"] = fmt.Sprintf("%dGiB", c.flagStorageLoopSize) } else { pool.Config["source"] = c.flagStoragePool } // If using a device or loop, --storage-pool refers to the name of the new pool if c.flagStoragePool != "" && (c.flagStorageDevice != "" || c.flagStorageLoopSize != -1) { pool.Name = c.flagStoragePool } config.StoragePools = []api.StoragePoolsPost{pool} // Profile entry config.Profiles = []api.InitProfileProjectPost{{ ProfilesPost: api.ProfilesPost{ Name: "default", ProfilePut: api.ProfilePut{ Devices: map[string]map[string]string{ "root": { "type": "disk", "path": "/", "pool": pool.Name, }, }, }, }, Project: api.ProjectDefaultName, }} } // Network configuration networks, err := d.GetNetworks() if err != nil { return nil, fmt.Errorf(i18n.G("Failed to retrieve list of networks: %w"), err) } // Extract managed networks managedNetworks := []api.Network{} for _, network := range networks { if network.Managed { managedNetworks = append(managedNetworks, network) } } // Look for an existing network device in the profile defaultProfileNetwork := false defaultProfile, _, err := d.GetProfile("default") if err == nil { for _, dev := range defaultProfile.Devices { if dev["type"] == "nic" { defaultProfileNetwork = true break } } } // Define a new network if len(managedNetworks) == 0 && !defaultProfileNetwork { // Find a new name idx := 0 for { if util.PathExists(fmt.Sprintf("/sys/class/net/incusbr%d", idx)) { idx++ continue } break } // Define the new network network := api.InitNetworksProjectPost{} network.Name = fmt.Sprintf("incusbr%d", idx) network.Project = api.ProjectDefaultName config.Networks = append(config.Networks, network) // Add it to the profile if config.Profiles == nil { config.Profiles = []api.InitProfileProjectPost{{ ProfilesPost: api.ProfilesPost{ Name: "default", ProfilePut: api.ProfilePut{ Devices: map[string]map[string]string{ "eth0": { "type": "nic", "network": network.Name, "name": "eth0", }, }, }, }, Project: api.ProjectDefaultName, }} } else { config.Profiles[0].Devices["eth0"] = map[string]string{ "type": "nic", "network": network.Name, "name": "eth0", } } } return &api.InitPreseed{Server: config}, nil } incus-6.0.4/cmd/incus/admin_init_dump.go000066400000000000000000000053641477363751000202420ustar00rootroot00000000000000//go:build linux package main import ( "fmt" yaml "gopkg.in/yaml.v2" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" ) func (c *cmdAdminInit) RunDump(d incus.InstanceServer) error { currentServer, _, err := d.GetServer() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server configuration: %w"), err) } var config api.InitLocalPreseed config.Config = currentServer.Config // Only retrieve 
networks in the default project as the preseed format doesn't support creating // projects at this time. networks, err := d.UseProject(api.ProjectDefaultName).GetNetworks() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server network configuration for project %q: %w"), api.ProjectDefaultName, err) } for _, network := range networks { // Only list managed networks. if !network.Managed { continue } networksPost := api.InitNetworksProjectPost{} networksPost.Config = network.Config networksPost.Description = network.Description networksPost.Name = network.Name networksPost.Type = network.Type networksPost.Project = api.ProjectDefaultName config.Networks = append(config.Networks, networksPost) } storagePools, err := d.GetStoragePools() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server configuration: %w"), err) } for _, storagePool := range storagePools { storagePoolsPost := api.StoragePoolsPost{} storagePoolsPost.Config = storagePool.Config storagePoolsPost.Description = storagePool.Description storagePoolsPost.Name = storagePool.Name storagePoolsPost.Driver = storagePool.Driver config.StoragePools = append(config.StoragePools, storagePoolsPost) } profiles, err := d.GetProfiles() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server configuration: %w"), err) } for _, profile := range profiles { profilesPost := api.InitProfileProjectPost{} profilesPost.Config = profile.Config profilesPost.Description = profile.Description profilesPost.Devices = profile.Devices profilesPost.Name = profile.Name config.Profiles = append(config.Profiles, profilesPost) } projects, err := d.GetProjects() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server configuration: %w"), err) } for _, project := range projects { projectsPost := api.ProjectsPost{} projectsPost.Config = project.Config projectsPost.Description = project.Description projectsPost.Name = project.Name config.Projects = append(config.Projects, projectsPost) } out, err := yaml.Marshal(config) if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server configuration: %w"), err) } fmt.Printf("%s\n", out) return nil } incus-6.0.4/cmd/incus/admin_init_interactive.go000066400000000000000000000643241477363751000216130ustar00rootroot00000000000000//go:build linux package main import ( "encoding/pem" "fmt" "net" "net/http" "os" "os/exec" "slices" "strconv" "strings" "github.com/spf13/cobra" "golang.org/x/sys/unix" "gopkg.in/yaml.v2" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/internal/linux" "github.com/lxc/incus/v6/internal/ports" internalUtil "github.com/lxc/incus/v6/internal/util" "github.com/lxc/incus/v6/internal/version" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/subprocess" localtls "github.com/lxc/incus/v6/shared/tls" "github.com/lxc/incus/v6/shared/util" "github.com/lxc/incus/v6/shared/validate" ) func (c *cmdAdminInit) RunInteractive(cmd *cobra.Command, args []string, d incus.InstanceServer, server *api.Server) (*api.InitPreseed, error) { // Initialize config config := api.InitPreseed{} config.Server.Config = map[string]string{} config.Server.Networks = []api.InitNetworksProjectPost{} config.Server.StoragePools = []api.StoragePoolsPost{} config.Server.Profiles = []api.InitProfileProjectPost{ { ProfilesPost: api.ProfilesPost{ Name: "default", ProfilePut: api.ProfilePut{ Config: map[string]string{}, Devices: map[string]map[string]string{}, }, }, Project: 
api.ProjectDefaultName, }, } // Clustering err := c.askClustering(&config, d, server) if err != nil { return nil, err } // Ask all the other questions if config.Cluster == nil || config.Cluster.ClusterAddress == "" { // Storage err = c.askStorage(&config, d, server) if err != nil { return nil, err } // Networking err = c.askNetworking(&config, d) if err != nil { return nil, err } // Daemon config err = c.askDaemon(&config, d, server) if err != nil { return nil, err } } // Print the YAML preSeedPrint, err := c.global.asker.AskBool(i18n.G("Would you like a YAML \"init\" preseed to be printed?")+" (yes/no) [default=no]: ", "no") if err != nil { return nil, err } if preSeedPrint { var object api.InitPreseed // If the user has chosen to join an existing cluster, print // only YAML for the cluster section, which is the only // relevant one. Otherwise print the regular config. if config.Cluster != nil && config.Cluster.ClusterAddress != "" { object = api.InitPreseed{} object.Cluster = config.Cluster } else { object = config } out, err := yaml.Marshal(object) if err != nil { return nil, fmt.Errorf(i18n.G("Failed to render the config: %w"), err) } fmt.Printf("%s\n", out) } return &config, nil } func (c *cmdAdminInit) askClustering(config *api.InitPreseed, d incus.InstanceServer, server *api.Server) error { clustering, err := c.global.asker.AskBool(i18n.G("Would you like to use clustering?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if clustering { config.Cluster = &api.InitClusterPreseed{} config.Cluster.Enabled = true askForServerName := func() error { config.Cluster.ServerName, err = c.global.asker.AskString(fmt.Sprintf(i18n.G("What member name should be used to identify this server in the cluster?")+" [default=%s]: ", c.defaultHostname()), c.defaultHostname(), nil) if err != nil { return err } return nil } // Cluster server address address := internalUtil.NetworkInterfaceAddress() validateServerAddress := func(value string) error { address := internalUtil.CanonicalNetworkAddress(value, ports.HTTPSDefaultPort) host, _, _ := net.SplitHostPort(address) if slices.Contains([]string{"", "[::]", "0.0.0.0"}, host) { return fmt.Errorf(i18n.G("Invalid IP address or DNS name")) } if err == nil { if server.Config["cluster.https_address"] == address || server.Config["core.https_address"] == address { // We already own the address, just move on. 
return nil } } listener, err := net.Listen("tcp", address) if err != nil { return fmt.Errorf(i18n.G("Can't bind address %q: %w"), address, err) } _ = listener.Close() return nil } serverAddress, err := c.global.asker.AskString(fmt.Sprintf(i18n.G("What IP address or DNS name should be used to reach this server?")+" [default=%s]: ", address), address, validateServerAddress) if err != nil { return err } serverAddress = internalUtil.CanonicalNetworkAddress(serverAddress, ports.HTTPSDefaultPort) config.Server.Config["core.https_address"] = serverAddress clusterJoin, err := c.global.asker.AskBool(i18n.G("Are you joining an existing cluster?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if clusterJoin { // Existing cluster config.Cluster.ServerAddress = serverAddress // Root is required to access the certificate files if os.Geteuid() != 0 { return fmt.Errorf(i18n.G("Joining an existing cluster requires root privileges")) } var joinToken *api.ClusterMemberJoinToken validJoinToken := func(input string) error { j, err := internalUtil.JoinTokenDecode(input) if err != nil { return fmt.Errorf(i18n.G("Invalid join token: %w"), err) } joinToken = j // Store valid decoded join token return nil } clusterJoinToken, err := c.global.asker.AskString(i18n.G("Please provide join token:")+" ", "", validJoinToken) if err != nil { return err } // Set server name from join token config.Cluster.ServerName = joinToken.ServerName // Attempt to find a working cluster member to use for joining by retrieving the // cluster certificate from each address in the join token until we succeed. for _, clusterAddress := range joinToken.Addresses { config.Cluster.ClusterAddress = internalUtil.CanonicalNetworkAddress(clusterAddress, ports.HTTPSDefaultPort) // Cluster certificate cert, err := localtls.GetRemoteCertificate(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), version.UserAgent) if err != nil { fmt.Printf(i18n.G("Error connecting to existing cluster member %q: %v")+"\n", clusterAddress, err) continue } certDigest := localtls.CertFingerprint(cert) if joinToken.Fingerprint != certDigest { return fmt.Errorf(i18n.G("Certificate fingerprint mismatch between join token and cluster member %q"), clusterAddress) } config.Cluster.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})) break // We've found a working cluster member. } if config.Cluster.ClusterCertificate == "" { return fmt.Errorf(i18n.G("Unable to connect to any of the cluster members specified in join token")) } // Pass the raw join token. config.Cluster.ClusterToken = clusterJoinToken // Confirm wiping clusterWipeMember, err := c.global.asker.AskBool(i18n.G("All existing data is lost when joining a cluster, continue?")+" (yes/no) [default=no] ", "no") if err != nil { return err } if !clusterWipeMember { return fmt.Errorf(i18n.G("User aborted configuration")) } // Connect to existing cluster serverCert, err := internalUtil.LoadServerCert(internalUtil.VarPath("")) if err != nil { return err } err = c.setupClusterTrust(serverCert, config.Cluster.ServerName, config.Cluster.ClusterAddress, config.Cluster.ClusterCertificate, config.Cluster.ClusterToken) if err != nil { return fmt.Errorf(i18n.G("Failed to setup trust relationship with cluster: %w"), err) } // Now we have setup trust, don't send to server, otherwise it will try and setup trust // again and if using a one-time join token, will fail. config.Cluster.ClusterToken = "" // Client parameters to connect to the target cluster member. 
args := &incus.ConnectionArgs{ TLSClientCert: string(serverCert.PublicKey()), TLSClientKey: string(serverCert.PrivateKey()), TLSServerCert: string(config.Cluster.ClusterCertificate), UserAgent: version.UserAgent, } client, err := incus.ConnectIncus(fmt.Sprintf("https://%s", config.Cluster.ClusterAddress), args) if err != nil { return err } // Get the list of required member config keys. cluster, _, err := client.GetCluster() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve cluster information: %w"), err) } for i, config := range cluster.MemberConfig { question := fmt.Sprintf(i18n.G("Choose %s:")+" ", config.Description) // Allow for empty values. configValue, err := c.global.asker.AskString(question, "", validate.Optional()) if err != nil { return err } cluster.MemberConfig[i].Value = configValue } config.Cluster.MemberConfig = cluster.MemberConfig } else { // Ask for server name since no token is provided err = askForServerName() if err != nil { return err } } } return nil } func (c *cmdAdminInit) askNetworking(config *api.InitPreseed, d incus.InstanceServer) error { var err error localBridgeCreate := false if config.Cluster == nil { localBridgeCreate, err = c.global.asker.AskBool(i18n.G("Would you like to create a new local network bridge?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } } if !localBridgeCreate { useExistingInterface, err := c.global.asker.AskBool(i18n.G("Would you like to use an existing bridge or host interface?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if useExistingInterface { for { interfaceName, err := c.global.asker.AskString(i18n.G("Name of the existing bridge or host interface:")+" ", "", nil) if err != nil { return err } if !util.PathExists(fmt.Sprintf("/sys/class/net/%s", interfaceName)) { fmt.Println(i18n.G("The requested interface doesn't exist. Please choose another one.")) continue } // Add to the default profile config.Server.Profiles[0].Devices["eth0"] = map[string]string{ "type": "nic", "nictype": "macvlan", "name": "eth0", "parent": interfaceName, } if util.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", interfaceName)) { config.Server.Profiles[0].Devices["eth0"]["nictype"] = "bridged" } break } } return nil } for { // Define the network net := api.InitNetworksProjectPost{} net.Config = map[string]string{} net.Project = api.ProjectDefaultName // Network name net.Name, err = c.global.asker.AskString(i18n.G("What should the new bridge be called?")+" [default=incusbr0]: ", "incusbr0", validate.IsNetworkName) if err != nil { return err } _, _, err = d.GetNetwork(net.Name) if err == nil { fmt.Printf(i18n.G("The requested network bridge \"%s\" already exists. 
Please choose another name.")+"\n", net.Name) continue } // Add to the default profile config.Server.Profiles[0].Devices["eth0"] = map[string]string{ "type": "nic", "name": "eth0", "network": net.Name, } // IPv4 net.Config["ipv4.address"], err = c.global.asker.AskString(i18n.G("What IPv4 address should be used?")+" (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error { if slices.Contains([]string{"auto", "none"}, value) { return nil } return validate.Optional(validate.IsNetworkAddressCIDRV4)(value) }) if err != nil { return err } if !slices.Contains([]string{"auto", "none"}, net.Config["ipv4.address"]) { netIPv4UseNAT, err := c.global.asker.AskBool(i18n.G("Would you like to NAT IPv4 traffic on your bridge?")+" [default=yes]: ", "yes") if err != nil { return err } net.Config["ipv4.nat"] = fmt.Sprintf("%v", netIPv4UseNAT) } // IPv6 net.Config["ipv6.address"], err = c.global.asker.AskString(i18n.G("What IPv6 address should be used?")+" (CIDR subnet notation, “auto” or “none”) [default=auto]: ", "auto", func(value string) error { if slices.Contains([]string{"auto", "none"}, value) { return nil } return validate.Optional(validate.IsNetworkAddressCIDRV6)(value) }) if err != nil { return err } if !slices.Contains([]string{"auto", "none"}, net.Config["ipv6.address"]) { netIPv6UseNAT, err := c.global.asker.AskBool(i18n.G("Would you like to NAT IPv6 traffic on your bridge?")+" [default=yes]: ", "yes") if err != nil { return err } net.Config["ipv6.nat"] = fmt.Sprintf("%v", netIPv6UseNAT) } // Add the new network config.Server.Networks = append(config.Server.Networks, net) break } return nil } func (c *cmdAdminInit) askStorage(config *api.InitPreseed, d incus.InstanceServer, server *api.Server) error { if config.Cluster != nil { localStoragePool, err := c.global.asker.AskBool(i18n.G("Do you want to configure a new local storage pool?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if localStoragePool { err := c.askStoragePool(config, d, server, internalUtil.PoolTypeLocal) if err != nil { return err } } remoteStoragePool, err := c.global.asker.AskBool(i18n.G("Do you want to configure a new remote storage pool?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if remoteStoragePool { err := c.askStoragePool(config, d, server, internalUtil.PoolTypeRemote) if err != nil { return err } } return nil } storagePool, err := c.global.asker.AskBool(i18n.G("Do you want to configure a new storage pool?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if !storagePool { return nil } return c.askStoragePool(config, d, server, internalUtil.PoolTypeAny) } func (c *cmdAdminInit) setupClusterTrust(serverCert *localtls.CertInfo, serverName string, targetAddress string, targetCert string, targetToken string) error { // Connect to the target cluster node. 
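// (Trust is established by generating a client certificate from this server's
// certificate and submitting it to the target member with the join token as
// the trust token; see GenerateTrustCertificate and CreateCertificate below.)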
args := &incus.ConnectionArgs{ TLSServerCert: targetCert, UserAgent: version.UserAgent, } target, err := incus.ConnectIncus(fmt.Sprintf("https://%s", targetAddress), args) if err != nil { return fmt.Errorf(i18n.G("Failed to connect to target cluster node %q: %w"), targetAddress, err) } cert, err := localtls.GenerateTrustCertificate(serverCert, serverName) if err != nil { return fmt.Errorf(i18n.G("Failed generating trust certificate: %w"), err) } post := api.CertificatesPost{ CertificatePut: cert.CertificatePut, TrustToken: targetToken, } err = target.CreateCertificate(post) if err != nil && !api.StatusErrorCheck(err, http.StatusConflict) { return fmt.Errorf(i18n.G("Failed to add server cert to cluster: %w"), err) } return nil } func (c *cmdAdminInit) askStoragePool(config *api.InitPreseed, d incus.InstanceServer, server *api.Server, poolType internalUtil.PoolType) error { // Figure out the preferred storage driver availableBackends := linux.AvailableStorageDrivers(internalUtil.VarPath(), server.Environment.StorageSupportedDrivers, poolType) if len(availableBackends) == 0 { if poolType != internalUtil.PoolTypeAny { return fmt.Errorf(i18n.G("No storage backends available")) } return fmt.Errorf(i18n.G("No %s storage backends available"), poolType) } backingFs, err := linux.DetectFilesystem(internalUtil.VarPath()) if err != nil { backingFs = "dir" } defaultStorage := "dir" if backingFs == "btrfs" && slices.Contains(availableBackends, "btrfs") { defaultStorage = "btrfs" } else if slices.Contains(availableBackends, "zfs") { defaultStorage = "zfs" } else if slices.Contains(availableBackends, "btrfs") { defaultStorage = "btrfs" } for { // Define the pool pool := api.StoragePoolsPost{} pool.Config = map[string]string{} if poolType == internalUtil.PoolTypeAny { pool.Name, err = c.global.asker.AskString(i18n.G("Name of the new storage pool")+" [default=default]: ", "default", nil) if err != nil { return err } } else { pool.Name = string(poolType) } _, _, err := d.GetStoragePool(pool.Name) if err == nil { if poolType == internalUtil.PoolTypeAny { fmt.Printf(i18n.G("The requested storage pool \"%s\" already exists. Please choose another name.")+"\n", pool.Name) continue } return fmt.Errorf(i18n.G("The %s storage pool already exists"), poolType) } // Add to the default profile if config.Server.Profiles[0].Devices["root"] == nil { config.Server.Profiles[0].Devices["root"] = map[string]string{ "type": "disk", "path": "/", "pool": pool.Name, } } // Storage backend if len(availableBackends) > 1 { defaultBackend := defaultStorage if poolType == internalUtil.PoolTypeRemote { if slices.Contains(availableBackends, "ceph") { defaultBackend = "ceph" } else { defaultBackend = availableBackends[0] // Default to first remote driver. 
} } pool.Driver, err = c.global.asker.AskChoice(fmt.Sprintf(i18n.G("Name of the storage backend to use (%s)")+" [default=%s]: ", strings.Join(availableBackends, ", "), defaultBackend), availableBackends, defaultBackend) if err != nil { return err } } else { pool.Driver = availableBackends[0] } // Optimization for dir if pool.Driver == "dir" { source, err := c.global.asker.AskString(fmt.Sprintf(i18n.G("Where should this storage pool store its data?")+" [default=%s]: ", internalUtil.VarPath("storage-pools", pool.Name)), "", validate.IsAny) if err != nil { return err } if source != "" { pool.Config["source"] = source } config.Server.StoragePools = append(config.Server.StoragePools, pool) break } // Optimization for btrfs on btrfs if pool.Driver == "btrfs" && backingFs == "btrfs" { btrfsSubvolume, err := c.global.asker.AskBool(fmt.Sprintf(i18n.G("Would you like to create a new btrfs subvolume under %s?")+" (yes/no) [default=yes]: ", internalUtil.VarPath("")), "yes") if err != nil { return err } if btrfsSubvolume { pool.Config["source"] = internalUtil.VarPath("storage-pools", pool.Name) config.Server.StoragePools = append(config.Server.StoragePools, pool) break } } // Optimization for zfs on zfs (when using Ubuntu's bpool/rpool) if pool.Driver == "zfs" && backingFs == "zfs" { poolName, _ := subprocess.RunCommand("zpool", "get", "-H", "-o", "value", "name", "rpool") if strings.TrimSpace(poolName) == "rpool" { zfsDataset, err := c.global.asker.AskBool(i18n.G("Would you like to create a new zfs dataset under rpool/incus?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if zfsDataset { pool.Config["source"] = "rpool/incus" config.Server.StoragePools = append(config.Server.StoragePools, pool) break } } } poolCreate, err := c.global.asker.AskBool(fmt.Sprintf(i18n.G("Create a new %s pool?")+" (yes/no) [default=yes]: ", strings.ToUpper(pool.Driver)), "yes") if err != nil { return err } if poolCreate { if pool.Driver == "ceph" { // Ask for the name of the cluster pool.Config["ceph.cluster_name"], err = c.global.asker.AskString(i18n.G("Name of the existing CEPH cluster")+" [default=ceph]: ", "ceph", nil) if err != nil { return err } // Ask for the name of the osd pool pool.Config["ceph.osd.pool_name"], err = c.global.asker.AskString(i18n.G("Name of the OSD storage pool")+" [default=incus]: ", "incus", nil) if err != nil { return err } // Ask for the number of placement groups pool.Config["ceph.osd.pg_num"], err = c.global.asker.AskString(i18n.G("Number of placement groups")+" [default=32]: ", "32", nil) if err != nil { return err } } else if pool.Driver == "cephfs" { // Ask for the name of the cluster pool.Config["cephfs.cluster_name"], err = c.global.asker.AskString(i18n.G("Name of the existing CEPHfs cluster")+" [default=ceph]: ", "ceph", nil) if err != nil { return err } // Ask for the name of the cluster pool.Config["source"], err = c.global.asker.AskString(i18n.G("Name of the CEPHfs volume:")+" ", "", nil) if err != nil { return err } } else if pool.Driver == "lvmcluster" { // Ask for the volume group pool.Config["source"], err = c.global.asker.AskString(i18n.G("Name of the shared LVM volume group:")+" ", "", nil) if err != nil { return err } } else { useEmptyBlockDev, err := c.global.asker.AskBool(i18n.G("Would you like to use an existing empty block device (e.g. 
a disk or partition)?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if useEmptyBlockDev { pool.Config["source"], err = c.global.asker.AskString(i18n.G("Path to the existing block device:")+" ", "", func(path string) error { if !linux.IsBlockdevPath(path) { return fmt.Errorf(i18n.G("%q is not a block device"), path) } return nil }) if err != nil { return err } } else { st := unix.Statfs_t{} err := unix.Statfs(internalUtil.VarPath(), &st) if err != nil { return fmt.Errorf(i18n.G("Couldn't statfs %s: %w"), internalUtil.VarPath(), err) } /* choose 5 GiB < x < 30GiB, where x is 20% of the disk size */ defaultSize := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5 if defaultSize > 30 { defaultSize = 30 } if defaultSize < 5 { defaultSize = 5 } pool.Config["size"], err = c.global.asker.AskString( fmt.Sprintf(i18n.G("Size in GiB of the new loop device")+" (1GiB minimum) [default=%dGiB]: ", defaultSize), fmt.Sprintf("%dGiB", defaultSize), func(input string) error { input = strings.Split(input, "GiB")[0] result, err := strconv.ParseInt(input, 10, 64) if err != nil { return err } if result < 1 { return fmt.Errorf(i18n.G("Minimum size is 1GiB")) } return nil }, ) if err != nil { return err } if !strings.HasSuffix(pool.Config["size"], "GiB") { pool.Config["size"] = fmt.Sprintf("%sGiB", pool.Config["size"]) } } } } else { if pool.Driver == "ceph" { // ask for the name of the cluster pool.Config["ceph.cluster_name"], err = c.global.asker.AskString(i18n.G("Name of the existing CEPH cluster")+" [default=ceph]: ", "ceph", nil) if err != nil { return err } // ask for the name of the existing pool pool.Config["source"], err = c.global.asker.AskString(i18n.G("Name of the existing OSD storage pool")+" [default=incus]: ", "incus", nil) if err != nil { return err } pool.Config["ceph.osd.pool_name"] = pool.Config["source"] } else { question := fmt.Sprintf(i18n.G("Name of the existing %s pool or dataset:")+" ", strings.ToUpper(pool.Driver)) pool.Config["source"], err = c.global.asker.AskString(question, "", nil) if err != nil { return err } } } if pool.Driver == "lvm" { _, err := exec.LookPath("thin_check") if err != nil { fmt.Print("\n" + i18n.G(`The LVM thin provisioning tools couldn't be found. LVM can still be used without thin provisioning but this will disable over-provisioning, increase the space requirements and creation time of images, instances and snapshots. If you wish to use thin provisioning, abort now, install the tools from your Linux distribution and make sure that your user can see and run the "thin_check" command before running "init" again.`) + "\n\n") lvmContinueNoThin, err := c.global.asker.AskBool(i18n.G("Do you want to continue without thin provisioning?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if !lvmContinueNoThin { return fmt.Errorf(i18n.G("The LVM thin provisioning tools couldn't be found on the system")) } pool.Config["lvm.use_thinpool"] = "false" } } config.Server.StoragePools = append(config.Server.StoragePools, pool) break } return nil } func (c *cmdAdminInit) askDaemon(config *api.InitPreseed, d incus.InstanceServer, server *api.Server) error { // Detect lack of uid/gid if linux.RunningInUserNS() { fmt.Print("\n" + i18n.G(`We detected that you are running inside an unprivileged container. This means that unless you manually configured your host otherwise, you will not have enough uids and gids to allocate to your containers. Your container's own allocation can be reused to avoid the problem. 
Doing so makes your nested containers slightly less safe as they could in theory attack their parent container and gain more privileges than they otherwise would.`) + "\n\n") shareParentAllocation, err := c.global.asker.AskBool(i18n.G("Would you like to have your containers share their parent's allocation?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if shareParentAllocation { config.Server.Profiles[0].Config["security.privileged"] = "true" } } // Network listener if config.Cluster == nil { overNetwork, err := c.global.asker.AskBool(i18n.G("Would you like the server to be available over the network?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if overNetwork { isIPAddress := func(s string) error { if s != "all" && net.ParseIP(s) == nil { return fmt.Errorf(i18n.G("%q is not an IP address"), s) } return nil } netAddr, err := c.global.asker.AskString(i18n.G("Address to bind to (not including port)")+" [default=all]: ", "all", isIPAddress) if err != nil { return err } if netAddr == "all" { netAddr = "::" } if net.ParseIP(netAddr).To4() == nil { netAddr = fmt.Sprintf("[%s]", netAddr) } netPort, err := c.global.asker.AskInt(fmt.Sprintf(i18n.G("Port to bind to")+" [default=%d]: ", ports.HTTPSDefaultPort), 1, 65535, fmt.Sprintf("%d", ports.HTTPSDefaultPort), func(netPort int64) error { address := internalUtil.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort), ports.HTTPSDefaultPort) if err == nil { if server.Config["cluster.https_address"] == address || server.Config["core.https_address"] == address { // We already own the address, just move on. return nil } } listener, err := net.Listen("tcp", address) if err != nil { return fmt.Errorf(i18n.G("Can't bind address %q: %w"), address, err) } _ = listener.Close() return nil }) if err != nil { return err } config.Server.Config["core.https_address"] = internalUtil.CanonicalNetworkAddressFromAddressAndPort(netAddr, int(netPort), ports.HTTPSDefaultPort) } } // Ask if the user wants images to be automatically refreshed imageStaleRefresh, err := c.global.asker.AskBool(i18n.G("Would you like stale cached images to be updated automatically?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if !imageStaleRefresh { config.Server.Config["images.auto_update_interval"] = "0" } return nil } incus-6.0.4/cmd/incus/admin_init_preseed.go000066400000000000000000000013701477363751000207150ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "io" "os" "github.com/spf13/cobra" "gopkg.in/yaml.v2" incus "github.com/lxc/incus/v6/client" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" ) func (c *cmdAdminInit) RunPreseed(cmd *cobra.Command, args []string, d incus.InstanceServer) (*api.InitPreseed, error) { // Read the YAML bytes, err := io.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf(i18n.G("Failed to read from stdin: %w"), err) } // Parse the YAML config := api.InitPreseed{} // Use strict checking to notify about unknown keys. 
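// For reference, a minimal preseed document accepted here looks roughly like
// the sketch below (keys follow api.InitPreseed; values are hypothetical):
//
//	config:
//	  core.https_address: "[::]:8443"
//	storage_pools:
//	- name: default
//	  driver: dir
//	profiles:
//	- name: default
//	  devices:
//	    root:
//	      type: disk
//	      path: /
//	      pool: default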
err = yaml.UnmarshalStrict(bytes, &config) if err != nil { return nil, fmt.Errorf(i18n.G("Failed to parse the preseed: %w"), err) } return &config, nil } incus-6.0.4/cmd/incus/admin_other.go000066400000000000000000000006741477363751000173720ustar00rootroot00000000000000//go:build !linux package main import ( "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" ) type cmdAdmin struct { global *cmdGlobal } func (c *cmdAdmin) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("admin") cmd.Short = i18n.G("Manage incus daemon") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage incus daemon`)) return cmd } incus-6.0.4/cmd/incus/admin_recover.go000066400000000000000000000174321477363751000177160ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "strings" "github.com/spf13/cobra" "golang.org/x/text/cases" "golang.org/x/text/language" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/internal/recover" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/validate" ) type cmdAdminRecover struct { global *cmdGlobal } func (c *cmdAdminRecover) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("recover") cmd.Short = i18n.G("Recover missing instances and volumes from existing and unknown storage pools") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Recover missing instances and volumes from existing and unknown storage pools This command is mostly used for disaster recovery. It will ask you about unknown storage pools and attempt to access them, along with existing storage pools, and identify any missing instances and volumes that exist on the pools but are not in the database. It will then offer to recreate these database records.`)) cmd.RunE = c.Run return cmd } func (c *cmdAdminRecover) Run(cmd *cobra.Command, args []string) error { // Quick checks. if len(args) > 0 { return fmt.Errorf(i18n.G("Invalid arguments")) } d, err := incus.ConnectIncusUnix("", nil) if err != nil { return err } server, _, err := d.GetServer() if err != nil { return err } isClustered := d.IsClustered() // Get list of existing storage pools to scan. existingPools, err := d.GetStoragePools() if err != nil { return fmt.Errorf(i18n.G("Failed getting existing storage pools: %w"), err) } fmt.Println(i18n.G("This server currently has the following storage pools:")) for _, existingPool := range existingPools { fmt.Printf(" - "+i18n.G("%s (backend=%q, source=%q)")+"\n", existingPool.Name, existingPool.Driver, existingPool.Config["source"]) } unknownPools := make([]api.StoragePoolsPost, 0, len(existingPools)) // Build up a list of unknown pools to scan. // We don't offer this option if the server is clustered because we don't allow creating storage pools on // an individual server when clustered. if !isClustered { var supportedDriverNames []string for { addUnknownPool, err := c.global.asker.AskBool(i18n.G("Would you like to recover another storage pool?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if !addUnknownPool { break } // Get available storage drivers if not done already. 
if supportedDriverNames == nil { for _, supportedDriver := range server.Environment.StorageSupportedDrivers { supportedDriverNames = append(supportedDriverNames, supportedDriver.Name) } } unknownPool := api.StoragePoolsPost{ StoragePoolPut: api.StoragePoolPut{ Config: make(map[string]string), }, } unknownPool.Name, err = c.global.asker.AskString(i18n.G("Name of the storage pool:")+" ", "", validate.Required(func(value string) error { if value == "" { return fmt.Errorf(i18n.G("Pool name cannot be empty")) } for _, p := range unknownPools { if value == p.Name { return fmt.Errorf(i18n.G("Storage pool %q is already on recover list"), value) } } return nil })) if err != nil { return err } unknownPool.Driver, err = c.global.asker.AskString(fmt.Sprintf(i18n.G("Name of the storage backend (%s):")+" ", strings.Join(supportedDriverNames, ", ")), "", validate.IsOneOf(supportedDriverNames...)) if err != nil { return err } unknownPool.Config["source"], err = c.global.asker.AskString(i18n.G("Source of the storage pool (block device, volume group, dataset, path, ... as applicable):")+" ", "", validate.IsNotEmpty) if err != nil { return err } for { var configKey, configValue string _, _ = c.global.asker.AskString(i18n.G("Additional storage pool configuration property (KEY=VALUE, empty when done):")+" ", "", validate.Optional(func(value string) error { configParts := strings.SplitN(value, "=", 2) if len(configParts) < 2 { return fmt.Errorf(i18n.G("Config option should be in the format KEY=VALUE")) } configKey = configParts[0] configValue = configParts[1] return nil })) if configKey == "" { break } unknownPool.Config[configKey] = configValue } unknownPools = append(unknownPools, unknownPool) } } fmt.Println(i18n.G("The recovery process will be scanning the following storage pools:")) for _, p := range existingPools { fmt.Printf(" - "+i18n.G("EXISTING: %q (backend=%q, source=%q)")+"\n", p.Name, p.Driver, p.Config["source"]) } for _, p := range unknownPools { fmt.Printf(" - "+i18n.G("NEW: %q (backend=%q, source=%q)")+"\n", p.Name, p.Driver, p.Config["source"]) } proceed, err := c.global.asker.AskBool(i18n.G("Would you like to continue with scanning for lost volumes?")+" (yes/no) [default=yes]: ", "yes") if err != nil { return err } if !proceed { return nil } fmt.Println(i18n.G("Scanning for unknown volumes...")) // Send /internal/recover/validate request to the daemon. reqValidate := recover.ValidatePost{ Pools: make([]api.StoragePoolsPost, 0, len(existingPools)+len(unknownPools)), } // Add existing pools to request. for _, p := range existingPools { reqValidate.Pools = append(reqValidate.Pools, api.StoragePoolsPost{ Name: p.Name, // Only send existing pool name, the rest will be looked up on server. }) } // Add unknown pools to request. reqValidate.Pools = append(reqValidate.Pools, unknownPools...) 
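// Recovery is a two-step exchange with the daemon: POST /internal/recover/validate
// scans the listed pools and reports unknown volumes plus any missing dependencies
// (the loop below repeats until those are resolved), then POST /internal/recover/import
// recreates the corresponding database records.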
for { resp, _, err := d.RawQuery("POST", "/internal/recover/validate", reqValidate, "") if err != nil { return fmt.Errorf(i18n.G("Failed validation request: %w"), err) } var res recover.ValidateResult err = resp.MetadataAsStruct(&res) if err != nil { return fmt.Errorf(i18n.G("Failed parsing validation response: %w"), err) } if len(unknownPools) > 0 { fmt.Println(i18n.G("The following unknown storage pools have been found:")) for _, unknownPool := range unknownPools { fmt.Printf(" - "+i18n.G("Storage pool %q of type %q")+"\n", unknownPool.Name, unknownPool.Driver) } } if len(res.UnknownVolumes) > 0 { fmt.Println(i18n.G("The following unknown volumes have been found:")) for _, unknownVol := range res.UnknownVolumes { fmt.Printf(" - "+i18n.G("%s %q on pool %q in project %q (includes %d snapshots)")+"\n", cases.Title(language.English).String(unknownVol.Type), unknownVol.Name, unknownVol.Pool, unknownVol.Project, unknownVol.SnapshotCount) } } if len(res.DependencyErrors) > 0 { fmt.Println(i18n.G("You are currently missing the following:")) for _, depErr := range res.DependencyErrors { fmt.Printf(" - %s\n", depErr) } _, _ = c.global.asker.AskString(i18n.G("Please create those missing entries and then hit ENTER:")+" ", "", validate.Optional()) } else { if len(unknownPools) == 0 && len(res.UnknownVolumes) == 0 { fmt.Println(i18n.G("No unknown storage pools or volumes found. Nothing to do.")) return nil } break // Dependencies met. } } proceed, err = c.global.asker.AskBool(i18n.G("Would you like those to be recovered?")+" (yes/no) [default=no]: ", "no") if err != nil { return err } if !proceed { return nil } fmt.Println(i18n.G("Starting recovery...")) // Send /internal/recover/import request to the daemon. // Don't lint next line with gosimple. It says we should convert reqValidate directly to an RecoverImportPost // because their types are identical. This is less clear and will not work if either type changes in the future. reqImport := recover.ImportPost{ //nolint:gosimple Pools: reqValidate.Pools, } _, _, err = d.RawQuery("POST", "/internal/recover/import", reqImport, "") if err != nil { return fmt.Errorf(i18n.G("Failed import request: %w"), err) } return nil } incus-6.0.4/cmd/incus/admin_shutdown.go000066400000000000000000000042771477363751000201270ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "net/http" "net/url" "strconv" "time" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" ) type cmdAdminShutdown struct { global *cmdGlobal flagForce bool flagTimeout int } func (c *cmdAdminShutdown) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("shutdown") cmd.Short = i18n.G("Tell the daemon to shutdown all instances and exit") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Tell the daemon to shutdown all instances and exit This will tell the daemon to start a clean shutdown of all instances, followed by having itself shutdown and exit. 
This can take quite a while as instances can take a long time to shutdown, especially if a non-standard timeout was configured for them.`)) cmd.RunE = c.Run cmd.Flags().IntVarP(&c.flagTimeout, "timeout", "t", 0, "Number of seconds to wait before giving up"+"``") cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, "Force shutdown instead of waiting for running operations to finish"+"``") return cmd } func (c *cmdAdminShutdown) Run(cmd *cobra.Command, args []string) error { connArgs := &incus.ConnectionArgs{ SkipGetServer: true, } d, err := incus.ConnectIncusUnix("", connArgs) if err != nil { return err } v := url.Values{} v.Set("force", strconv.FormatBool(c.flagForce)) chResult := make(chan error, 1) go func() { defer close(chResult) httpClient, err := d.GetHTTPClient() if err != nil { chResult <- err return } // Request shutdown, this shouldn't return until daemon has stopped so use a large request timeout. httpTransport := httpClient.Transport.(*http.Transport) httpTransport.ResponseHeaderTimeout = 3600 * time.Second _, _, err = d.RawQuery("PUT", fmt.Sprintf("/internal/shutdown?%s", v.Encode()), nil, "") if err != nil { chResult <- err return } }() if c.flagTimeout > 0 { select { case err = <-chResult: return err case <-time.After(time.Second * time.Duration(c.flagTimeout)): return fmt.Errorf(i18n.G("Daemon still running after %ds timeout"), c.flagTimeout) } } return <-chResult } incus-6.0.4/cmd/incus/admin_sql.go000066400000000000000000000103611477363751000170420ustar00rootroot00000000000000//go:build linux package main import ( "encoding/json" "fmt" "io" "os" "slices" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" internalSQL "github.com/lxc/incus/v6/internal/sql" ) type cmdAdminSQL struct { global *cmdGlobal flagFormat string } func (c *cmdAdminSQL) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("sql", i18n.G(" ")) cmd.Short = i18n.G("Execute a SQL query against the local or global database") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Execute a SQL query against the local or global database The local database is specific to the cluster member you target the command to, and contains member-specific data (such as the member network address). The global database is common to all members in the cluster, and contains cluster-specific data (such as profiles, containers, etc). If you are running a non-clustered server, the same applies, as that instance is effectively a single-member cluster. If is the special value "-", then the query is read from standard input. If is the special value ".dump", the command returns a SQL text dump of the given database. If is the special value ".schema", the command returns the SQL text schema of the given database. This internal command is mostly useful for debugging and disaster recovery. The development team will occasionally provide hotfixes to users as a set of database queries to fix some data inconsistency.`)) cmd.RunE = c.Run cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G(`Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable it if missing, e.g. 
csv,header`)+"``") cmd.PreRunE = func(cmd *cobra.Command, args []string) error { return cli.ValidateFlagFormatForListOutput(cmd.Flag("format").Value.String()) } return cmd } func (c *cmdAdminSQL) Run(cmd *cobra.Command, args []string) error { if len(args) != 2 { _ = cmd.Help() if len(args) == 0 { return nil } return fmt.Errorf(i18n.G("Missing required arguments")) } database := args[0] query := args[1] if !slices.Contains([]string{"local", "global"}, database) { _ = cmd.Help() return fmt.Errorf(i18n.G("Invalid database type")) } if query == "-" { // Read from stdin bytes, err := io.ReadAll(os.Stdin) if err != nil { return fmt.Errorf(i18n.G("Failed to read from stdin: %w"), err) } query = string(bytes) } // Connect to daemon clientArgs := incus.ConnectionArgs{ SkipGetServer: true, } d, err := incus.ConnectIncusUnix("", &clientArgs) if err != nil { return err } if query == ".dump" || query == ".schema" { url := fmt.Sprintf("/internal/sql?database=%s", database) if query == ".schema" { url += "&schema=1" } response, _, err := d.RawQuery("GET", url, nil, "") if err != nil { return fmt.Errorf(i18n.G("Failed to request dump: %w"), err) } dump := internalSQL.SQLDump{} err = json.Unmarshal(response.Metadata, &dump) if err != nil { return fmt.Errorf(i18n.G("Failed to parse dump response: %w"), err) } fmt.Print(dump.Text) return nil } data := internalSQL.SQLQuery{ Database: database, Query: query, } response, _, err := d.RawQuery("POST", "/internal/sql", data, "") if err != nil { return err } batch := internalSQL.SQLBatch{} err = json.Unmarshal(response.Metadata, &batch) if err != nil { return err } for i, result := range batch.Results { if len(batch.Results) > 1 { fmt.Printf(i18n.G("=> Query %d:")+"\n\n", i) } if result.Type == "select" { err := c.sqlPrintSelectResult(result) if err != nil { return err } } else { fmt.Printf(i18n.G("Rows affected: %d")+"\n", result.RowsAffected) } if len(batch.Results) > 1 { fmt.Println("") } } return nil } func (c *cmdAdminSQL) sqlPrintSelectResult(result internalSQL.SQLResult) error { data := [][]string{} for _, row := range result.Rows { rowData := []string{} for _, col := range row { rowData = append(rowData, fmt.Sprintf("%v", col)) } data = append(data, rowData) } return cli.RenderTable(os.Stdout, c.flagFormat, result.Columns, data, result) } incus-6.0.4/cmd/incus/admin_waitready.go000066400000000000000000000045601477363751000202400ustar00rootroot00000000000000//go:build linux package main import ( "fmt" "time" "github.com/spf13/cobra" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/logger" ) type cmdAdminWaitready struct { global *cmdGlobal flagTimeout int } func (c *cmdAdminWaitready) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("waitready") cmd.Short = i18n.G("Wait for the daemon to be ready to process requests") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Wait for the daemon to be ready to process requests This command will block until the daemon is reachable over its REST API and is done with early start tasks like re-starting previously started containers.`)) cmd.RunE = c.Run cmd.Flags().IntVarP(&c.flagTimeout, "timeout", "t", 0, "Number of seconds to wait before giving up"+"``") return cmd } func (c *cmdAdminWaitready) Run(cmd *cobra.Command, args []string) error { finger := make(chan error, 1) var errLast error go func() { for i := 0; ; i++ { // Start logging only after the 10'th attempt (about 5 // seconds). 
Then after the 30'th attempt (about 15 // seconds), log only one attempt every 10 // attempts (about 5 seconds), to avoid being too // verbose. doLog := false if i > 10 { doLog = i < 30 || ((i % 10) == 0) } if doLog { logger.Debugf(i18n.G("Connecting to the daemon (attempt %d)"), i) } d, err := incus.ConnectIncusUnix("", nil) if err != nil { errLast = err if doLog { logger.Debugf(i18n.G("Failed connecting to the daemon (attempt %d): %v"), i, err) } time.Sleep(500 * time.Millisecond) continue } if doLog { logger.Debugf(i18n.G("Checking if the daemon is ready (attempt %d)"), i) } _, _, err = d.RawQuery("GET", "/internal/ready", nil, "") if err != nil { errLast = err if doLog { logger.Debugf(i18n.G("Failed to check if the daemon is ready (attempt %d): %v"), i, err) } time.Sleep(500 * time.Millisecond) continue } finger <- nil return } }() if c.flagTimeout > 0 { select { case <-finger: break case <-time.After(time.Second * time.Duration(c.flagTimeout)): return fmt.Errorf(i18n.G("Daemon still not running after %ds timeout (%v)"), c.flagTimeout, errLast) } } else { <-finger } return nil } incus-6.0.4/cmd/incus/alias.go000066400000000000000000000167251477363751000161740ustar00rootroot00000000000000package main import ( "fmt" "os" "sort" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" ) type cmdAlias struct { global *cmdGlobal } // Command is a method of the cmdAlias structure that returns a new cobra Command for managing command aliases. // This includes commands for adding, listing, renaming, and removing aliases, along with their usage and descriptions. func (c *cmdAlias) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("alias") cmd.Short = i18n.G("Manage command aliases") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage command aliases`)) // Add aliasAddCmd := cmdAliasAdd{global: c.global, alias: c} cmd.AddCommand(aliasAddCmd.Command()) // List aliasListCmd := cmdAliasList{global: c.global, alias: c} cmd.AddCommand(aliasListCmd.Command()) // Rename aliasRenameCmd := cmdAliasRename{global: c.global, alias: c} cmd.AddCommand(aliasRenameCmd.Command()) // Remove aliasRemoveCmd := cmdAliasRemove{global: c.global, alias: c} cmd.AddCommand(aliasRemoveCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // Add. type cmdAliasAdd struct { global *cmdGlobal alias *cmdAlias } // Command is a method of the cmdAliasAdd structure that returns a new cobra Command for adding new command aliases. // It specifies the command usage, description, and examples, and links it to the RunE method for execution logic. func (c *cmdAliasAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G(" ")) cmd.Short = i18n.G("Add new aliases") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Add new aliases`)) cmd.Example = cli.FormatSection("", i18n.G( `incus alias add list "list -c ns46S" Overwrite the "list" command to pass -c ns46S.`)) cmd.RunE = c.Run return cmd } // Run is a method of the cmdAliasAdd structure. It implements the logic to add a new alias command. // The function checks for valid arguments, verifies if the alias already exists, and if not, adds the new alias to the configuration. func (c *cmdAliasAdd) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf // Quick checks. 
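// (Aliases are kept client-side: conf.Aliases is a plain map of alias name to
// target string, persisted via conf.SaveConfig below; e.g. the entry
// "list" -> "list -c ns46S" from the command example above.)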
exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Look for an existing alias _, ok := conf.Aliases[args[0]] if ok { return fmt.Errorf(i18n.G("Alias %s already exists"), args[0]) } // Add the new alias conf.Aliases[args[0]] = args[1] // Save the config return conf.SaveConfig(c.global.confPath) } // List. type cmdAliasList struct { global *cmdGlobal alias *cmdAlias flagFormat string } // Command is a method of the cmdAliasList structure that returns a new cobra Command for listing command aliases. // It specifies the command usage, description, aliases, and output formatting options, and links it to the RunE method for execution logic. func (c *cmdAliasList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list") cmd.Aliases = []string{"ls"} cmd.Short = i18n.G("List aliases") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `List aliases`)) cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G(`Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable it if missing, e.g. csv,header`)+"``") cmd.PreRunE = func(cmd *cobra.Command, args []string) error { return cli.ValidateFlagFormatForListOutput(cmd.Flag("format").Value.String()) } cmd.RunE = c.Run return cmd } // Run is a method of the cmdAliasList structure. It implements the logic to list existing command aliases. // The function checks for valid arguments, collects all the aliases, sorts them, and renders them in the specified format. func (c *cmdAliasList) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 0) if exit { return err } // List the aliases data := [][]string{} for k, v := range conf.Aliases { data = append(data, []string{k, v}) } // Apply default entries. for k, v := range defaultAliases { _, ok := conf.Aliases[k] if !ok { data = append(data, []string{k, v}) } } sort.Sort(cli.SortColumnsNaturally(data)) header := []string{ i18n.G("ALIAS"), i18n.G("TARGET"), } return cli.RenderTable(os.Stdout, c.flagFormat, header, data, conf.Aliases) } // Rename. type cmdAliasRename struct { global *cmdGlobal alias *cmdAlias } // Command is a method of the cmdAliasRename structure. It returns a new cobra.Command object. // This command allows a user to rename existing aliases in the CLI application. func (c *cmdAliasRename) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("rename", i18n.G(" ")) cmd.Aliases = []string{"mv"} cmd.Short = i18n.G("Rename aliases") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Rename aliases`)) cmd.Example = cli.FormatSection("", i18n.G( `incus alias rename list my-list Rename existing alias "list" to "my-list".`)) cmd.RunE = c.Run return cmd } // Run is a method of the cmdAliasRename structure. It takes a cobra command and a slice of strings as arguments. // This method checks the validity of arguments, ensures the existence of the old alias, verifies the non-existence of the new alias, and then proceeds to rename the alias in the configuration. func (c *cmdAliasRename) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Check for the existing alias target, ok := conf.Aliases[args[0]] if !ok { return fmt.Errorf(i18n.G("Alias %s doesn't exist"), args[0]) } // Check for the new alias _, ok = conf.Aliases[args[1]] if ok { return fmt.Errorf(i18n.G("Alias %s already exists"), args[1]) } // Rename the alias conf.Aliases[args[1]] = target delete(conf.Aliases, args[0]) // Save the config return conf.SaveConfig(c.global.confPath) } // Remove. type cmdAliasRemove struct { global *cmdGlobal alias *cmdAlias } // Command is a method of the cmdAliasRemove structure. It configures and returns a cobra.Command object. // This command enables the removal of a given alias from the command line interface. func (c *cmdAliasRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("")) cmd.Aliases = []string{"rm"} cmd.Short = i18n.G("Remove aliases") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Remove aliases`)) cmd.Example = cli.FormatSection("", i18n.G( `incus alias remove my-list Remove the "my-list" alias.`)) cmd.RunE = c.Run return cmd } // Run is a method of the cmdAliasRemove structure that executes the actual operation of the alias removal command. // It takes as input the name of the alias to be removed and updates the global configuration file to reflect this change. func (c *cmdAliasRemove) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Look for the alias _, ok := conf.Aliases[args[0]] if !ok { return fmt.Errorf(i18n.G("Alias %s doesn't exist"), args[0]) } // Delete the alias delete(conf.Aliases, args[0]) // Save the config return conf.SaveConfig(c.global.confPath) } incus-6.0.4/cmd/incus/cluster.go000066400000000000000000001147421477363751000165640ustar00rootroot00000000000000package main import ( "bufio" "fmt" "io" "os" "slices" "sort" "strings" "github.com/spf13/cobra" yaml "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/termios" "github.com/lxc/incus/v6/shared/util" ) type clusterColumn struct { Name string Data func(api.ClusterMember) string } type cmdCluster struct { global *cmdGlobal } func (c *cmdCluster) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("cluster") cmd.Short = i18n.G("Manage cluster members") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage cluster members`)) // List clusterListCmd := cmdClusterList{global: c.global, cluster: c} cmd.AddCommand(clusterListCmd.Command()) // Rename clusterRenameCmd := cmdClusterRename{global: c.global, cluster: c} cmd.AddCommand(clusterRenameCmd.Command()) // Remove clusterRemoveCmd := cmdClusterRemove{global: c.global, cluster: c} cmd.AddCommand(clusterRemoveCmd.Command()) // Show clusterShowCmd := cmdClusterShow{global: c.global, cluster: c} cmd.AddCommand(clusterShowCmd.Command()) // Info clusterInfoCmd := cmdClusterInfo{global: c.global, cluster: c} cmd.AddCommand(clusterInfoCmd.Command()) // Get clusterGetCmd := cmdClusterGet{global: c.global, cluster: c} cmd.AddCommand(clusterGetCmd.Command()) // Set clusterSetCmd := cmdClusterSet{global: c.global, cluster: c} cmd.AddCommand(clusterSetCmd.Command()) // Unset clusterUnsetCmd := cmdClusterUnset{global: c.global, cluster: c, clusterSet: &clusterSetCmd} cmd.AddCommand(clusterUnsetCmd.Command()) // Enable 
clusterEnableCmd := cmdClusterEnable{global: c.global, cluster: c} cmd.AddCommand(clusterEnableCmd.Command()) // Edit clusterEditCmd := cmdClusterEdit{global: c.global, cluster: c} cmd.AddCommand(clusterEditCmd.Command()) // Add token cmdClusterAdd := cmdClusterAdd{global: c.global, cluster: c} cmd.AddCommand(cmdClusterAdd.Command()) // List tokens cmdClusterListTokens := cmdClusterListTokens{global: c.global, cluster: c} cmd.AddCommand(cmdClusterListTokens.Command()) // Revoke tokens cmdClusterRevokeToken := cmdClusterRevokeToken{global: c.global, cluster: c} cmd.AddCommand(cmdClusterRevokeToken.Command()) // Update certificate cmdClusterUpdateCertificate := cmdClusterUpdateCertificate{global: c.global, cluster: c} cmd.AddCommand(cmdClusterUpdateCertificate.Command()) // Evacuate cluster member cmdClusterEvacuate := cmdClusterEvacuate{global: c.global, cluster: c} cmd.AddCommand(cmdClusterEvacuate.Command()) // Restore cluster member cmdClusterRestore := cmdClusterRestore{global: c.global, cluster: c} cmd.AddCommand(cmdClusterRestore.Command()) clusterGroupCmd := cmdClusterGroup{global: c.global, cluster: c} cmd.AddCommand(clusterGroupCmd.Command()) clusterRoleCmd := cmdClusterRole{global: c.global, cluster: c} cmd.AddCommand(clusterRoleCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // List. type cmdClusterList struct { global *cmdGlobal cluster *cmdCluster flagColumns string flagFormat string flagAllProjects bool } func (c *cmdClusterList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list", i18n.G("[:]")) cmd.Aliases = []string{"ls"} cmd.Short = i18n.G("List all the cluster members") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `List all the cluster members The -c option takes a (optionally comma-separated) list of arguments that control which image attributes to output when displaying in table or csv format. Default column layout is: nurafdsm Column shorthand chars: n - Server name u - URL r - Roles a - Architecture f - Failure Domain d - Description s - Status m - Message`)) cmd.Flags().StringVarP(&c.flagColumns, "columns", "c", defaultClusterColumns, i18n.G("Columns")+"``") cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G(`Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable it if missing, e.g. 
csv,header`)+"``") cmd.Flags().BoolVar(&c.flagAllProjects, "all-projects", false, i18n.G("Display clusters from all projects")) cmd.PreRunE = func(cmd *cobra.Command, args []string) error { return cli.ValidateFlagFormatForListOutput(cmd.Flag("format").Value.String()) } cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpRemotes(toComplete, false) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } const defaultClusterColumns = "nurafdsm" func (c *cmdClusterList) parseColumns() ([]clusterColumn, error) { columnsShorthandMap := map[rune]clusterColumn{ 'n': {i18n.G("NAME"), c.serverColumnData}, 'u': {i18n.G("URL"), c.urlColumnData}, 'r': {i18n.G("ROLES"), c.rolesColumnData}, 'a': {i18n.G("ARCHITECTURE"), c.architectureColumnData}, 'f': {i18n.G("FAILURE DOMAIN"), c.failureDomainColumnData}, 'd': {i18n.G("DESCRIPTION"), c.descriptionColumnData}, 's': {i18n.G("STATUS"), c.statusColumnData}, 'm': {i18n.G("MESSAGE"), c.messageColumnData}, } columnList := strings.Split(c.flagColumns, ",") columns := []clusterColumn{} for _, columnEntry := range columnList { if columnEntry == "" { return nil, fmt.Errorf(i18n.G("Empty column entry (redundant, leading or trailing command) in '%s'"), c.flagColumns) } for _, columnRune := range columnEntry { column, ok := columnsShorthandMap[columnRune] if !ok { return nil, fmt.Errorf(i18n.G("Unknown column shorthand char '%c' in '%s'"), columnRune, columnEntry) } columns = append(columns, column) } } return columns, nil } func (c *cmdClusterList) serverColumnData(cluster api.ClusterMember) string { return cluster.ServerName } func (c *cmdClusterList) urlColumnData(cluster api.ClusterMember) string { return cluster.URL } func (c *cmdClusterList) rolesColumnData(cluster api.ClusterMember) string { roles := cluster.Roles rolesDelimiter := "\n" if c.flagFormat == "csv" { rolesDelimiter = "," } return strings.Join(roles, rolesDelimiter) } func (c *cmdClusterList) architectureColumnData(cluster api.ClusterMember) string { return cluster.Architecture } func (c *cmdClusterList) failureDomainColumnData(cluster api.ClusterMember) string { return cluster.FailureDomain } func (c *cmdClusterList) descriptionColumnData(cluster api.ClusterMember) string { return cluster.Description } func (c *cmdClusterList) statusColumnData(cluster api.ClusterMember) string { return strings.ToUpper(cluster.Status) } func (c *cmdClusterList) messageColumnData(cluster api.ClusterMember) string { return cluster.Message } func (c *cmdClusterList) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
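// The only optional positional argument here is the remote to query; with zero
// arguments the default remote is used. The checks below also reject combining
// --project with --all-projects, since the two flags contradict each other.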
exit, err := c.global.CheckArgs(cmd, args, 0, 1) if exit { return err } if c.global.flagProject != "" && c.flagAllProjects { return fmt.Errorf(i18n.G("Can't specify --project with --all-projects")) } // Parse remote remote := "" if len(args) == 1 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Check if clustered cluster, _, err := resource.server.GetCluster() if err != nil { return err } if !cluster.Enabled { return fmt.Errorf(i18n.G("Server isn't part of a cluster")) } // Get the cluster members members, err := resource.server.GetClusterMembers() if err != nil { return err } // Process the columns columns, err := c.parseColumns() if err != nil { return err } // Render the table data := [][]string{} for _, member := range members { line := []string{} for _, column := range columns { line = append(line, column.Data(member)) } data = append(data, line) } sort.Sort(cli.SortColumnsNaturally(data)) header := []string{} for _, column := range columns { header = append(header, column.Name) } return cli.RenderTable(os.Stdout, c.flagFormat, header, data, members) } // Show. type cmdClusterShow struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:]")) cmd.Short = i18n.G("Show details of a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show details of a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Get the member information member, _, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } // Render as YAML data, err := yaml.Marshal(&member) if err != nil { return err } fmt.Printf("%s", data) return nil } // Info. type cmdClusterInfo struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterInfo) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("info", i18n.G("[:]")) cmd.Short = i18n.G("Show useful information about a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show useful information about a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterInfo) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote. resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Get the member state information. member, _, err := resource.server.GetClusterMemberState(resource.name) if err != nil { return err } // Render as YAML. data, err := yaml.Marshal(&member) if err != nil { return err } fmt.Printf("%s", data) return nil } // Get. 
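// "cluster get" reads a single value from a cluster member: either an entry from
// the member's Config map or, with --property/-p, a writable property looked up
// by its JSON tag via getFieldByJsonTag (defined elsewhere in this package).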
type cmdClusterGet struct { global *cmdGlobal cluster *cmdCluster flagIsProperty bool } func (c *cmdClusterGet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("get", i18n.G("[:] ")) cmd.Short = i18n.G("Get values for cluster member configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as a cluster property")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterMemberConfigs(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterGet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Get the member information member, _, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } if c.flagIsProperty { w := member.Writable() res, err := getFieldByJsonTag(&w, args[1]) if err != nil { return fmt.Errorf(i18n.G("The property %q does not exist on the cluster member %q: %v"), args[1], resource.name, err) } fmt.Printf("%v\n", res) return nil } value, ok := member.Config[args[1]] if !ok { return fmt.Errorf(i18n.G("The key %q does not exist on cluster member %q"), args[1], resource.name) } fmt.Printf("%s\n", value) return nil } // Set. type cmdClusterSet struct { global *cmdGlobal cluster *cmdCluster flagIsProperty bool } func (c *cmdClusterSet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("set", i18n.G("[:] =...")) cmd.Short = i18n.G("Set a cluster member's configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as a cluster property")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterSet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, -1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Get the member information member, _, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } // Get the new config keys keys, err := getConfig(args[1:]...) if err != nil { return err } writable := member.Writable() if c.flagIsProperty { if cmd.Name() == "unset" { for k := range keys { err := unsetFieldByJsonTag(&writable, k) if err != nil { return fmt.Errorf(i18n.G("Error unsetting property: %v"), err) } } } else { err := unpackKVToWritable(&writable, keys) if err != nil { return fmt.Errorf(i18n.G("Error setting properties: %v"), err) } } } else { for k, v := range keys { writable.Config[k] = v } } return resource.server.UpdateClusterMember(resource.name, writable, "") } // Unset. 
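// "cluster unset" is a thin wrapper around "set": its Run appends an empty value
// for the given key and delegates to cmdClusterSet.Run. Properties are explicitly
// cleared by JSON tag; for plain config keys the empty value is what the server
// is expected to interpret as a removal.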
type cmdClusterUnset struct { global *cmdGlobal cluster *cmdCluster clusterSet *cmdClusterSet flagIsProperty bool } func (c *cmdClusterUnset) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("unset", i18n.G("[:] ")) cmd.Short = i18n.G("Unset a cluster member's configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as a cluster property")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterMemberConfigs(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterUnset) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } c.clusterSet.flagIsProperty = c.flagIsProperty args = append(args, "") return c.clusterSet.Run(cmd, args) } // Rename. type cmdClusterRename struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterRename) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("rename", i18n.G("[:] ")) cmd.Aliases = []string{"mv"} cmd.Short = i18n.G("Rename a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Rename a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterRename) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Perform the rename err = resource.server.RenameClusterMember(resource.name, api.ClusterMemberPost{ServerName: args[1]}) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Member %s renamed to %s")+"\n", resource.name, args[1]) } return nil } // Remove. type cmdClusterRemove struct { global *cmdGlobal cluster *cmdCluster flagForce bool flagNonInteractive bool } func (c *cmdClusterRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("[:]")) cmd.Aliases = []string{"rm"} cmd.Short = i18n.G("Remove a member from the cluster") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Remove a member from the cluster`)) cmd.RunE = c.Run cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, i18n.G("Force removing a member, even if degraded")) cmd.Flags().BoolVar(&c.flagNonInteractive, "yes", false, i18n.G("Don't require user confirmation for using --force")) cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterRemove) promptConfirmation(name string) error { reader := bufio.NewReader(os.Stdin) fmt.Printf(i18n.G(`Forcefully removing a server from the cluster should only be done as a last resort. 
The removed server will not be functional after this action and will require a full reset, losing any remaining instance, image or storage volume that the server may have held. When possible, a graceful removal should be preferred, this will require you to move any affected instance, image or storage volume to another server prior to the server being cleanly removed from the cluster. The --force flag should only be used if the server has died, been reinstalled or is otherwise never expected to come back up. Are you really sure you want to force removing %s? (yes/no): `), name) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if !slices.Contains([]string{i18n.G("yes")}, strings.ToLower(input)) { return fmt.Errorf(i18n.G("User aborted delete operation")) } return nil } func (c *cmdClusterRemove) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Prompt for confirmation if --force is used. if !c.flagNonInteractive && c.flagForce { err := c.promptConfirmation(resource.name) if err != nil { return err } } // Delete the cluster member err = resource.server.DeleteClusterMember(resource.name, c.flagForce) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Member %s removed")+"\n", resource.name) } return nil } // Enable. type cmdClusterEnable struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterEnable) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("enable", i18n.G("[:] ")) cmd.Short = i18n.G("Enable clustering on a single non-clustered server") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Enable clustering on a single non-clustered server This command turns a non-clustered server into the first member of a new cluster, which will have the given name. It's required that the server is already available on the network. You can check that by running 'incus config get core.https_address', and possibly set a value for the address if not yet set.`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpRemotes(toComplete, false) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterEnable) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) if exit { return err } // Parse remote remote := "" name := args[0] if len(args) == 2 { remote = args[0] name = args[1] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Check if the server is available on the network. server, _, err := resource.server.GetServer() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current server config: %w"), err) } if server.Config["core.https_address"] == "" && server.Config["cluster.https_address"] == "" { return fmt.Errorf(i18n.G("This server is not available on the network")) } // Check if already enabled currentCluster, etag, err := resource.server.GetCluster() if err != nil { return fmt.Errorf(i18n.G("Failed to retrieve current cluster config: %w"), err) } if currentCluster.Enabled { return fmt.Errorf(i18n.G("This server is already clustered")) } // Enable clustering. 
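// A minimal ClusterPut carrying only ServerName and Enabled=true is enough to
// bootstrap the first member. UpdateCluster returns a background operation and
// op.Wait blocks until the daemon has finished reconfiguring itself; on the wire
// this is presumably a PUT against the client library's cluster endpoint.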
req := api.ClusterPut{} req.ServerName = name req.Enabled = true op, err := resource.server.UpdateCluster(req, etag) if err != nil { return fmt.Errorf(i18n.G("Failed to configure cluster: %w"), err) } err = op.Wait() if err != nil { return fmt.Errorf(i18n.G("Failed to configure cluster: %w"), err) } fmt.Println(i18n.G("Clustering enabled")) return nil } // Edit. type cmdClusterEdit struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:]")) cmd.Short = i18n.G("Edit cluster member configurations as YAML") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Edit cluster member configurations as YAML`)) cmd.Example = cli.FormatSection("", i18n.G( `incus cluster edit < member.yaml Update a cluster member using the content of member.yaml`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterEdit) helpTemplate() string { return i18n.G( `### This is a yaml representation of the cluster member. ### Any line starting with a '# will be ignored.`) } func (c *cmdClusterEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } // If stdin isn't a terminal, read text from it if !termios.IsTerminal(getStdinFd()) { contents, err := io.ReadAll(os.Stdin) if err != nil { return err } newdata := api.ClusterMemberPut{} err = yaml.Unmarshal(contents, &newdata) if err != nil { return err } return resource.server.UpdateClusterMember(resource.name, newdata, "") } // Extract the current value member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } memberWritable := member.Writable() data, err := yaml.Marshal(&memberWritable) if err != nil { return err } // Spawn the editor content, err := textEditor("", []byte(c.helpTemplate()+"\n\n"+string(data))) if err != nil { return err } for { // Parse the text received from the editor newdata := api.ClusterMemberPut{} err = yaml.Unmarshal(content, &newdata) if err == nil { err = resource.server.UpdateClusterMember(resource.name, newdata, etag) } // Respawn the editor if err != nil { fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change")) _, err := os.Stdin.Read(make([]byte, 1)) if err != nil { return err } content, err = textEditor("", content) if err != nil { return err } continue } break } return nil } // Add. 
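// "cluster add" does not add a member directly: it asks the existing cluster for
// a single-use join token (a token-class operation) which the joining server then
// presents during its own initialization. The join procedure on the new member is
// outside this file.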
type cmdClusterAdd struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G("[[:]]")) cmd.Short = i18n.G("Request a join token for adding a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Request a join token for adding a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterAdd) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote. resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Determine the machine name. if resource.name == "" { return fmt.Errorf(i18n.G("A cluster member name must be provided")) } // Request the join token. member := api.ClusterMembersPost{ ServerName: resource.name, } op, err := resource.server.CreateClusterMember(member) if err != nil { return err } opAPI := op.Get() joinToken, err := opAPI.ToClusterJoinToken() if err != nil { return fmt.Errorf(i18n.G("Failed converting token operation to join token: %w"), err) } if !c.global.flagQuiet { fmt.Printf(i18n.G("Member %s join token:")+"\n", resource.name) } fmt.Println(joinToken.String()) return nil } // List Tokens. type cmdClusterListTokens struct { global *cmdGlobal cluster *cmdCluster flagFormat string flagColumns string } type clusterListTokenColumn struct { Name string Data func(*api.ClusterMemberJoinToken) string } func (c *cmdClusterListTokens) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list-tokens", i18n.G("[:]")) cmd.Short = i18n.G("List all active cluster member join tokens") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `List all active cluster member join tokens Default column layout: nte == Columns == The -c option takes a comma separated list of arguments that control which network zone attributes to output when displaying in table or csv format. Column arguments are either pre-defined shorthand chars (see below), or (extended) config keys. Commas between consecutive shorthand chars are optional. Pre-defined column shorthand chars: n - Name t - Token E - Expires At`)) cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G(`Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable if demanded, e.g. 
csv,header`)+"``") cmd.Flags().StringVarP(&c.flagColumns, "columns", "c", defaultclusterTokensColumns, i18n.G("Columns")+"``") cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpRemotes(toComplete, false) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } const defaultclusterTokensColumns = "ntE" func (c *cmdClusterListTokens) parseColumns() ([]clusterListTokenColumn, error) { columnsShorthandMap := map[rune]clusterListTokenColumn{ 'n': {i18n.G("NAME"), c.serverNameColumnData}, 't': {i18n.G("TOKEN"), c.tokenColumnData}, 'E': {i18n.G("EXPIRES AT"), c.expiresAtColumnData}, } columnList := strings.Split(c.flagColumns, ",") columns := []clusterListTokenColumn{} for _, columnEntry := range columnList { if columnEntry == "" { return nil, fmt.Errorf(i18n.G("Empty column entry (redundant, leading or trailing command) in '%s'"), c.flagColumns) } for _, columnRune := range columnEntry { column, ok := columnsShorthandMap[columnRune] if !ok { return nil, fmt.Errorf(i18n.G("Unknown column shorthand char '%c' in '%s'"), columnRune, columnEntry) } columns = append(columns, column) } } return columns, nil } func (c *cmdClusterListTokens) serverNameColumnData(token *api.ClusterMemberJoinToken) string { return token.ServerName } func (c *cmdClusterListTokens) tokenColumnData(token *api.ClusterMemberJoinToken) string { return token.String() } func (c *cmdClusterListTokens) expiresAtColumnData(token *api.ClusterMemberJoinToken) string { return token.ExpiresAt.Local().Format(dateLayout) } func (c *cmdClusterListTokens) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 1) if exit { return err } // Parse remote. remote := "" if len(args) == 1 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Check if clustered. cluster, _, err := resource.server.GetCluster() if err != nil { return err } if !cluster.Enabled { return fmt.Errorf(i18n.G("Server isn't part of a cluster")) } // Get the cluster member join tokens. Use default project as join tokens are created in default project. ops, err := resource.server.UseProject(api.ProjectDefaultName).GetOperations() if err != nil { return err } data := [][]string{} joinTokens := []*api.ClusterMemberJoinToken{} // Parse column flags. columns, err := c.parseColumns() if err != nil { return err } for _, op := range ops { if op.Class != api.OperationClassToken { continue } if op.StatusCode != api.Running { continue // Tokens are single use, so if cancelled but not deleted yet its not available. } joinToken, err := op.ToClusterJoinToken() if err != nil { continue // Operation is not a valid cluster member join token operation. } line := []string{} for _, column := range columns { line = append(line, column.Data(joinToken)) } joinTokens = append(joinTokens, joinToken) data = append(data, line) } sort.Sort(cli.SortColumnsNaturally(data)) header := []string{} for _, column := range columns { header = append(header, column.Name) } return cli.RenderTable(os.Stdout, c.flagFormat, header, data, joinTokens) } // Revoke Tokens. 
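// Join tokens are not stored as first-class objects; they exist as running
// token-class operations in the default project. revoke-token therefore walks
// GetOperations(), decodes each candidate into a join token, and deletes the
// operation whose ServerName matches the requested member.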
type cmdClusterRevokeToken struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterRevokeToken) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("revoke-token", i18n.G("[:]")) cmd.Short = i18n.G("Revoke cluster member join token") cmd.Long = cli.FormatSection(i18n.G("Description"), cmd.Short) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterRevokeToken) Run(cmd *cobra.Command, args []string) error { exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Check if clustered. cluster, _, err := resource.server.GetCluster() if err != nil { return err } if !cluster.Enabled { return fmt.Errorf(i18n.G("Server isn't part of a cluster")) } // Get the cluster member join tokens. Use default project as join tokens are created in default project. ops, err := resource.server.UseProject(api.ProjectDefaultName).GetOperations() if err != nil { return err } for _, op := range ops { if op.Class != api.OperationClassToken { continue } if op.StatusCode != api.Running { continue // Tokens are single use, so if cancelled but not deleted yet its not available. } joinToken, err := op.ToClusterJoinToken() if err != nil { continue // Operation is not a valid cluster member join token operation. } if joinToken.ServerName == resource.name { // Delete the operation err = resource.server.DeleteOperation(op.ID) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster join token for %s:%s deleted")+"\n", resource.remote, resource.name) } return nil } } return fmt.Errorf(i18n.G("No cluster join token for member %s on remote: %s"), resource.name, resource.remote) } // Update Certificates. type cmdClusterUpdateCertificate struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterUpdateCertificate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("update-certificate", i18n.G("[:] ")) cmd.Aliases = []string{"update-cert"} cmd.Short = i18n.G("Update cluster certificate") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G("Update cluster certificate with PEM certificate and key read from input files.")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return nil, cobra.ShellCompDirectiveDefault } if len(args) == 2 { return nil, cobra.ShellCompDirectiveDefault } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterUpdateCertificate) Run(cmd *cobra.Command, args []string) error { conf := c.global.conf exit, err := c.global.CheckArgs(cmd, args, 2, 3) if exit { return err } // Parse remote remote := "" certFile := args[0] keyFile := args[1] if len(args) == 3 { remote = args[0] certFile = args[1] keyFile = args[2] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Check if clustered. 
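// update-certificate only makes sense on a clustered server: after the check
// below it reads the PEM certificate and key from disk, pushes them with
// UpdateClusterCertificate, and then rewrites the cached server certificate for
// this remote (when one exists) so the CLI keeps trusting the cluster.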
cluster, _, err := resource.server.GetCluster() if err != nil { return err } if !cluster.Enabled { return fmt.Errorf(i18n.G("Server isn't part of a cluster")) } if !util.PathExists(certFile) { return fmt.Errorf(i18n.G("Could not find certificate file path: %s"), certFile) } if !util.PathExists(keyFile) { return fmt.Errorf(i18n.G("Could not find certificate key file path: %s"), keyFile) } cert, err := os.ReadFile(certFile) if err != nil { return fmt.Errorf(i18n.G("Could not read certificate file: %s with error: %v"), certFile, err) } key, err := os.ReadFile(keyFile) if err != nil { return fmt.Errorf(i18n.G("Could not read certificate key file: %s with error: %v"), keyFile, err) } certificates := api.ClusterCertificatePut{ ClusterCertificate: string(cert), ClusterCertificateKey: string(key), } err = resource.server.UpdateClusterCertificate(certificates, "") if err != nil { return err } certf := conf.ServerCertPath(resource.remote) if util.PathExists(certf) { err = os.WriteFile(certf, cert, 0o644) if err != nil { return fmt.Errorf(i18n.G("Could not write new remote certificate for remote '%s' with error: %v"), resource.remote, err) } } if !c.global.flagQuiet { fmt.Println(i18n.G("Successfully updated cluster certificates")) } return nil } type cmdClusterEvacuateAction struct { global *cmdGlobal flagAction string flagForce bool } // Cluster member evacuation. type cmdClusterEvacuate struct { global *cmdGlobal cluster *cmdCluster action *cmdClusterEvacuateAction } func (c *cmdClusterEvacuate) Command() *cobra.Command { cmdAction := cmdClusterEvacuateAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("evacuate") cmd.Aliases = []string{"evac"} cmd.Use = usage("evacuate", i18n.G("[:]")) cmd.Short = i18n.G("Evacuate cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Evacuate cluster member`)) cmd.Flags().StringVar(&c.action.flagAction, "action", "", i18n.G(`Force a particular evacuation action`)+"``") cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Cluster member restore. type cmdClusterRestore struct { global *cmdGlobal cluster *cmdCluster action *cmdClusterEvacuateAction } func (c *cmdClusterRestore) Command() *cobra.Command { cmdAction := cmdClusterEvacuateAction{global: c.global} c.action = &cmdAction cmd := c.action.Command("restore") cmd.Use = usage("restore", i18n.G("[:]")) cmd.Short = i18n.G("Restore cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Restore cluster member`)) cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterEvacuateAction) Command(action string) *cobra.Command { cmd := &cobra.Command{} cmd.RunE = c.Run cmd.Flags().BoolVar(&c.flagForce, "force", false, i18n.G(`Force evacuation without user confirmation`)+"``") return cmd } func (c *cmdClusterEvacuateAction) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote. 
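// Both "cluster evacuate" and "cluster restore" funnel into this shared handler;
// the action sent to the server is simply cmd.Name(), and --action (flagAction)
// is forwarded as the evacuation mode. Illustrative invocation only, the valid
// mode values depend on what the server supports:
//   incus cluster evacuate my-remote:node1 --action=stop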
resources, err := c.global.ParseServers(args[0]) if err != nil { return fmt.Errorf(i18n.G("Failed to parse servers: %w"), err) } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } if !c.flagForce { evacuate, err := c.global.asker.AskBool(fmt.Sprintf(i18n.G("Are you sure you want to %s cluster member %q? (yes/no) [default=no]: "), cmd.Name(), resource.name), "no") if err != nil { return err } if !evacuate { return nil } } state := api.ClusterMemberStatePost{ Action: cmd.Name(), Mode: c.flagAction, } op, err := resource.server.UpdateClusterMemberState(resource.name, state) if err != nil { return fmt.Errorf(i18n.G("Failed to update cluster member state: %w"), err) } var format string if cmd.Name() == "restore" { format = i18n.G("Restoring cluster member: %s") } else { format = i18n.G("Evacuating cluster member: %s") } progress := cli.ProgressRenderer{ Format: format, Quiet: c.global.flagQuiet, } _, err = op.AddHandler(progress.UpdateOp) if err != nil { progress.Done("") return err } err = op.Wait() if err != nil { progress.Done("") return err } progress.Done("") return nil } incus-6.0.4/cmd/incus/cluster_group.go000066400000000000000000000527421477363751000200010ustar00rootroot00000000000000package main import ( "fmt" "io" "os" "slices" "sort" "strings" "github.com/spf13/cobra" yaml "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/termios" ) type cmdClusterGroup struct { global *cmdGlobal cluster *cmdCluster } type clusterGroupColumn struct { Name string Data func(api.ClusterGroup) string } // Cluster management including assignment, creation, deletion, editing, listing, removal, renaming, and showing details. func (c *cmdClusterGroup) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("group") cmd.Short = i18n.G("Manage cluster groups") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage cluster groups`)) // Assign clusterGroupAssignCmd := cmdClusterGroupAssign{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupAssignCmd.Command()) // Create clusterGroupCreateCmd := cmdClusterGroupCreate{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupCreateCmd.Command()) // Delete clusterGroupDeleteCmd := cmdClusterGroupDelete{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupDeleteCmd.Command()) // Edit clusterGroupEditCmd := cmdClusterGroupEdit{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupEditCmd.Command()) // List clusterGroupListCmd := cmdClusterGroupList{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupListCmd.Command()) // Remove clusterGroupRemoveCmd := cmdClusterGroupRemove{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupRemoveCmd.Command()) // Rename clusterGroupRenameCmd := cmdClusterGroupRename{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupRenameCmd.Command()) // Show clusterGroupShowCmd := cmdClusterGroupShow{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupShowCmd.Command()) // Add clusterGroupAddCmd := cmdClusterGroupAdd{global: c.global, cluster: c.cluster} cmd.AddCommand(clusterGroupAddCmd.Command()) return cmd } // Assign. type cmdClusterGroupAssign struct { global *cmdGlobal cluster *cmdCluster } // Setting a groups to cluster members, setting usage, description, examples, and the RunE method. 
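// Concretely, assign replaces the member's entire group list with the
// comma-separated set given on the command line; an empty string clears it,
// subject to whatever constraints the server enforces.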
func (c *cmdClusterGroupAssign) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("assign", i18n.G("[:] ")) cmd.Aliases = []string{"apply"} cmd.Short = i18n.G("Assign sets of groups to cluster members") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Assign sets of groups to cluster members`)) cmd.Example = cli.FormatSection("", i18n.G( `incus cluster group assign foo default,bar Set the groups for "foo" to "default" and "bar". incus cluster group assign foo default Reset "foo" to only using the "default" cluster group.`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterGroupNames(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Groups assigning to a cluster member, performing checks, parsing arguments, and updating the member's group configuration. func (c *cmdClusterGroupAssign) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Assign the cluster group if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } if args[1] != "" { member.Groups = strings.Split(args[1], ",") } else { member.Groups = nil } err = resource.server.UpdateClusterMember(resource.name, member.Writable(), etag) if err != nil { return err } if args[1] == "" { args[1] = i18n.G("(none)") } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster member %s added to cluster groups %s")+"\n", resource.name, args[1]) } return nil } // Create. type cmdClusterGroupCreate struct { global *cmdGlobal cluster *cmdCluster flagDescription string } // Creation of a new cluster group, defining its usage, short and long descriptions, and the RunE method. func (c *cmdClusterGroupCreate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("create", i18n.G("[:]")) cmd.Short = i18n.G("Create a cluster group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Create a cluster group`)) cmd.Example = cli.FormatSection("", i18n.G(`incus cluster group create g1 incus cluster group create g1 < config.yaml Create a cluster group with configuration from config.yaml`)) cmd.Flags().StringVar(&c.flagDescription, "description", "", i18n.G("Cluster group description")+"``") cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpRemotes(toComplete, false) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // It creates new cluster group after performing checks, parsing arguments, and making the server call for creation. func (c *cmdClusterGroupCreate) Run(cmd *cobra.Command, args []string) error { var stdinData api.ClusterGroupPut // Quick checks. 
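// Besides the single <group> argument, the command optionally reads an initial
// ClusterGroupPut as YAML from stdin (matching the "create g1 < config.yaml"
// example above); --description, when given, overrides the description field.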
exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // If stdin isn't a terminal, read text from it if !termios.IsTerminal(getStdinFd()) { contents, err := io.ReadAll(os.Stdin) if err != nil { return err } err = yaml.Unmarshal(contents, &stdinData) if err != nil { return err } } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster group name")) } // Create the cluster group group := api.ClusterGroupsPost{ Name: resource.name, ClusterGroupPut: stdinData, } if c.flagDescription != "" { group.Description = c.flagDescription } err = resource.server.CreateClusterGroup(group) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster group %s created")+"\n", resource.name) } return nil } // Delete. type cmdClusterGroupDelete struct { global *cmdGlobal cluster *cmdCluster } // It deletes a cluster group, setting up usage, descriptions, aliases, and the RunE method. func (c *cmdClusterGroupDelete) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("delete", i18n.G("[:]")) cmd.Aliases = []string{"rm"} cmd.Short = i18n.G("Delete a cluster group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Delete a cluster group`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterGroups(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // It's the deletion of a cluster group after argument checks, parsing, and making the server call for deletion. func (c *cmdClusterGroupDelete) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster group name")) } // Delete the cluster group err = resource.server.DeleteClusterGroup(resource.name) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster group %s deleted")+"\n", resource.name) } return nil } // Edit. type cmdClusterGroupEdit struct { global *cmdGlobal cluster *cmdCluster } // This Command generates the cobra command that enables the editing of a cluster group's attributes. func (c *cmdClusterGroupEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:]")) cmd.Short = i18n.G("Edit a cluster group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Edit a cluster group`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterGroups(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // The modification of a cluster group's configuration, either through an editor or via the terminal. func (c *cmdClusterGroupEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
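// Like the cluster member edit command, group edit has two paths: when stdin is
// not a terminal the piped YAML is applied directly; otherwise the current
// ClusterGroupPut is opened in a text editor and re-opened on parse or update
// errors until the change applies or the user aborts.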
exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster group name")) } // If stdin isn't a terminal, read text from it if !termios.IsTerminal(getStdinFd()) { contents, err := io.ReadAll(os.Stdin) if err != nil { return err } newdata := api.ClusterGroupPut{} err = yaml.Unmarshal(contents, &newdata) if err != nil { return err } return resource.server.UpdateClusterGroup(resource.name, newdata, "") } // Extract the current value group, etag, err := resource.server.GetClusterGroup(resource.name) if err != nil { return err } data, err := yaml.Marshal(group) if err != nil { return err } // Spawn the editor content, err := textEditor("", []byte(c.helpTemplate()+"\n\n"+string(data))) if err != nil { return err } for { // Parse the text received from the editor newdata := api.ClusterGroupPut{} err = yaml.Unmarshal(content, &newdata) if err == nil { err = resource.server.UpdateClusterGroup(resource.name, newdata, etag) } // Respawn the editor if err != nil { fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change")) _, err := os.Stdin.Read(make([]byte, 1)) if err != nil { return err } content, err = textEditor("", content) if err != nil { return err } continue } break } return nil } // Returns a string explaining the expected YAML structure for a cluster group configuration. func (c *cmdClusterGroupEdit) helpTemplate() string { return i18n.G( `### This is a YAML representation of the cluster group. ### Any line starting with a '# will be ignored.`) } // List. type cmdClusterGroupList struct { global *cmdGlobal cluster *cmdCluster flagFormat string flagColumns string } // Command returns a cobra command to list all the cluster groups in a specified format. func (c *cmdClusterGroupList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("list", i18n.G("[:]")) cmd.Aliases = []string{"ls"} cmd.Short = i18n.G("List all the cluster groups") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `List all the cluster groups Default column layout: ndm == Columns == The -c option takes a comma separated list of arguments that control which instance attributes to output when displaying in table or csv format. Column arguments are either pre-defined shorthand chars (see below), or (extended) config keys. Commas between consecutive shorthand chars are optional. Pre-defined column shorthand chars: n - Name d - Description m - Member`)) cmd.Flags().StringVarP(&c.flagColumns, "columns", "c", defaultClusterGroupColumns, i18n.G("Columns")+"``") cmd.Flags().StringVarP(&c.flagFormat, "format", "f", "table", i18n.G(`Format (csv|json|table|yaml|compact), use suffix ",noheader" to disable headers and ",header" to enable it if missing, e.g. 
csv,header`)+"``") cmd.PreRunE = func(cmd *cobra.Command, args []string) error { return cli.ValidateFlagFormatForListOutput(cmd.Flag("format").Value.String()) } cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpRemotes(toComplete, false) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } const defaultClusterGroupColumns = "ndm" func (c *cmdClusterGroupList) parseColumns() ([]clusterGroupColumn, error) { columnsShorthandMap := map[rune]clusterGroupColumn{ 'n': {i18n.G("NAME"), c.clusterGroupNameColumnData}, 'm': {i18n.G("MEMBERS"), c.membersColumnData}, 'd': {i18n.G("DESCRIPTION"), c.descriptionColumnData}, } columnList := strings.Split(c.flagColumns, ",") columns := []clusterGroupColumn{} for _, columnEntry := range columnList { if columnEntry == "" { return nil, fmt.Errorf(i18n.G("Empty column entry (redundant, leading or trailing command) in '%s'"), c.flagColumns) } for _, columnRune := range columnEntry { column, ok := columnsShorthandMap[columnRune] if !ok { return nil, fmt.Errorf(i18n.G("Unknown column shorthand char '%c' in '%s'"), columnRune, columnEntry) } columns = append(columns, column) } } return columns, nil } func (c *cmdClusterGroupList) clusterGroupNameColumnData(group api.ClusterGroup) string { return group.Name } func (c *cmdClusterGroupList) descriptionColumnData(group api.ClusterGroup) string { return group.Description } func (c *cmdClusterGroupList) membersColumnData(group api.ClusterGroup) string { return fmt.Sprintf("%d", len(group.Members)) } // Run executes the command to list all the cluster groups, their descriptions, and number of members. func (c *cmdClusterGroupList) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 1) if exit { return err } // Parse remote remote := "" if len(args) == 1 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Check if clustered cluster, _, err := resource.server.GetCluster() if err != nil { return err } if !cluster.Enabled { return fmt.Errorf(i18n.G("Server isn't part of a cluster")) } groups, err := resource.server.GetClusterGroups() if err != nil { return err } // Parse column flags. columns, err := c.parseColumns() if err != nil { return err } // Render the table data := [][]string{} for _, group := range groups { line := []string{} for _, column := range columns { line = append(line, column.Data(group)) } data = append(data, line) } sort.Sort(cli.SortColumnsNaturally(data)) header := []string{} for _, column := range columns { header = append(header, column.Name) } return cli.RenderTable(os.Stdout, c.flagFormat, header, data, groups) } // Remove. type cmdClusterGroupRemove struct { global *cmdGlobal cluster *cmdCluster } // Removal of a specified member from a specific cluster group. 
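// The client only rewrites the member's Groups list and submits it via
// UpdateClusterMember; any membership constraint (for example requiring a member
// to stay in at least one group) is assumed to be enforced server-side.
// Illustrative usage, names are placeholders:
//   incus cluster group remove node1 gpu-nodes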
func (c *cmdClusterGroupRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("[:] ")) cmd.Short = i18n.G("Remove member from group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Remove a cluster member from a cluster group`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterGroupNames(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // The removal process of a cluster member from a specific cluster group, with verbose output unless the 'quiet' flag is set. func (c *cmdClusterGroupRemove) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } // Remove the cluster group member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } if !slices.Contains(member.Groups, args[1]) { return fmt.Errorf(i18n.G("Cluster group %s isn't currently applied to %s"), args[1], resource.name) } groups := []string{} for _, group := range member.Groups { if group == args[1] { continue } groups = append(groups, group) } member.Groups = groups err = resource.server.UpdateClusterMember(resource.name, member.Writable(), etag) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster member %s removed from group %s")+"\n", resource.name, args[1]) } return nil } // Rename. type cmdClusterGroupRename struct { global *cmdGlobal cluster *cmdCluster } // Renaming a cluster group, defining usage, aliases, and linking the associated runtime function. func (c *cmdClusterGroupRename) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("rename", i18n.G("[:] ")) cmd.Aliases = []string{"mv"} cmd.Short = i18n.G("Rename a cluster group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Rename a cluster group`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterGroups(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Renaming operation of a cluster group after checking arguments and parsing the remote server, and provides appropriate output. func (c *cmdClusterGroupRename) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] // Perform the rename err = resource.server.RenameClusterGroup(resource.name, api.ClusterGroupPost{Name: args[1]}) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster group %s renamed to %s")+"\n", resource.name, args[1]) } return nil } // Show. type cmdClusterGroupShow struct { global *cmdGlobal cluster *cmdCluster } // Setting up the 'show' command to display the configurations of a specified cluster group in a remote server. 
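// Example (illustrative): the show command below prints the api.ClusterGroup
// object as YAML via yaml.Marshal. The group name and field values in this
// sketch are assumptions rather than captured output.
//
//	incus cluster group show default
//	name: default
//	description: ""
//	members:
//	- server01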
func (c *cmdClusterGroupShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:]")) cmd.Short = i18n.G("Show cluster group configurations") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show cluster group configurations`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterGroups(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // This retrieves and prints the configuration details of a specified cluster group from a remote server in YAML format. func (c *cmdClusterGroupShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster group name")) } // Show the cluster group group, _, err := resource.server.GetClusterGroup(resource.name) if err != nil { return err } data, err := yaml.Marshal(&group) if err != nil { return err } fmt.Printf("%s", data) return nil } // Add. type cmdClusterGroupAdd struct { global *cmdGlobal cluster *cmdCluster } func (c *cmdClusterGroupAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G("[:] ")) cmd.Short = i18n.G("Add member to group") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Add a cluster member to a cluster group`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterGroupNames(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdClusterGroupAdd) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } // Retrieve cluster member information. member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } if slices.Contains(member.Groups, args[1]) { return fmt.Errorf(i18n.G("Cluster member %s is already in group %s"), resource.name, args[1]) } member.Groups = append(member.Groups, args[1]) err = resource.server.UpdateClusterMember(resource.name, member.Writable(), etag) if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Cluster member %s added to group %s")+"\n", resource.name, args[1]) } return nil } incus-6.0.4/cmd/incus/cluster_role.go000066400000000000000000000115201477363751000175730ustar00rootroot00000000000000package main import ( "fmt" "slices" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/util" ) type cmdClusterRole struct { global *cmdGlobal cluster *cmdCluster } // It uses the cmdGlobal, cmdCluster, and cmdClusterRole structs for context and operation. 
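// Example (illustrative): roles are added to or removed from a single cluster
// member, and several roles may be passed as a comma separated list, matching
// the SplitNTrimSpace handling in the subcommands below. The member and role
// names here are placeholders.
//
//	incus cluster role add server01 custom-role-a,custom-role-b
//	incus cluster role remove server01 custom-role-a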
func (c *cmdClusterRole) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("role") cmd.Short = i18n.G("Manage cluster roles") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G(`Manage cluster roles`)) // Add clusterRoleAddCmd := cmdClusterRoleAdd{global: c.global, cluster: c.cluster, clusterRole: c} cmd.AddCommand(clusterRoleAddCmd.Command()) // Remove clusterRoleRemoveCmd := cmdClusterRoleRemove{global: c.global, cluster: c.cluster, clusterRole: c} cmd.AddCommand(clusterRoleRemoveCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } type cmdClusterRoleAdd struct { global *cmdGlobal cluster *cmdCluster clusterRole *cmdClusterRole } // Setting up the usage, short description, and long description of the command, as well as its RunE method. func (c *cmdClusterRoleAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("add", i18n.G("[:] ")) cmd.Short = i18n.G("Add roles to a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Add roles to a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // It checks and parses input arguments, verifies role assignment, and updates the member's roles. func (c *cmdClusterRoleAdd) Run(cmd *cobra.Command, args []string) error { exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } // Extract the current value member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } memberWritable := member.Writable() newRoles := util.SplitNTrimSpace(args[1], ",", -1, false) for _, newRole := range newRoles { if slices.Contains(memberWritable.Roles, newRole) { return fmt.Errorf(i18n.G("Member %q already has role %q"), resource.name, newRole) } } memberWritable.Roles = append(memberWritable.Roles, newRoles...) return resource.server.UpdateClusterMember(resource.name, memberWritable, etag) } type cmdClusterRoleRemove struct { global *cmdGlobal cluster *cmdCluster clusterRole *cmdClusterRole } // Removing the roles from a cluster member, setting up usage, descriptions, and the RunE method. func (c *cmdClusterRoleRemove) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("remove", i18n.G("[:] ")) cmd.Short = i18n.G("Remove roles from a cluster member") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Remove roles from a cluster member`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpClusterMembers(toComplete) } if len(args) == 1 { return c.global.cmpClusterMemberRoles(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Run executes the removal of specified roles from a cluster member, checking inputs, validating role assignment, and updating the member's roles. 
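// The removal below relies on a removeElementsFromSlice helper defined elsewhere
// in this package. The following is only an illustrative sketch of the filtering
// it is assumed to perform, shown here as a commented example:
//
//	func removeElementsFromSliceSketch(list []string, elements ...string) []string {
//		result := make([]string, 0, len(list))
//		for _, entry := range list {
//			// Drop entries that appear in the removal set, keep the rest.
//			if slices.Contains(elements, entry) {
//				continue
//			}
//			result = append(result, entry)
//		}
//		return result
//	}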
func (c *cmdClusterRoleRemove) Run(cmd *cobra.Command, args []string) error { exit, err := c.global.CheckArgs(cmd, args, 2, 2) if exit { return err } resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing cluster member name")) } // Extract the current value member, etag, err := resource.server.GetClusterMember(resource.name) if err != nil { return err } memberWritable := member.Writable() rolesToRemove := util.SplitNTrimSpace(args[1], ",", -1, false) for _, roleToRemove := range rolesToRemove { if !slices.Contains(memberWritable.Roles, roleToRemove) { return fmt.Errorf(i18n.G("Member %q does not have role %q"), resource.name, roleToRemove) } } memberWritable.Roles = removeElementsFromSlice(memberWritable.Roles, rolesToRemove...) return resource.server.UpdateClusterMember(resource.name, memberWritable, etag) } incus-6.0.4/cmd/incus/completion.go000066400000000000000000000773411477363751000172570ustar00rootroot00000000000000package main import ( "fmt" "io/fs" "os" "path/filepath" "regexp" "strings" "github.com/spf13/cobra" "github.com/lxc/incus/v6/internal/instance" "github.com/lxc/incus/v6/shared/api" ) func (g *cmdGlobal) cmpClusterGroupNames(toComplete string) ([]string, cobra.ShellCompDirective) { var results []string cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] cluster, _, err := resource.server.GetCluster() if err != nil || !cluster.Enabled { return nil, cobra.ShellCompDirectiveError } results, err = resource.server.GetClusterGroupNames() if err != nil { return nil, cobra.ShellCompDirectiveError } return results, cmpDirectives } func (g *cmdGlobal) cmpClusterGroups(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] cluster, _, err := resource.server.GetCluster() if err != nil || !cluster.Enabled { return nil, cobra.ShellCompDirectiveError } groups, err := resource.server.GetClusterGroupNames() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, group := range groups { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = group } else { name = fmt.Sprintf("%s:%s", resource.remote, group) } results = append(results, name) } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) 
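// Merge the completion directives returned for remote names so that behaviour
// such as "no space after a trailing colon" is preserved alongside the group
// completions gathered above.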
cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpClusterMemberConfigs(memberName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(memberName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server cluster, _, err := client.GetCluster() if err != nil || !cluster.Enabled { return nil, cobra.ShellCompDirectiveError } member, _, err := client.GetClusterMember(memberName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range member.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpClusterMemberRoles(memberName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(memberName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server cluster, _, err := client.GetCluster() if err != nil || !cluster.Enabled { return nil, cobra.ShellCompDirectiveError } member, _, err := client.GetClusterMember(memberName) if err != nil { return nil, cobra.ShellCompDirectiveError } return member.Roles, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpClusterMembers(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] cluster, _, err := resource.server.GetCluster() if err != nil || !cluster.Enabled { return nil, cobra.ShellCompDirectiveError } // Get the cluster members members, err := resource.server.GetClusterMembers() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, member := range members { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = member.ServerName } else { name = fmt.Sprintf("%s:%s", resource.remote, member.ServerName) } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpImages(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} var remote string cmpDirectives := cobra.ShellCompDirectiveNoFileComp if strings.Contains(toComplete, ":") { remote = strings.Split(toComplete, ":")[0] } else { remote = g.conf.DefaultRemote } remoteServer, _ := g.conf.GetImageServer(remote) images, _ := remoteServer.GetImages() for _, image := range images { for _, alias := range image.Aliases { var name string if remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = alias.Name } else { name = fmt.Sprintf("%s:%s", remote, alias.Name) } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, true) results = append(results, remotes...) 
cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpImageFingerprintsFromRemote(toComplete string, remote string) ([]string, cobra.ShellCompDirective) { results := []string{} if remote == "" { remote = g.conf.DefaultRemote } remoteServer, _ := g.conf.GetImageServer(remote) images, _ := remoteServer.GetImages() for _, image := range images { if !strings.HasPrefix(image.Fingerprint, toComplete) { continue } results = append(results, image.Fingerprint) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpInstanceAllKeys() ([]string, cobra.ShellCompDirective) { keys := []string{} for k := range instance.InstanceConfigKeysAny { keys = append(keys, k) } return keys, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpInstanceConfigTemplates(instanceName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(instanceName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server instanceNameOnly := instanceName if strings.Contains(instanceName, ":") { instanceNameOnly = strings.Split(instanceName, ":")[1] } results, err := client.GetInstanceTemplateFiles(instanceNameOnly) if err != nil { cobra.CompDebug(fmt.Sprintf("%v", err), true) return nil, cobra.ShellCompDirectiveError } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpInstanceDeviceNames(instanceName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(instanceName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server instanceNameOnly, _, err := client.GetInstance(instanceName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range instanceNameOnly.Devices { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpInstanceSnapshots(instanceName string) ([]string, cobra.ShellCompDirective) { resources, err := g.ParseServers(instanceName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server snapshots, err := client.GetInstanceSnapshotNames(instanceName) if err != nil { return nil, cobra.ShellCompDirectiveError } return snapshots, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpInstances(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] instances, _ := resource.server.GetInstanceNames(api.InstanceTypeAny) for _, instName := range instances { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = instName } else { name = fmt.Sprintf("%s:%s", resource.remote, instName) } if !strings.HasPrefix(name, toComplete) { continue } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) 
cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpInstancesAndSnapshots(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] if strings.Contains(resource.name, instance.SnapshotDelimiter) { instName := strings.SplitN(resource.name, instance.SnapshotDelimiter, 2)[0] snapshots, _ := resource.server.GetInstanceSnapshotNames(instName) for _, snapshot := range snapshots { results = append(results, fmt.Sprintf("%s/%s", instName, snapshot)) } } else { instances, _ := resource.server.GetInstanceNames(api.InstanceTypeAny) for _, instName := range instances { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = instName } else { name = fmt.Sprintf("%s:%s", resource.remote, instName) } results = append(results, name) } } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpInstanceNamesFromRemote(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] containers, _ := resource.server.GetInstanceNames("container") results = append(results, containers...) vms, _ := resource.server.GetInstanceNames("virtual-machine") results = append(results, vms...) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkACLConfigs(aclName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(aclName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server acl, _, err := client.GetNetworkACL(resource.name) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range acl.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkACLs(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] acls, err := resource.server.GetNetworkACLNames() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, acl := range acls { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = acl } else { name = fmt.Sprintf("%s:%s", resource.remote, acl) } results = append(results, name) } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) 
cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkACLRuleProperties() ([]string, cobra.ShellCompDirective) { var results []string allowedKeys := networkACLRuleJSONStructFieldMap() for key := range allowedKeys { results = append(results, fmt.Sprintf("%s=", key)) } return results, cobra.ShellCompDirectiveNoSpace } func (g *cmdGlobal) cmpNetworkForwardConfigs(networkName string, listenAddress string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(networkName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server forward, _, err := client.GetNetworkForward(networkName, listenAddress) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range forward.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkForwards(networkName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(networkName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] results, err := resource.server.GetNetworkForwardAddresses(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkLoadBalancers(networkName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(networkName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] results, err := resource.server.GetNetworkForwardAddresses(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkPeerConfigs(networkName string, peerName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(networkName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] peer, _, err := resource.server.GetNetworkPeer(resource.name, peerName) if err != nil { return nil, cobra.ShellCompDirectiveError } for k := range peer.Config { results = append(results, k) } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkPeers(networkName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(networkName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] results, err := resource.server.GetNetworkPeerNames(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworks(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] networks, err := resource.server.GetNetworkNames() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, network := range networks { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = network } else { name = fmt.Sprintf("%s:%s", resource.remote, network) } results = append(results, name) } } if 
!strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkConfigs(networkName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(networkName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server network, _, err := client.GetNetwork(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range network.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkInstances(networkName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(networkName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server network, _, err := client.GetNetwork(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for _, i := range network.UsedBy { r := regexp.MustCompile(`/1.0/instances/(.*)`) match := r.FindStringSubmatch(i) if len(match) == 2 { results = append(results, match[1]) } } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkProfiles(networkName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(networkName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server network, _, err := client.GetNetwork(networkName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for _, i := range network.UsedBy { r := regexp.MustCompile(`/1.0/profiles/(.*)`) match := r.FindStringSubmatch(i) if len(match) == 2 { results = append(results, match[1]) } } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkZoneConfigs(zoneName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(zoneName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server zone, _, err := client.GetNetworkZone(zoneName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range zone.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpNetworkZoneRecordConfigs(zoneName string, recordName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(zoneName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] peer, _, err := resource.server.GetNetworkZoneRecord(resource.name, recordName) if err != nil { return nil, cobra.ShellCompDirectiveError } for k := range peer.Config { results = append(results, k) } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkZoneRecords(zoneName string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(zoneName) if len(resources) <= 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] results, err := resource.server.GetNetworkZoneRecordNames(zoneName) if err != nil { return nil, 
cobra.ShellCompDirectiveError } return results, cmpDirectives } func (g *cmdGlobal) cmpNetworkZones(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] zones, err := resource.server.GetNetworkZoneNames() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, project := range zones { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = project } else { name = fmt.Sprintf("%s:%s", resource.remote, project) } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpProfileConfigs(profileName string) ([]string, cobra.ShellCompDirective) { resources, err := g.ParseServers(profileName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server profile, _, err := client.GetProfile(resource.name) if err != nil { return nil, cobra.ShellCompDirectiveError } var configs []string for c := range profile.Config { configs = append(configs, c) } return configs, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpProfileDeviceNames(instanceName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(instanceName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server profile, _, err := client.GetProfile(resource.name) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range profile.Devices { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpProfileNamesFromRemote(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] profiles, _ := resource.server.GetProfileNames() results = append(results, profiles...) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpProfiles(toComplete string, includeRemotes bool) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] profiles, _ := resource.server.GetProfileNames() for _, profile := range profiles { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = profile } else { name = fmt.Sprintf("%s:%s", resource.remote, profile) } results = append(results, name) } } if includeRemotes && !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) 
cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpProjectConfigs(projectName string) ([]string, cobra.ShellCompDirective) { resources, err := g.ParseServers(projectName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server project, _, err := client.GetProject(resource.name) if err != nil { return nil, cobra.ShellCompDirectiveError } var configs []string for c := range project.Config { configs = append(configs, c) } return configs, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpProjects(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} cmpDirectives := cobra.ShellCompDirectiveNoFileComp resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] projects, err := resource.server.GetProjectNames() if err != nil { return nil, cobra.ShellCompDirectiveError } for _, project := range projects { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = project } else { name = fmt.Sprintf("%s:%s", resource.remote, project) } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, directives := g.cmpRemotes(toComplete, false) results = append(results, remotes...) cmpDirectives |= directives } return results, cmpDirectives } func (g *cmdGlobal) cmpRemotes(toComplete string, includeAll bool) ([]string, cobra.ShellCompDirective) { results := []string{} for remoteName, rc := range g.conf.Remotes { if !includeAll && rc.Protocol != "incus" && rc.Protocol != "" { continue } if !strings.HasPrefix(remoteName, toComplete) { continue } results = append(results, fmt.Sprintf("%s:", remoteName)) } if len(results) > 0 { return results, cobra.ShellCompDirectiveNoSpace } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpRemoteNames() ([]string, cobra.ShellCompDirective) { results := []string{} for remoteName := range g.conf.Remotes { results = append(results, remoteName) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolConfigs(poolName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server if strings.Contains(poolName, ":") { poolName = strings.Split(poolName, ":")[1] } pool, _, err := client.GetStoragePool(poolName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range pool.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolWithVolume(toComplete string) ([]string, cobra.ShellCompDirective) { if !strings.Contains(toComplete, "/") { pools, compdir := g.cmpStoragePools(toComplete) if compdir == cobra.ShellCompDirectiveError { return nil, compdir } results := []string{} for _, pool := range pools { if strings.HasSuffix(pool, ":") { results = append(results, pool) } else { results = append(results, fmt.Sprintf("%s/", pool)) } } return results, cobra.ShellCompDirectiveNoSpace } pool := strings.Split(toComplete, "/")[0] volumes, compdir := g.cmpStoragePoolVolumes(pool) if compdir == cobra.ShellCompDirectiveError { return nil, compdir } results := []string{} for _, volume := range volumes { results = append(results, fmt.Sprintf("%s/%s", pool, volume)) } return results, 
cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePools(toComplete string) ([]string, cobra.ShellCompDirective) { results := []string{} resources, _ := g.ParseServers(toComplete) if len(resources) > 0 { resource := resources[0] storagePools, _ := resource.server.GetStoragePoolNames() for _, storage := range storagePools { var name string if resource.remote == g.conf.DefaultRemote && !strings.Contains(toComplete, g.conf.DefaultRemote) { name = storage } else { name = fmt.Sprintf("%s:%s", resource.remote, storage) } results = append(results, name) } } if !strings.Contains(toComplete, ":") { remotes, _ := g.cmpRemotes(toComplete, false) results = append(results, remotes...) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolVolumeConfigs(poolName string, volumeName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server pool := poolName if strings.Contains(poolName, ":") { pool = strings.Split(poolName, ":")[1] } volName, volType := parseVolume("custom", volumeName) volume, _, err := client.GetStoragePoolVolume(pool, volType, volName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for k := range volume.Config { results = append(results, k) } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolVolumeInstances(poolName string, volumeName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server pool := poolName if strings.Contains(poolName, ":") { pool = strings.Split(poolName, ":")[1] } volName, volType := parseVolume("custom", volumeName) volume, _, err := client.GetStoragePoolVolume(pool, volType, volName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for _, i := range volume.UsedBy { r := regexp.MustCompile(`/1.0/instances/(.*)`) match := r.FindStringSubmatch(i) if len(match) == 2 { results = append(results, match[1]) } } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolVolumeProfiles(poolName string, volumeName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server pool := poolName if strings.Contains(poolName, ":") { pool = strings.Split(poolName, ":")[1] } volName, volType := parseVolume("custom", volumeName) volume, _, err := client.GetStoragePoolVolume(pool, volType, volName) if err != nil { return nil, cobra.ShellCompDirectiveError } var results []string for _, i := range volume.UsedBy { r := regexp.MustCompile(`/1.0/profiles/(.*)`) match := r.FindStringSubmatch(i) if len(match) == 2 { results = append(results, match[1]) } } return results, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolVolumeSnapshots(poolName string, volumeName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server pool := poolName if strings.Contains(poolName, ":") { pool = 
strings.Split(poolName, ":")[1] } volName, volType := parseVolume("custom", volumeName) snapshots, err := client.GetStoragePoolVolumeSnapshotNames(pool, volType, volName) if err != nil { return nil, cobra.ShellCompDirectiveError } return snapshots, cobra.ShellCompDirectiveNoFileComp } func (g *cmdGlobal) cmpStoragePoolVolumes(poolName string) ([]string, cobra.ShellCompDirective) { // Parse remote resources, err := g.ParseServers(poolName) if err != nil || len(resources) == 0 { return nil, cobra.ShellCompDirectiveError } resource := resources[0] client := resource.server pool := poolName if strings.Contains(poolName, ":") { pool = strings.Split(poolName, ":")[1] } volumes, err := client.GetStoragePoolVolumeNames(pool) if err != nil { return nil, cobra.ShellCompDirectiveError } return volumes, cobra.ShellCompDirectiveNoFileComp } func isSymlinkToDir(path string, d fs.DirEntry) bool { if d.Type()&fs.ModeSymlink == 0 { return false } info, err := os.Stat(path) if err != nil || !info.IsDir() { return false } return true } func (g *cmdGlobal) cmpFiles(toComplete string, includeLocalFiles bool) ([]string, cobra.ShellCompDirective) { instances, directives := g.cmpInstances(toComplete) for i := range instances { if strings.HasSuffix(instances[i], ":") { continue } instances[i] += "/" } if len(instances) == 0 { if includeLocalFiles { return nil, cobra.ShellCompDirectiveDefault } return instances, directives } directives |= cobra.ShellCompDirectiveNoSpace if !includeLocalFiles { return instances, directives } var files []string sep := string(filepath.Separator) dir, prefix := filepath.Split(toComplete) switch prefix { case ".": files = append(files, dir+"."+sep) fallthrough case "..": files = append(files, dir+".."+sep) directives |= cobra.ShellCompDirectiveNoSpace } root, err := filepath.EvalSymlinks(filepath.Dir(dir)) if err != nil { return append(instances, files...), directives } _ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { if err != nil || path == root { return err } base := filepath.Base(path) if strings.HasPrefix(base, prefix) { file := dir + base switch { case d.IsDir(): directives |= cobra.ShellCompDirectiveNoSpace file += sep case isSymlinkToDir(path, d): directives |= cobra.ShellCompDirectiveNoSpace if base == prefix { file += sep } } files = append(files, file) } if d.IsDir() { return fs.SkipDir } return nil }) return append(instances, files...), directives } incus-6.0.4/cmd/incus/config.go000066400000000000000000000546741477363751000163570ustar00rootroot00000000000000package main import ( "fmt" "io" "os" "strings" "github.com/spf13/cobra" "gopkg.in/yaml.v2" incus "github.com/lxc/incus/v6/client" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/internal/instance" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/termios" ) type cmdConfig struct { global *cmdGlobal flagTarget string } // Command creates a Cobra command for managing instance and server configurations, // including options for device, edit, get, metadata, profile, set, show, template, trust, and unset. 
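// Example (illustrative): the subcommands attached below cover both instance and
// server scope. The instance name is a placeholder; the key names are taken from
// the examples further down in this file.
//
//	incus config show c1
//	incus config get c1 limits.cpu
//	incus config set core.https_address=[::]:8443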
func (c *cmdConfig) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("config") cmd.Short = i18n.G("Manage instance and server configuration options") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage instance and server configuration options`)) // Device configDeviceCmd := cmdConfigDevice{global: c.global, config: c} cmd.AddCommand(configDeviceCmd.Command()) // Edit configEditCmd := cmdConfigEdit{global: c.global, config: c} cmd.AddCommand(configEditCmd.Command()) // Get configGetCmd := cmdConfigGet{global: c.global, config: c} cmd.AddCommand(configGetCmd.Command()) // Metadata configMetadataCmd := cmdConfigMetadata{global: c.global, config: c} cmd.AddCommand(configMetadataCmd.Command()) // Profile configProfileCmd := cmdProfile{global: c.global} profileCmd := configProfileCmd.Command() profileCmd.Hidden = true profileCmd.Deprecated = i18n.G("please use `incus profile`") cmd.AddCommand(profileCmd) // Set configSetCmd := cmdConfigSet{global: c.global, config: c} cmd.AddCommand(configSetCmd.Command()) // Show configShowCmd := cmdConfigShow{global: c.global, config: c} cmd.AddCommand(configShowCmd.Command()) // Template configTemplateCmd := cmdConfigTemplate{global: c.global, config: c} cmd.AddCommand(configTemplateCmd.Command()) // Trust configTrustCmd := cmdConfigTrust{global: c.global, config: c} cmd.AddCommand(configTrustCmd.Command()) // Unset configUnsetCmd := cmdConfigUnset{global: c.global, config: c, configSet: &configSetCmd} cmd.AddCommand(configUnsetCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // Edit. type cmdConfigEdit struct { global *cmdGlobal config *cmdConfig } // Command creates a Cobra command to edit instance or server configurations using YAML, with optional flags for targeting cluster members. func (c *cmdConfigEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:][[/]]")) cmd.Short = i18n.G("Edit instance or server configurations as YAML") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Edit instance or server configurations as YAML`)) cmd.Example = cli.FormatSection("", i18n.G( `incus config edit < instance.yaml Update the instance configuration from config.yaml.`)) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // helpTemplate returns a sample YAML configuration and guidelines for editing instance configurations. func (c *cmdConfigEdit) helpTemplate() string { return i18n.G( `### This is a YAML representation of the configuration. ### Any line starting with a '# will be ignored. ### ### A sample configuration looks like: ### name: instance1 ### profiles: ### - default ### config: ### volatile.eth0.hwaddr: 10:66:6a:e9:f8:7f ### devices: ### homedir: ### path: /extra ### source: /home/user ### type: disk ### ephemeral: false ### ### Note that the name is shown but cannot be changed`) } // Run executes the config edit command, allowing users to edit instance or server configurations via an interactive YAML editor. func (c *cmdConfigEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 0, 1) if exit { return err } // Parse remote remote := "" if len(args) > 0 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] fields := strings.SplitN(resource.name, "/", 2) isSnapshot := len(fields) == 2 // Edit the config if resource.name != "" { // Quick checks. if c.config.flagTarget != "" { return fmt.Errorf(i18n.G("--target cannot be used with instances")) } // If stdin isn't a terminal, read text from it if !termios.IsTerminal(getStdinFd()) { contents, err := io.ReadAll(os.Stdin) if err != nil { return err } var op incus.Operation if isSnapshot { newdata := api.InstanceSnapshotPut{} err = yaml.Unmarshal(contents, &newdata) if err != nil { return err } op, err = resource.server.UpdateInstanceSnapshot(fields[0], fields[1], newdata, "") if err != nil { return err } } else { newdata := api.InstancePut{} err = yaml.Unmarshal(contents, &newdata) if err != nil { return err } op, err = resource.server.UpdateInstance(resource.name, newdata, "") if err != nil { return err } } return op.Wait() } var data []byte var etag string // Extract the current value if isSnapshot { var inst *api.InstanceSnapshot inst, etag, err = resource.server.GetInstanceSnapshot(fields[0], fields[1]) if err != nil { return err } // Empty expanded config so it isn't shown in edit screen (relies on omitempty tag). inst.ExpandedConfig = nil inst.ExpandedDevices = nil data, err = yaml.Marshal(&inst) if err != nil { return err } } else { var inst *api.Instance inst, etag, err = resource.server.GetInstance(resource.name) if err != nil { return err } // Empty expanded config so it isn't shown in edit screen (relies on omitempty tag). inst.ExpandedConfig = nil inst.ExpandedDevices = nil data, err = yaml.Marshal(&inst) if err != nil { return err } } // Spawn the editor content, err := textEditor("", []byte(c.helpTemplate()+"\n\n"+string(data))) if err != nil { return err } for { // Parse the text received from the editor if isSnapshot { newdata := api.InstanceSnapshotPut{} err = yaml.Unmarshal(content, &newdata) if err == nil { var op incus.Operation op, err = resource.server.UpdateInstanceSnapshot(fields[0], fields[1], newdata, etag) if err == nil { err = op.Wait() } } } else { newdata := api.InstancePut{} err = yaml.Unmarshal(content, &newdata) if err == nil { var op incus.Operation op, err = resource.server.UpdateInstance(resource.name, newdata, etag) if err == nil { err = op.Wait() } } } // Respawn the editor if err != nil { fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change")) _, err := os.Stdin.Read(make([]byte, 1)) if err != nil { return err } content, err = textEditor("", content) if err != nil { return err } continue } break } return nil } // Targeting if c.config.flagTarget != "" { if !resource.server.IsClustered() { return fmt.Errorf(i18n.G("To use --target, the destination remote must be a cluster")) } resource.server = resource.server.UseTarget(c.config.flagTarget) } // If stdin isn't a terminal, read text from it if !termios.IsTerminal(getStdinFd()) { contents, err := io.ReadAll(os.Stdin) if err != nil { return err } newdata := api.ServerPut{} err = yaml.Unmarshal(contents, &newdata) if err != nil { return err } return resource.server.UpdateServer(newdata, "") } // Extract the current value server, etag, err := resource.server.GetServer() if err != nil { return err } brief := server.Writable() data, 
err := yaml.Marshal(&brief) if err != nil { return err } // Spawn the editor content, err := textEditor("", data) if err != nil { return err } for { // Parse the text received from the editor newdata := api.ServerPut{} err = yaml.Unmarshal(content, &newdata) if err == nil { err = resource.server.UpdateServer(newdata, etag) } // Respawn the editor if err != nil { fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change")) _, err := os.Stdin.Read(make([]byte, 1)) if err != nil { return err } content, err = textEditor("", content) if err != nil { return err } continue } break } return nil } // Get. type cmdConfigGet struct { global *cmdGlobal config *cmdConfig flagExpanded bool flagIsProperty bool } // Command creates a Cobra command to fetch values for given instance or server configuration keys, // with optional flags for expanded configuration and cluster targeting. func (c *cmdConfigGet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("get", i18n.G("[:][] ")) cmd.Short = i18n.G("Get values for instance or server configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Get values for instance or server configuration keys`)) cmd.Flags().BoolVarP(&c.flagExpanded, "expanded", "e", false, i18n.G("Access the expanded configuration")) cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Get the key as an instance property")) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } if len(args) == 1 { return c.global.cmpInstanceAllKeys() } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Run fetches and prints the specified configuration key's value for an instance or server, also handling target and expansion flags. func (c *cmdConfigGet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) if exit { return err } // Parse remote remote := "" if len(args) > 1 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] fields := strings.SplitN(resource.name, "/", 2) isSnapshot := len(fields) == 2 // Get the config key if resource.name != "" { // Quick checks. 
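// Instance scope: cluster member targeting does not apply to instance keys,
// which is why a non-empty --target flag is rejected just below.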
if c.config.flagTarget != "" { return fmt.Errorf(i18n.G("--target cannot be used with instances")) } if isSnapshot { inst, _, err := resource.server.GetInstanceSnapshot(fields[0], fields[1]) if err != nil { return err } if c.flagIsProperty { res, err := getFieldByJsonTag(inst, args[len(args)-1]) if err != nil { return fmt.Errorf(i18n.G("The property %q does not exist on the instance snapshot %s/%s: %v"), args[len(args)-1], fields[0], fields[1], err) } fmt.Printf("%v\n", res) } else { if c.flagExpanded { fmt.Println(inst.ExpandedConfig[args[len(args)-1]]) } else { fmt.Println(inst.Config[args[len(args)-1]]) } } return nil } resp, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } if c.flagIsProperty { w := resp.Writable() res, err := getFieldByJsonTag(&w, args[len(args)-1]) if err != nil { return fmt.Errorf(i18n.G("The property %q does not exist on the instance %q: %v"), args[len(args)-1], resource.name, err) } fmt.Printf("%v\n", res) } else { if c.flagExpanded { fmt.Println(resp.ExpandedConfig[args[len(args)-1]]) } else { fmt.Println(resp.Config[args[len(args)-1]]) } } } else { // Quick check. if c.flagExpanded { return fmt.Errorf(i18n.G("--expanded cannot be used with a server")) } // Targeting if c.config.flagTarget != "" { if !resource.server.IsClustered() { return fmt.Errorf(i18n.G("To use --target, the destination remote must be a cluster")) } resource.server = resource.server.UseTarget(c.config.flagTarget) } resp, _, err := resource.server.GetServer() if err != nil { return err } value := resp.Config[args[len(args)-1]] fmt.Println(value) } return nil } // Set. type cmdConfigSet struct { global *cmdGlobal config *cmdConfig flagIsProperty bool } // Command creates a new Cobra command to set instance or server configuration keys and returns it. func (c *cmdConfigSet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("set", i18n.G("[:][] =...")) cmd.Short = i18n.G("Set instance or server configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Set instance or server configuration keys For backward compatibility, a single configuration key may still be set with: incus config set [:][] `)) cmd.Example = cli.FormatSection("", i18n.G( `incus config set [:] limits.cpu=2 Will set a CPU limit of "2" for the instance. incus config set core.https_address=[::]:8443 Will have the server listen on IPv4 and IPv6 port 8443.`)) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Set the key as an instance property")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } if len(args) == 1 { return c.global.cmpInstanceAllKeys() } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Run executes the "set" command, updating instance or server configuration keys based on provided arguments. func (c *cmdConfigSet) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
exit, err := c.global.CheckArgs(cmd, args, 1, -1) if exit { return err } hasKeyValue := func(args []string) bool { for _, arg := range args { if strings.Contains(arg, "=") { return true } } return false } onlyKeyValue := func(args []string) bool { for _, arg := range args { if !strings.Contains(arg, "=") { return false } } return true } isConfig := func(value string) bool { fields := strings.SplitN(value, ":", 2) key := fields[len(fields)-1] return strings.Contains(key, ".") } // Parse remote remote := "" if onlyKeyValue(args) || isConfig(args[0]) { // server set with: =... remote = "" } else if len(args) == 2 && !hasKeyValue(args) { // server set with: remote = "" } else { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] fields := strings.SplitN(resource.name, "/", 2) isSnapshot := len(fields) == 2 // Set the config keys if resource.name != "" { // Quick checks. if c.config.flagTarget != "" { return fmt.Errorf(i18n.G("--target cannot be used with instances")) } keys, err := getConfig(args[1:]...) if err != nil { return err } if isSnapshot { inst, etag, err := resource.server.GetInstanceSnapshot(fields[0], fields[1]) if err != nil { return err } writable := inst.Writable() if c.flagIsProperty { if cmd.Name() == "unset" { for k := range keys { err := unsetFieldByJsonTag(&writable, k) if err != nil { return fmt.Errorf(i18n.G("Error unsetting properties: %v"), err) } } } else { err := unpackKVToWritable(&writable, keys) if err != nil { return fmt.Errorf(i18n.G("Error setting properties: %v"), err) } } op, err := resource.server.UpdateInstanceSnapshot(fields[0], fields[1], writable, etag) if err != nil { return err } return op.Wait() } else { return fmt.Errorf(i18n.G("The is no config key to set on an instance snapshot.")) } } inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } writable := inst.Writable() if c.flagIsProperty { if cmd.Name() == "unset" { for k := range keys { err := unsetFieldByJsonTag(&writable, k) if err != nil { return fmt.Errorf(i18n.G("Error unsetting properties: %v"), err) } } } else { err := unpackKVToWritable(&writable, keys) if err != nil { return fmt.Errorf(i18n.G("Error setting properties: %v"), err) } } } else { for k, v := range keys { if cmd.Name() == "unset" { _, ok := writable.Config[k] if !ok { return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set"), k) } delete(writable.Config, k) } else { writable.Config[k] = v } } } op, err := resource.server.UpdateInstance(resource.name, writable, etag) if err != nil { return err } return op.Wait() } // Targeting if c.config.flagTarget != "" { if !resource.server.IsClustered() { return fmt.Errorf(i18n.G("To use --target, the destination remote must be a cluster")) } resource.server = resource.server.UseTarget(c.config.flagTarget) } // Server keys server, etag, err := resource.server.GetServer() if err != nil { return err } var keys map[string]string if remote == "" { keys, err = getConfig(args[0:]...) if err != nil { return err } } else { keys, err = getConfig(args[1:]...) if err != nil { return err } } if server.Config == nil { server.Config = map[string]string{} } for k, v := range keys { server.Config[k] = v } return resource.server.UpdateServer(server.Writable(), etag) } // Show. type cmdConfigShow struct { global *cmdGlobal config *cmdConfig flagExpanded bool } // Command sets up the "show" command, which displays instance or server configurations based on the provided arguments. 
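// Example (illustrative): with no name the server configuration is shown, with an
// instance name the instance configuration is shown, and -e/--expanded also folds
// in the keys and devices inherited through profiles. The instance name is a
// placeholder.
//
//	incus config show
//	incus config show c1 --expanded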
func (c *cmdConfigShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:][[/]]")) cmd.Short = i18n.G("Show instance or server configurations") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show instance or server configurations`)) cmd.Flags().BoolVarP(&c.flagExpanded, "expanded", "e", false, i18n.G("Show the expanded configuration")) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) != 0 { return nil, cobra.ShellCompDirectiveNoFileComp } return c.global.cmpInstances(toComplete) } return cmd } // Run executes the "show" command, displaying the YAML-formatted configuration of a specified server or instance. func (c *cmdConfigShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 0, 1) if exit { return err } // Parse remote remote := "" if len(args) > 0 { remote = args[0] } resources, err := c.global.ParseServers(remote) if err != nil { return err } resource := resources[0] // Show configuration var data []byte if resource.name == "" { // Quick check. if c.flagExpanded { return fmt.Errorf(i18n.G("--expanded cannot be used with a server")) } // Targeting if c.config.flagTarget != "" { if !resource.server.IsClustered() { return fmt.Errorf(i18n.G("To use --target, the destination remote must be a cluster")) } resource.server = resource.server.UseTarget(c.config.flagTarget) } // Server config server, _, err := resource.server.GetServer() if err != nil { return err } brief := server.Writable() data, err = yaml.Marshal(&brief) if err != nil { return err } } else { // Quick checks. if c.config.flagTarget != "" { return fmt.Errorf(i18n.G("--target cannot be used with instances")) } // Instance or snapshot config var brief any if instance.IsSnapshot(resource.name) { // Snapshot fields := strings.Split(resource.name, instance.SnapshotDelimiter) snap, _, err := resource.server.GetInstanceSnapshot(fields[0], fields[1]) if err != nil { return err } brief = snap if c.flagExpanded { brief.(*api.InstanceSnapshot).Config = snap.ExpandedConfig brief.(*api.InstanceSnapshot).Devices = snap.ExpandedDevices } } else { // Instance inst, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } writable := inst.Writable() brief = &writable if c.flagExpanded { brief.(*api.InstancePut).Config = inst.ExpandedConfig brief.(*api.InstancePut).Devices = inst.ExpandedDevices } } data, err = yaml.Marshal(&brief) if err != nil { return err } } fmt.Printf("%s", data) return nil } // Unset. type cmdConfigUnset struct { global *cmdGlobal config *cmdConfig configSet *cmdConfigSet flagIsProperty bool } // Command generates a new "unset" command to remove specific configuration keys for an instance or server. 
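// Unset shares its implementation with "set": Run appends an empty value and delegates to the set command's Run.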
func (c *cmdConfigUnset) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("unset", i18n.G("[:][] ")) cmd.Short = i18n.G("Unset instance or server configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Unset instance or server configuration keys`)) cmd.Flags().StringVar(&c.config.flagTarget, "target", "", i18n.G("Cluster member name")+"``") cmd.Flags().BoolVarP(&c.flagIsProperty, "property", "p", false, i18n.G("Unset the key as an instance property")) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } if len(args) == 1 { return c.global.cmpInstanceAllKeys() } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } // Run executes the "unset" command, delegating to the "set" command to remove specific configuration keys. func (c *cmdConfigUnset) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 2) if exit { return err } c.configSet.flagIsProperty = c.flagIsProperty args = append(args, "") return c.configSet.Run(cmd, args) } incus-6.0.4/cmd/incus/config_device.go000066400000000000000000000513741477363751000176700ustar00rootroot00000000000000package main import ( "fmt" "strings" "github.com/spf13/cobra" "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" ) type cmdConfigDevice struct { global *cmdGlobal config *cmdConfig profile *cmdProfile } func (c *cmdConfigDevice) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("device") cmd.Short = i18n.G("Manage devices") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage devices`)) // Add configDeviceAddCmd := cmdConfigDeviceAdd{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceAddCmd.Command()) // Get configDeviceGetCmd := cmdConfigDeviceGet{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceGetCmd.Command()) // List configDeviceListCmd := cmdConfigDeviceList{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceListCmd.Command()) // Override if c.config != nil { configDeviceOverrideCmd := cmdConfigDeviceOverride{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceOverrideCmd.Command()) } // Remove configDeviceRemoveCmd := cmdConfigDeviceRemove{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceRemoveCmd.Command()) // Set configDeviceSetCmd := cmdConfigDeviceSet{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceSetCmd.Command()) // Show configDeviceShowCmd := cmdConfigDeviceShow{global: c.global, config: c.config, profile: c.profile, configDevice: c} cmd.AddCommand(configDeviceShowCmd.Command()) // Unset configDeviceUnsetCmd := cmdConfigDeviceUnset{global: c.global, config: c.config, profile: c.profile, configDevice: c, configDeviceSet: &configDeviceSetCmd} cmd.AddCommand(configDeviceUnsetCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // Add. 
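// cmdConfigDeviceAdd implements both "incus config device add" and "incus profile device add", depending on whether it is attached to the config or the profile parent command.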
type cmdConfigDeviceAdd struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceAdd) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Short = i18n.G("Add instance devices") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Add instance devices`)) if c.config != nil { cmd.Use = usage("add", i18n.G("[:] [key=value...]")) cmd.Example = cli.FormatSection("", i18n.G( `incus config device add [:]instance1 disk source=/share/c1 path=/opt Will mount the host's /share/c1 onto /opt in the instance. incus config device add [:]instance1 disk pool=some-pool source=some-volume path=/opt Will mount the some-volume volume on some-pool onto /opt in the instance.`)) } else if c.profile != nil { cmd.Use = usage("add", i18n.G("[:] [key=value...]")) cmd.Example = cli.FormatSection("", i18n.G( `incus profile device add [:]profile1 disk source=/share/c1 path=/opt Will mount the host's /share/c1 onto /opt in the instance. incus profile device add [:]profile1 disk pool=some-pool source=some-volume path=/opt Will mount the some-volume volume on some-pool onto /opt in the instance.`)) } cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceAdd) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 3, -1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Add the device devname := args[1] device := map[string]string{} device["type"] = args[2] if len(args) > 3 { for _, prop := range args[3:] { results := strings.SplitN(prop, "=", 2) if len(results) != 2 { return fmt.Errorf(i18n.G("No value found in %q"), prop) } k := results[0] v := results[1] device[k] = v } } if c.profile != nil { profile, etag, err := resource.server.GetProfile(resource.name) if err != nil { return err } if profile.Devices == nil { profile.Devices = make(map[string]map[string]string) } _, ok := profile.Devices[devname] if ok { return fmt.Errorf(i18n.G("The device already exists")) } profile.Devices[devname] = device err = resource.server.UpdateProfile(resource.name, profile.Writable(), etag) if err != nil { return err } } else { inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } _, ok := inst.Devices[devname] if ok { return fmt.Errorf(i18n.G("The device already exists")) } inst.Devices[devname] = device op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag) if err != nil { return err } err = op.Wait() if err != nil { return err } } if !c.global.flagQuiet { fmt.Printf(i18n.G("Device %s added to %s")+"\n", devname, resource.name) } return nil } // Get. 
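// cmdConfigDeviceGet retrieves a single configuration key from a device on an instance or profile.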
type cmdConfigDeviceGet struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceGet) Command() *cobra.Command { cmd := &cobra.Command{} if c.config != nil { cmd.Use = usage("get", i18n.G("[:] ")) } else if c.profile != nil { cmd.Use = usage("get", i18n.G("[:] ")) } cmd.Short = i18n.G("Get values for device configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Get values for device configuration keys`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } if len(args) == 1 { if c.config != nil { return c.global.cmpInstanceDeviceNames(args[0]) } else if c.profile != nil { return c.global.cmpProfileDeviceNames(args[0]) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceGet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 3, 3) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Get the config key devname := args[1] key := args[2] if c.profile != nil { profile, _, err := resource.server.GetProfile(resource.name) if err != nil { return err } dev, ok := profile.Devices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } fmt.Println(dev[key]) } else { inst, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } dev, ok := inst.Devices[devname] if !ok { _, ok = inst.ExpandedDevices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } return fmt.Errorf(i18n.G("Device from profile(s) cannot be retrieved for individual instance")) } fmt.Println(dev[key]) } return nil } // List. type cmdConfigDeviceList struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceList) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Aliases = []string{"ls"} cmd.Short = i18n.G("List instance devices") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `List instance devices`)) if c.config != nil { cmd.Use = usage("list", i18n.G("[:]")) } else if c.profile != nil { cmd.Use = usage("list", i18n.G("[:]")) } cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceList) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
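	// Exactly one argument is expected: the instance or profile whose devices are listed.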
exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // List the devices var devices []string if c.profile != nil { profile, _, err := resource.server.GetProfile(resource.name) if err != nil { return err } for k := range profile.Devices { devices = append(devices, k) } } else { inst, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } for k := range inst.Devices { devices = append(devices, k) } } fmt.Printf("%s\n", strings.Join(devices, "\n")) return nil } // Override. type cmdConfigDeviceOverride struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceOverride) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("override", i18n.G("[:] [key=value...]")) cmd.Short = i18n.G("Copy profile inherited devices and override configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Copy profile inherited devices and override configuration keys`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceOverride) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, -1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Override the device inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } devname := args[1] _, ok := inst.Devices[devname] if ok { return fmt.Errorf(i18n.G("The device already exists")) } device, ok := inst.ExpandedDevices[devname] if !ok { return fmt.Errorf(i18n.G("The profile device doesn't exist")) } if len(args) > 2 { for _, prop := range args[2:] { results := strings.SplitN(prop, "=", 2) if len(results) != 2 { return fmt.Errorf(i18n.G("No value found in %q"), prop) } k := results[0] v := results[1] device[k] = v } } inst.Devices[devname] = device op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag) if err != nil { return err } err = op.Wait() if err != nil { return err } if !c.global.flagQuiet { fmt.Printf(i18n.G("Device %s overridden for %s")+"\n", devname, resource.name) } return nil } // Remove. 
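// cmdConfigDeviceRemove deletes one or more devices from an instance or profile; devices inherited from a profile cannot be removed on an individual instance.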
type cmdConfigDeviceRemove struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceRemove) Command() *cobra.Command { cmd := &cobra.Command{} if c.config != nil { cmd.Use = usage("remove", i18n.G("[:] ...")) } else if c.profile != nil { cmd.Use = usage("remove", i18n.G("[:] ...")) } cmd.Aliases = []string{"rm"} cmd.Short = i18n.G("Remove instance devices") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Remove instance devices`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } if c.config != nil { return c.global.cmpInstanceDeviceNames(args[0]) } else if c.profile != nil { return c.global.cmpProfileDeviceNames(args[0]) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceRemove) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 2, -1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Remove the device if c.profile != nil { profile, etag, err := resource.server.GetProfile(resource.name) if err != nil { return err } for _, devname := range args[1:] { _, ok := profile.Devices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } delete(profile.Devices, devname) } err = resource.server.UpdateProfile(resource.name, profile.Writable(), etag) if err != nil { return err } } else { inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } for _, devname := range args[1:] { _, ok := inst.Devices[devname] if !ok { _, ok := inst.ExpandedDevices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } return fmt.Errorf(i18n.G("Device from profile(s) cannot be removed from individual instance. Override device or modify profile instead")) } delete(inst.Devices, devname) } op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag) if err != nil { return err } err = op.Wait() if err != nil { return err } } if !c.global.flagQuiet { fmt.Printf(i18n.G("Device %s removed from %s")+"\n", strings.Join(args[1:], ", "), resource.name) } return nil } // Set. 
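// cmdConfigDeviceSet updates configuration keys on an existing device of an instance or profile; profile-inherited devices must be overridden before they can be modified.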
type cmdConfigDeviceSet struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceSet) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Short = i18n.G("Set device configuration keys") if c.config != nil { cmd.Use = usage("set", i18n.G("[:] =...")) cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Set device configuration keys For backward compatibility, a single configuration key may still be set with: incus config device set [:] `)) } else if c.profile != nil { cmd.Use = usage("set", i18n.G("[:] =...")) cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Set device configuration keys For backward compatibility, a single configuration key may still be set with: incus profile device set [:] `)) } cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } if len(args) == 1 { if c.config != nil { return c.global.cmpInstanceDeviceNames(args[0]) } else if c.profile != nil { return c.global.cmpProfileDeviceNames(args[0]) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceSet) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 3, -1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Set the device config key devname := args[1] keys, err := getConfig(args[2:]...) if err != nil { return err } if c.profile != nil { profile, etag, err := resource.server.GetProfile(resource.name) if err != nil { return err } dev, ok := profile.Devices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } for k, v := range keys { dev[k] = v } profile.Devices[devname] = dev err = resource.server.UpdateProfile(resource.name, profile.Writable(), etag) if err != nil { return err } } else { inst, etag, err := resource.server.GetInstance(resource.name) if err != nil { return err } dev, ok := inst.Devices[devname] if !ok { _, ok = inst.ExpandedDevices[devname] if !ok { return fmt.Errorf(i18n.G("Device doesn't exist")) } return fmt.Errorf(i18n.G("Device from profile(s) cannot be modified for individual instance. Override device or modify profile instead")) } for k, v := range keys { dev[k] = v } inst.Devices[devname] = dev op, err := resource.server.UpdateInstance(resource.name, inst.Writable(), etag) if err != nil { return err } err = op.Wait() if err != nil { return err } } return nil } // Show. 
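// cmdConfigDeviceShow prints the full device configuration of an instance or profile as YAML.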
type cmdConfigDeviceShow struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice profile *cmdProfile } func (c *cmdConfigDeviceShow) Command() *cobra.Command { cmd := &cobra.Command{} if c.config != nil { cmd.Use = usage("show", i18n.G("[:]")) } else if c.profile != nil { cmd.Use = usage("show", i18n.G("[:]")) } cmd.Short = i18n.G("Show full device configuration") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show full device configuration`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing name")) } // Show the devices var devices map[string]map[string]string if c.profile != nil { profile, _, err := resource.server.GetProfile(resource.name) if err != nil { return err } devices = profile.Devices } else { inst, _, err := resource.server.GetInstance(resource.name) if err != nil { return err } devices = inst.Devices } data, err := yaml.Marshal(&devices) if err != nil { return err } fmt.Print(string(data)) return nil } // Unset. type cmdConfigDeviceUnset struct { global *cmdGlobal config *cmdConfig configDevice *cmdConfigDevice configDeviceSet *cmdConfigDeviceSet profile *cmdProfile } func (c *cmdConfigDeviceUnset) Command() *cobra.Command { cmd := &cobra.Command{} if c.config != nil { cmd.Use = usage("unset", i18n.G("[:] ")) } else if c.profile != nil { cmd.Use = usage("unset", i18n.G("[:] ")) } cmd.Short = i18n.G("Unset device configuration keys") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Unset device configuration keys`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { if c.config != nil { return c.global.cmpInstances(toComplete) } else if c.profile != nil { return c.global.cmpProfiles(toComplete, true) } } if len(args) == 1 { if c.config != nil { return c.global.cmpInstanceDeviceNames(args[0]) } else if c.profile != nil { return c.global.cmpProfileDeviceNames(args[0]) } } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigDeviceUnset) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
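	// Exactly three arguments are expected: the instance or profile, the device name and the key to unset.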
exit, err := c.global.CheckArgs(cmd, args, 3, 3) if exit { return err } args = append(args, "") return c.configDeviceSet.Run(cmd, args) } incus-6.0.4/cmd/incus/config_metadata.go000066400000000000000000000124221477363751000202000ustar00rootroot00000000000000package main import ( "fmt" "io" "os" "github.com/spf13/cobra" "gopkg.in/yaml.v2" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/api" "github.com/lxc/incus/v6/shared/termios" ) type cmdConfigMetadata struct { global *cmdGlobal config *cmdConfig } func (c *cmdConfigMetadata) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("metadata") cmd.Short = i18n.G("Manage instance metadata files") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage instance metadata files`)) // Edit configMetadataEditCmd := cmdConfigMetadataEdit{global: c.global, config: c.config, configMetadata: c} cmd.AddCommand(configMetadataEditCmd.Command()) // Show configMetadataShowCmd := cmdConfigMetadataShow{global: c.global, config: c.config, configMetadata: c} cmd.AddCommand(configMetadataShowCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // Edit. type cmdConfigMetadataEdit struct { global *cmdGlobal config *cmdConfig configMetadata *cmdConfigMetadata } func (c *cmdConfigMetadataEdit) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("edit", i18n.G("[:]")) cmd.Short = i18n.G("Edit instance metadata files") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Edit instance metadata files`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigMetadataEdit) helpTemplate() string { return i18n.G( `### This is a YAML representation of the instance metadata. ### Any line starting with a '# will be ignored. ### ### A sample configuration looks like: ### ### architecture: x86_64 ### creation_date: 1477146654 ### expiry_date: 0 ### properties: ### architecture: x86_64 ### description: BusyBox x86_64 ### name: busybox-x86_64 ### os: BusyBox ### templates: ### /template: ### when: ### - "" ### create_only: false ### template: template.tpl ### properties: {}`) } func (c *cmdConfigMetadataEdit) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
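	// Exactly one argument is expected: the instance whose metadata is edited.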
exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing instance name")) } // Edit the metadata if !termios.IsTerminal(getStdinFd()) { metadata := api.ImageMetadata{} content, err := io.ReadAll(os.Stdin) if err != nil { return err } err = yaml.Unmarshal(content, &metadata) if err != nil { return err } return resource.server.UpdateInstanceMetadata(resource.name, metadata, "") } metadata, etag, err := resource.server.GetInstanceMetadata(resource.name) if err != nil { return err } origContent, err := yaml.Marshal(metadata) if err != nil { return err } // Spawn the editor content, err := textEditor("", []byte(c.helpTemplate()+"\n\n"+string(origContent))) if err != nil { return err } for { metadata := api.ImageMetadata{} err = yaml.Unmarshal(content, &metadata) if err == nil { err = resource.server.UpdateInstanceMetadata(resource.name, metadata, etag) } // Respawn the editor if err != nil { fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) fmt.Println(i18n.G("Press enter to open the editor again or ctrl+c to abort change")) _, err := os.Stdin.Read(make([]byte, 1)) if err != nil { return err } content, err = textEditor("", content) if err != nil { return err } continue } break } return nil } // Show. type cmdConfigMetadataShow struct { global *cmdGlobal config *cmdConfig configMetadata *cmdConfigMetadata } func (c *cmdConfigMetadataShow) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("show", i18n.G("[:]")) cmd.Short = i18n.G("Show instance metadata files") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Show instance metadata files`)) cmd.RunE = c.Run cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return c.global.cmpInstances(toComplete) } return nil, cobra.ShellCompDirectiveNoFileComp } return cmd } func (c *cmdConfigMetadataShow) Run(cmd *cobra.Command, args []string) error { // Quick checks. 
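	// Exactly one argument is expected: the instance whose metadata is shown.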
exit, err := c.global.CheckArgs(cmd, args, 1, 1) if exit { return err } // Parse remote resources, err := c.global.ParseServers(args[0]) if err != nil { return err } resource := resources[0] if resource.name == "" { return fmt.Errorf(i18n.G("Missing instance name")) } // Show the instance metadata metadata, _, err := resource.server.GetInstanceMetadata(resource.name) if err != nil { return err } content, err := yaml.Marshal(metadata) if err != nil { return err } fmt.Printf("%s", content) return nil } incus-6.0.4/cmd/incus/config_template.go000066400000000000000000000236771477363751000202510ustar00rootroot00000000000000package main import ( "bytes" "fmt" "io" "os" "sort" "github.com/spf13/cobra" cli "github.com/lxc/incus/v6/internal/cmd" "github.com/lxc/incus/v6/internal/i18n" "github.com/lxc/incus/v6/shared/termios" ) type cmdConfigTemplate struct { global *cmdGlobal config *cmdConfig } func (c *cmdConfigTemplate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("template") cmd.Short = i18n.G("Manage instance file templates") cmd.Long = cli.FormatSection(i18n.G("Description"), i18n.G( `Manage instance file templates`)) // Create configTemplateCreateCmd := cmdConfigTemplateCreate{global: c.global, config: c.config, configTemplate: c} cmd.AddCommand(configTemplateCreateCmd.Command()) // Delete configTemplateDeleteCmd := cmdConfigTemplateDelete{global: c.global, config: c.config, configTemplate: c} cmd.AddCommand(configTemplateDeleteCmd.Command()) // Edit configTemplateEditCmd := cmdConfigTemplateEdit{global: c.global, config: c.config, configTemplate: c} cmd.AddCommand(configTemplateEditCmd.Command()) // List configTemplateListCmd := cmdConfigTemplateList{global: c.global, config: c.config, configTemplate: c} cmd.AddCommand(configTemplateListCmd.Command()) // Show configTemplateShowCmd := cmdConfigTemplateShow{global: c.global, config: c.config, configTemplate: c} cmd.AddCommand(configTemplateShowCmd.Command()) // Workaround for subcommand usage errors. See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } return cmd } // Create. type cmdConfigTemplateCreate struct { global *cmdGlobal config *cmdConfig configTemplate *cmdConfigTemplate } func (c *cmdConfigTemplateCreate) Command() *cobra.Command { cmd := &cobra.Command{} cmd.Use = usage("create", i18n.G("[:]