pax_global_header00006660000000000000000000000064147645423500014524gustar00rootroot0000000000000052 comment=a8ba96aa59fba614fdc5f59cc28b5d112112a21c debos-1.1.5/000077500000000000000000000000001476454235000126245ustar00rootroot00000000000000debos-1.1.5/.dockerignore000066400000000000000000000000051476454235000152730ustar00rootroot00000000000000.git debos-1.1.5/.github/000077500000000000000000000000001476454235000141645ustar00rootroot00000000000000debos-1.1.5/.github/dependabot.yml000066400000000000000000000003151476454235000170130ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" debos-1.1.5/.github/workflows/000077500000000000000000000000001476454235000162215ustar00rootroot00000000000000debos-1.1.5/.github/workflows/ci.yaml000066400000000000000000000261701476454235000175060ustar00rootroot00000000000000name: Build and Test env: GITHUB_TAG: ghcr.io/${{ github.repository }} DOCKER_TARGET_PLATFORMS: linux/amd64,linux/arm64 on: push: branches-ignore: - '*.tmp' tags: - '*' # Build at 04:00am every Monday schedule: - cron: "0 4 * * 1" pull_request: workflow_dispatch: jobs: test: strategy: fail-fast: false matrix: variant: - arch - bookworm - trixie runs-on: ubuntu-latest defaults: run: shell: bash container: image: ghcr.io/go-debos/test-containers/${{matrix.variant}}:main steps: - name: Checkout code uses: actions/checkout@v4 - name: Test build run: go build -o debos cmd/debos/debos.go - name: Run unit tests run: go test -v ./... | tee test.out - name: Ensure no tests were skipped run: "! grep -q SKIP test.out" build: name: Build Docker container runs-on: ubuntu-latest needs: test steps: - name: Repository checkout uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Setup Docker buildx uses: docker/setup-buildx-action@v3 - name: Use cache uses: actions/cache@v4 with: path: /tmp/.build-cache key: ${{ runner.os }}-docker - name: Build Docker image uses: docker/build-push-action@v6 with: context: . push: false pull: true file: docker/Dockerfile platforms: ${{ env.DOCKER_TARGET_PLATFORMS }} tags: debos cache-from: type=local,src=/tmp/.build-cache cache-to: type=local,dest=/tmp/.build-cache,mode=max # WORKAROUND: # Docker buildx cannot export multi-platform images with type=docker, # but only with type=oci. The latter cannot be imported without an # intermediate registry. Thus export only the amd64 image for usage # in the test stage. # # see https://docs.docker.com/reference/cli/docker/buildx/build/#docker - name: Export amd64 Docker image for later usage uses: docker/build-push-action@v6 with: context: . push: false pull: true file: docker/Dockerfile platforms: linux/amd64 tags: debos cache-from: type=local,src=/tmp/.build-cache cache-to: type=local,dest=/tmp/.build-cache,mode=max outputs: type=docker,dest=/tmp/debos-image.tar - name: Upload artifact uses: actions/upload-artifact@v4 with: name: debos-image path: /tmp/debos-image.tar unit-tests: name: Run unit tests needs: - build runs-on: ubuntu-latest steps: - name: Repository checkout uses: actions/checkout@v4 - name: Setup Docker buildx uses: docker/setup-buildx-action@v3 - name: Use cache uses: actions/cache@v4 with: path: /tmp/.build-cache key: ${{ runner.os }}-docker - name: Build Docker image uses: docker/build-push-action@v6 with: context: . 
push: false tags: debos-builder file: docker/Dockerfile cache-from: type=local,src=/tmp/.build-cache load: true target: builder - name: Run unit tests run: | docker compose -f docker/unit-tests.test.yml \ up --exit-code-from=sut recipe-tests: needs: build strategy: fail-fast: false matrix: backend: - nofakemachine - qemu - uml - kvm test: - { name: "recipes", case: "recipes" } - { name: "templating", case: "templating", variables: " -t escaped:\\$ba\\'d\\$gers\\ snakes" } - { name: "partitioning", case: "partitioning" } - { name: "msdos partitioning", case: "msdos" } - { name: "debian (amd64, debootstrap)", case: "debian", variables: "-t architecture:amd64" } - { name: "debian (amd64, mmdebstrap)", case: "debian", variables: "-t architecture:amd64 -t tool:mmdebstrap" } - { name: "raw", case: "raw" } exclude: - backend: nofakemachine test: { name: "partitioning", case: "partitioning" } - backend: nofakemachine test: { name: "msdos partitioning", case: "msdos" } - backend: nofakemachine test: { name: "raw", case: "raw" } include: - backend: kvm test: { name: "debian (arm64, debootstrap)", case: "debian", variables: "-t architecture:arm64" } - backend: kvm test: { name: "debian (arm64, mmdebstrap)", case: "debian", variables: "-t architecture:arm64 -t tool:mmdebstrap" } - backend: kvm test: { name: "debian (armhf, debootstrap)", case: "debian", variables: "-t architecture:armhf" } - backend: kvm test: { name: "debian (armhf, mmdebstrap)", case: "debian", variables: "-t architecture:armhf -t tool:mmdebstrap" } - backend: kvm test: { name: "raw (4096 sector size)", case: "raw", variables: "-t sectorsize:4096" } - backend: kvm test: { name: "arch", case: "arch" } - backend: kvm test: { name: "apertis", case: "apertis" } - backend: kvm test: { name: "512 sector size GPT partition table", case: "partitioning-sector-size", variables: "-t sectorsize:512" } - backend: kvm test: { name: "4096 sector size GPT partition table", case: "partitioning-sector-size", variables: "-t sectorsize:4096" } name: ${{matrix.test.name}} on ${{matrix.backend}} runs-on: 'ubuntu-latest' steps: - name: Repository checkout uses: actions/checkout@v4 - name: Install QEMU emulation binaries if: ${{ matrix.backend == 'nofakemachine' }} run: | sudo apt-get update && sudo apt-get install -y qemu-user-static - name: Download artifact uses: actions/download-artifact@v4 with: name: debos-image path: /tmp - name: Load image run: | docker load --input /tmp/debos-image.tar - name: run ${{matrix.test.case}} in docker image run: docker run --cgroupns=private -v $(pwd)/tests:/tests -w /tests --tmpfs /scratch:exec --tmpfs /run ${{ matrix.backend.name == 'kvm' && '--device /dev/kvm' || '' }} --privileged -e TMP=/scratch -e SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1 debos -v ${{matrix.backend == 'nofakemachine' && '--disable-fakemachine' || format('{0}{1}', '--fakemachine-backend=',matrix.backend) }} ${{matrix.test.variables}} ${{matrix.test.case}}/test.yaml # Job to key success status against allgreen: name: allgreen if: always() needs: - unit-tests - recipe-tests runs-on: ubuntu-latest steps: - name: Decide whether the needed jobs succeeded or failed uses: re-actors/alls-green@release/v1 with: jobs: ${{ toJSON(needs) }} publish-github: name: Publish to GHCR needs: - unit-tests - recipe-tests if: github.event_name != 'pull_request' runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Extract Docker metadata id: meta uses: docker/metadata-action@v5 with: images: ${{ 
env.GITHUB_TAG }} tags: | "type=ref,event=branch" "type=ref,suffix=-{{sha}},event=branch" "type=ref,suffix=-{{date 'YYYYMMDD'}},event=branch" "type=ref,event=tag" "type=ref,event=pr" - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Setup Docker buildx uses: docker/setup-buildx-action@v3 - name: Use cache uses: actions/cache@v4 with: path: /tmp/.build-cache key: ${{ runner.os }}-docker - name: Build and push Docker image uses: docker/build-push-action@v6 with: context: . push: true platforms: ${{ env.DOCKER_TARGET_PLATFORMS }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} file: docker/Dockerfile cache-from: type=local,src=/tmp/.build-cache check-dockerhub-secrets: name: Check DockerHub secrets exist runs-on: ubuntu-latest outputs: has-secrets: ${{ steps.check-secrets.outputs.has-secrets }} steps: - id: check-secrets name: Check secrets exist run: | if [[ "${{ secrets.DOCKERHUB_IMAGE }}" != "" && \ "${{ secrets.DOCKERHUB_USERNAME }}" != "" && \ "${{ secrets.DOCKERHUB_PASSWORD }}" != "" ]]; \ then echo "Dockerhub secrets exist" echo "has-secrets=true" >> $GITHUB_OUTPUT else echo "Dockerhub secrets do not exist; not pushing to Dockerhub" echo "Please set the following secrets on GitHub (settings > secrets > actions > new):" echo "DOCKERHUB_IMAGE, DOCKERHUB_USERNAME, DOCKERHUB_PASSWORD" echo "has-secrets=false" >> $GITHUB_OUTPUT fi publish-dockerhub: name: Publish to DockerHub needs: - check-dockerhub-secrets - unit-tests - recipe-tests if: | needs.check-dockerhub-secrets.outputs.has-secrets == 'true' && github.event_name != 'pull_request' runs-on: ubuntu-latest permissions: contents: read packages: write steps: - name: Checkout repository uses: actions/checkout@v4 - name: Extract Docker metadata id: meta uses: docker/metadata-action@v5 with: images: ${{ secrets.DOCKERHUB_IMAGE }} tags: | "type=ref,event=branch" "type=ref,suffix=-{{sha}},event=branch" "type=ref,suffix=-{{date 'YYYYMMDD'}},event=branch" "type=ref,event=tag" "type=ref,event=pr" - name: Login to DockerHub uses: docker/login-action@v3 continue-on-error: true with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - name: Setup Docker buildx uses: docker/setup-buildx-action@v3 - name: Use cache uses: actions/cache@v4 with: path: /tmp/.build-cache key: ${{ runner.os }}-docker - name: Build and push Docker image uses: docker/build-push-action@v6 continue-on-error: true with: context: . 
push: true platforms: ${{ env.DOCKER_TARGET_PLATFORMS }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} file: docker/Dockerfile cache-from: type=local,src=/tmp/.build-cache debos-1.1.5/.gitignore000066400000000000000000000016651476454235000146240ustar00rootroot00000000000000 # Created by https://www.gitignore.io/api/vim,linux,go ### Go ### # Binaries for programs and plugins *.exe *.dll *.so *.dylib # Test binary, build with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/ # Golang project vendor packages which should be ignored vendor/ ### Linux ### *~ # temporary files which can be created if a process still has a handle open of a deleted file .fuse_hidden* # KDE directory preferences .directory # Linux trash folder which might appear on any partition or disk .Trash-* # .nfs files are created when an open file is removed but is still being accessed .nfs* ### Vim ### # swap [._]*.s[a-v][a-z] [._]*.sw[a-p] [._]s[a-v][a-z] [._]sw[a-p] # session Session.vim # temporary .netrwhist # auto-generated tag files tags # End of https://www.gitignore.io/api/vim,linux,go debos-1.1.5/LICENSE000066400000000000000000000261251476454235000136370ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2017,2018 Collabora Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. debos-1.1.5/README.md000066400000000000000000000172011476454235000141040ustar00rootroot00000000000000# debos - Debian OS image builder ## Synopsis debos [options] debos [--help] Application Options: -b, --fakemachine-backend= Fakemachine backend to use (default: auto) --artifactdir= Directory for packed archives and ostree repositories (default: current directory) -t, --template-var= Template variables (use -t VARIABLE:VALUE syntax) --debug-shell Fall into interactive shell on error -s, --shell= Redefine interactive shell binary (default: bash) (default: /bin/bash) --scratchsize= Size of disk backed scratch space -c, --cpus= Number of CPUs to use for build VM (default: 2) -m, --memory= Amount of memory for build VM (default: 2048MB) --show-boot Show boot/console messages from the fake machine -e, --environ-var= Environment variables (use -e VARIABLE:VALUE syntax) -v, --verbose Verbose output --print-recipe Print final recipe --dry-run Compose final recipe to build but without any real work started --disable-fakemachine Do not use fakemachine. 
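For example, a typical invocation (the recipe name and values here are only illustrative) combines a few of the options above:

    debos -c 4 -m 4096MB -t image:my-image.tgz example.yaml
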
## Description

debos is a tool to make the creation of various Debian-based OS images
simpler. While most other tools focus on specific use-cases, debos is
designed to be a toolchain making common actions trivial while providing
enough rope to do whatever tweaking might be required behind the scenes.

debos expects a YAML file as input and runs the actions listed in the file
sequentially. These actions should be self-contained and independent of each
other.

Some of the actions provided by debos to customise and produce images are:

* apt: install packages and their dependencies with 'apt'
* debootstrap: construct the target rootfs with debootstrap
* download: download a single file from the internet
* filesystem-deploy: deploy a root filesystem to an image previously created
* image-partition: create an image file, make partitions and format them
* ostree-commit: create an OSTree commit from rootfs
* ostree-deploy: deploy an OSTree branch to the image
* overlay: do a recursive copy of directories or files to the target filesystem
* pack: create a tarball with the target filesystem
* pacman: install packages and their dependencies with pacman
* pacstrap: construct the target rootfs with pacstrap
* raw: directly write a file to the output image at a given offset
* recipe: include the recipe actions at the given path
* run: run a command or script in the filesystem or on the host
* unpack: unpack files from an archive into the filesystem

A full syntax description of all the debos actions can be found at:
https://godoc.org/github.com/go-debos/debos/actions

## Installation (Docker container)

An official debos container is available:
```
docker pull godebos/debos
```
See [docker/README.md](docker/README.md) for usage.

## Installation from source (under Debian)

    sudo apt install golang git libglib2.0-dev libostree-dev qemu-system-x86 \
      qemu-user-static debootstrap systemd-container
    export GOPATH=/opt/src/gocode # or whatever suits your needs
    go install -v github.com/go-debos/debos/cmd/debos@latest
    /opt/src/gocode/bin/debos --help

## Simple example

The following example will create an arm64 image, install several packages in
it, change the file /etc/hostname to "debian" and finally make a tarball of
the complete system.

    {{- $image := or .image "debian.tgz" -}}

    architecture: arm64

    actions:
      - action: debootstrap
        suite: bookworm
        components:
          - main
          - non-free-firmware
        mirror: https://deb.debian.org/debian
        variant: minbase

      - action: apt
        packages: [ sudo, openssh-server, adduser, systemd-sysv, firmware-linux ]

      - action: run
        chroot: true
        command: echo debian > /etc/hostname

      - action: pack
        file: {{ $image }}
        compression: gz

To run it, create a file named `example.yaml` and run:

    debos example.yaml

The final tarball will be named "debian.tgz". If you would like to modify the
filename, you can provide a different name for the variable image like this:

    debos -t image:"debian-arm64.tgz" example.yaml

## Other examples

Example recipes are collected in a separate repository:
https://github.com/go-debos/debos-recipes

## Environment variables

debos reads a predefined list of environment variables from the host and
propagates them to the fakemachine build environment. The set of environment
variables is defined by `environ_vars` in `cmd/debos/debos.go`.
Currently the list of environment variables includes the proxy environment variables documented at: https://wiki.archlinux.org/index.php/proxy_settings The list of environment variables currently exported to fakemachine is: http_proxy, https_proxy, ftp_proxy, rsync_proxy, all_proxy, no_proxy While the elements of `environ_vars` are in lower case, for each element both lower and upper case variants are probed on the host and if found propagated to fakemachine. So if the host has the environment variables HTTP_PROXY and no_proxy defined, both will be propagated to fakemachine respecting the case. The command line options `--environ-var` and `-e` can be used to specify, overwrite and unset environment variables for fakemachine with the syntax: $ debos -e ENVIRONVAR:VALUE ... To unset an environment variable, or in other words, to prevent an environment variable being propagated to fakemachine, use the same syntax without a value. debos accepts multiple -e simultaneously. ## Proxy configuration While the proxy related environment variables are exported from the host to fakemachine, there are two known sources of issues: * Using localhost will not work from fakemachine. Use an address which is valid on your network. debos will warn if the environment variables contain localhost. * In case you are running applications and/or scripts inside fakemachine you may need to check which are the proxy environment variables they use. Different apps are known to use different environment variable names and different case for environment variable names. ## Fakemachine Backend debos (unless running debos with the `--disable-fakemachine` argument) creates and spawns a virtual machine using [fakemachine](https://github.com/go-debos/fakemachine) and executes the actions defined by the recipe inside the virtual machine. This helps ensure recipes are reproducible no matter the host environment. Fakemachine can use different virtualisation backends to spawn the virtual machine, for more information see the [fakemachine documentation](https://github.com/go-debos/fakemachine). By default the backend will automatically be selected based on what is supported by the host machine, but this can be overridden using the `--fakemachine-backend` / `-b` option. If no backends are supported, debos reverts to running the recipe on the host without creating a fakemachine. Performance of the backends is roughly as follows: `kvm` is faster than `uml` is faster than `qemu`. Using `--disable-fakemachine` is slightly faster than `kvm`, but requires root permissions. Benchmark times for running [pine-a64-plus/debian.yaml](https://github.com/go-debos/debos-recipes/blob/9a25b4be6c9136f4a27e542f39ab7e419fc852c9/pine-a64-plus/debian.yaml) on an Intel Pentium G4560T with SSD: | Backend | Wall Time | Prerequisites | | --- | --- | --- | | `--disable-fakemachine` | 8 min | root permissions | | `-b kvm` | 9 min | access to `/dev/kvm` | | `-b uml` | 18 min | package `user-mode-linux` installed | | `-b qemu` | 166 min | none | debos-1.1.5/TODO000066400000000000000000000031231476454235000133130ustar00rootroot00000000000000TODO * Make logging more consistent * Have a mode to output the final yaml after templating and then exit (dry-run?) * Documentation of all actions and overall concepts! * Tests, individual actions are mostly testable.. 
* Support having devices as image files (fakemachine should handle it ok) * Create a standard docker image with debos to push to docker hub for easy image creation * Template function to include other files * Dependency system between recipes ? * Warn on unknown yaml fields? * New actions: ** Create a manifest from install debian packages ** Install all dbgsym packages for a given rootfs ** Install all dev package for installed libraries ** Generalize the ostree conversion into an action? ** Ostree checkout for incremental fast updates? ** Action to get remote content which can be copied in *** Download tarball from http and unpack (same content) *** Download git tree (e.g. to get rpi firmware) *** Download .deb (e.g. u-boot to raw write rather then install) * Check what triggers ostree changes and try to minimize those * Control passwd & group contents as that can be problematic with ostree * Rewrite debootstrap in pure go to add a bunch of smarts (e.g. parallel downloads, local caching etc) * Rewrite pack/unpack in pure go and support more formats * Make actions using (host) commands check their existance early * Fix race in qemu-helper (if qemu-user-static gets installed in the system chroot things will get confused) * Do shell compatible parsing of script: argument to run actions and environment substitution * Support gpg signing ostree commits debos-1.1.5/action.go000066400000000000000000000052031476454235000144300ustar00rootroot00000000000000package debos import ( "bytes" "github.com/go-debos/fakemachine" ) type DebosState int // Represent the current state of Debos const ( Success DebosState = iota Failed ) // Mapping from partition name as configured in the image-partition action to // device path for usage by other actions type Partition struct { Name string DevicePath string } type CommonContext struct { Scratchdir string Rootdir string Artifactdir string Downloaddir string Image string ImagePartitions []Partition ImageMntDir string ImageFSTab bytes.Buffer // Fstab as per partitioning ImageKernelRoot string // Kernel cmdline root= snippet for the / of the image DebugShell string Origins map[string]string State DebosState EnvironVars map[string]string PrintRecipe bool Verbose bool } type DebosContext struct { *CommonContext RecipeDir string Architecture string SectorSize int } func (c *DebosContext) Origin(o string) (string, bool) { if o == "recipe" { return c.RecipeDir, true } else { path, found := c.Origins[o]; return path, found } } type Action interface { /* FIXME verify should probably be prepare or somesuch */ Verify(context *DebosContext) error PreMachine(context *DebosContext, m *fakemachine.Machine, args *[]string) error PreNoMachine(context *DebosContext) error Run(context *DebosContext) error // Cleanup() method gets called only if the Run for an action // was started and in the same machine (host or fake) as Run has run Cleanup(context *DebosContext) error PostMachine(context *DebosContext) error // PostMachineCleanup() gets called for all actions if Pre*Machine() method // has run for Action. This method is always executed on the host with user's permissions. 
PostMachineCleanup(context *DebosContext) error String() string } type BaseAction struct { Action string Description string } func (b *BaseAction) Verify(context *DebosContext) error { return nil } func (b *BaseAction) PreMachine(context *DebosContext, m *fakemachine.Machine, args *[]string) error { return nil } func (b *BaseAction) PreNoMachine(context *DebosContext) error { return nil } func (b *BaseAction) Run(context *DebosContext) error { return nil } func (b *BaseAction) Cleanup(context *DebosContext) error { return nil } func (b *BaseAction) PostMachine(context *DebosContext) error { return nil } func (b *BaseAction) PostMachineCleanup(context *DebosContext) error { return nil } func (b *BaseAction) String() string { if b.Description == "" { return b.Action } return b.Description } debos-1.1.5/actions/000077500000000000000000000000001476454235000142645ustar00rootroot00000000000000debos-1.1.5/actions/actions_doc.go000066400000000000000000000010361476454235000171000ustar00rootroot00000000000000// Copyright 2017, Collabora Ltd. /* Package 'actions' implements 'debos' modules used for OS creation. The origin property Several actions have the 'origin' property. Possible values for the 'origin' property are: 1) 'recipe' ....... directory the recipe is in 2) 'filesystem' ... target filesystem root directory from previous filesystem-deploy action or a previous ostree action. 3) 'artifacts' .... directory the artifacts are stored in 4) name property of a previous download action */ package actions debos-1.1.5/actions/apt_action.go000066400000000000000000000035711476454235000167420ustar00rootroot00000000000000/* Apt Action Install packages and their dependencies to the target rootfs with 'apt'. # Yaml syntax: - action: apt recommends: bool unauthenticated: bool update: bool packages: - package1 - package2 Mandatory properties: - packages -- list of packages to install Optional properties: - recommends -- boolean indicating if suggested packages will be installed - unauthenticated -- boolean indicating if unauthenticated packages can be installed - update -- boolean indicating if `apt update` will be run. Default 'true'. */ package actions import ( "github.com/go-debos/debos" ) type AptAction struct { debos.BaseAction `yaml:",inline"` Recommends bool Unauthenticated bool Update bool Packages []string } func NewAptAction() *AptAction { a := &AptAction{Update: true} return a } func (apt *AptAction) Run(context *debos.DebosContext) error { aptConfig := []string{} /* Don't show progress update percentages */ aptConfig = append(aptConfig, "-o=quiet::NoUpdate=1") aptOptions := []string{"apt-get", "-y"} aptOptions = append(aptOptions, aptConfig...) if !apt.Recommends { aptOptions = append(aptOptions, "--no-install-recommends") } if apt.Unauthenticated { aptOptions = append(aptOptions, "--allow-unauthenticated") } aptOptions = append(aptOptions, "install") aptOptions = append(aptOptions, apt.Packages...) c := debos.NewChrootCommandForContext(*context) c.AddEnv("DEBIAN_FRONTEND=noninteractive") if apt.Update { cmd := []string{"apt-get"} cmd = append(cmd, aptConfig...) cmd = append(cmd, "update") err := c.Run("apt", cmd...) if err != nil { return err } } err := c.Run("apt", aptOptions...) if err != nil { return err } cmd := []string{"apt-get"} cmd = append(cmd, aptConfig...) cmd = append(cmd, "clean") err = c.Run("apt", cmd...) 
if err != nil { return err } return nil } debos-1.1.5/actions/debootstrap_action.go000066400000000000000000000164211476454235000205020ustar00rootroot00000000000000/* Debootstrap Action Construct the target rootfs with debootstrap tool. Please keep in mind -- file `/etc/resolv.conf` will be removed after execution. Most of the OS scripts used by `debootstrap` copy `resolv.conf` from the host, and this may lead to incorrect configuration when becoming part of the created rootfs. # Yaml syntax: - action: debootstrap mirror: URL suite: "name" components: variant: "name" keyring-package: keyring-file: certificate: private-key: Mandatory properties: - suite -- release code name or symbolic name (e.g. "stable") Optional properties: - check-gpg -- verify GPG signatures on Release files, true by default - mirror -- URL with Debian-compatible repository If no mirror is specified debos will use http://deb.debian.org/debian as default. - variant -- name of the bootstrap script variant to use - components -- list of components to use for packages selection. If no components are specified debos will use main as default. Example: components: [ main, contrib ] - keyring-package -- keyring for package validation. - keyring-file -- keyring file for repository validation. - merged-usr -- use merged '/usr' filesystem, true by default. - certificate -- client certificate stored in file to be used for downloading packages from the server. - private-key -- provide the client's private key in a file separate from the certificate. */ package actions import ( "fmt" "io" "log" "os" "path" "strings" "runtime" "github.com/go-debos/debos" "github.com/go-debos/fakemachine" ) type DebootstrapAction struct { debos.BaseAction `yaml:",inline"` Suite string Mirror string Variant string KeyringPackage string `yaml:"keyring-package"` KeyringFile string `yaml:"keyring-file"` Certificate string PrivateKey string `yaml:"private-key"` Components []string MergedUsr bool `yaml:"merged-usr"` CheckGpg bool `yaml:"check-gpg"` } func NewDebootstrapAction() *DebootstrapAction { d := DebootstrapAction{} // Use filesystem with merged '/usr' by default d.MergedUsr = true // Be secure by default d.CheckGpg = true // Use main as default component d.Components = []string{"main"} // Set generic default mirror d.Mirror = "http://deb.debian.org/debian" return &d } func (d *DebootstrapAction) listOptionFiles(context *debos.DebosContext) []string { files := []string{} if d.Certificate != "" { d.Certificate = debos.CleanPathAt(d.Certificate, context.RecipeDir) files = append(files, d.Certificate) } if d.PrivateKey != "" { d.PrivateKey = debos.CleanPathAt(d.PrivateKey, context.RecipeDir) files = append(files, d.PrivateKey) } if d.KeyringFile != "" { d.KeyringFile = debos.CleanPathAt(d.KeyringFile, context.RecipeDir) files = append(files, d.KeyringFile) } return files } func (d *DebootstrapAction) Verify(context *debos.DebosContext) error { if len(d.Suite) == 0 { return fmt.Errorf("suite property not specified") } files := d.listOptionFiles(context) // Check if all needed files exists for _, f := range files { if _, err := os.Stat(f); os.IsNotExist(err) { return err } } return nil } func (d *DebootstrapAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { mounts := d.listOptionFiles(context) // Mount configuration files outside of recipes directory for _, mount := range mounts { m.AddVolume(path.Dir(mount)) } return nil } func (d *DebootstrapAction) RunSecondStage(context debos.DebosContext) error { cmdline := 
[]string{ "/debootstrap/debootstrap", "--no-check-gpg", "--second-stage"} if d.Components != nil { s := strings.Join(d.Components, ",") cmdline = append(cmdline, fmt.Sprintf("--components=%s", s)) } c := debos.NewChrootCommandForContext(context) // Can't use nspawn for debootstrap as it wants to create device nodes c.ChrootMethod = debos.CHROOT_METHOD_CHROOT err := c.Run("Debootstrap (stage 2)", cmdline...) if err != nil { log := path.Join(context.Rootdir, "debootstrap/debootstrap.log") _ = debos.Command{}.Run("debootstrap.log", "cat", log) } return err } // Guess if suite is something before usr-is-merged was introduced func (d *DebootstrapAction) isLikelyOldSuite() bool { switch strings.ToLower(d.Suite) { case "sid", "unstable": return false case "testing": return false case "bookworm": return false case "trixie": return false case "forky": return false default: return true } } func (d *DebootstrapAction) Run(context *debos.DebosContext) error { cmdline := []string{"debootstrap"} if d.MergedUsr { cmdline = append(cmdline, "--merged-usr") } else { cmdline = append(cmdline, "--no-merged-usr") } if !d.CheckGpg { cmdline = append(cmdline, fmt.Sprintf("--no-check-gpg")) } else if d.KeyringFile != "" { cmdline = append(cmdline, fmt.Sprintf("--keyring=%s", d.KeyringFile)) } if d.KeyringPackage != "" { cmdline = append(cmdline, fmt.Sprintf("--include=%s", d.KeyringPackage)) } if d.Certificate != "" { cmdline = append(cmdline, fmt.Sprintf("--certificate=%s", d.Certificate)) } if d.PrivateKey != "" { cmdline = append(cmdline, fmt.Sprintf("--private-key=%s", d.PrivateKey)) } if d.Components != nil { s := strings.Join(d.Components, ",") cmdline = append(cmdline, fmt.Sprintf("--components=%s", s)) } /* Only works for amd64, arm64 and riscv64 hosts, which should be enough */ foreign := context.Architecture != runtime.GOARCH if foreign { cmdline = append(cmdline, "--foreign") cmdline = append(cmdline, fmt.Sprintf("--arch=%s", context.Architecture)) } if d.Variant != "" { cmdline = append(cmdline, fmt.Sprintf("--variant=%s", d.Variant)) } // workaround for https://github.com/go-debos/debos/issues/361 if d.isLikelyOldSuite() { log.Println("excluding usr-is-merged as package is not in suite") cmdline = append(cmdline, "--exclude=usr-is-merged") } cmdline = append(cmdline, d.Suite) cmdline = append(cmdline, context.Rootdir) cmdline = append(cmdline, d.Mirror) cmdline = append(cmdline, "/usr/share/debootstrap/scripts/unstable") /* Make sure /etc/apt/apt.conf.d exists inside the fakemachine otherwise debootstrap prints a warning about the path not existing. */ if fakemachine.InMachine() { if err := os.MkdirAll(path.Join("/etc/apt/apt.conf.d"), os.ModePerm); err != nil { return err } } err := debos.Command{}.Run("Debootstrap", cmdline...) 
if err != nil { log := path.Join(context.Rootdir, "debootstrap/debootstrap.log") _ = debos.Command{}.Run("debootstrap.log", "cat", log) return err } if foreign { err = d.RunSecondStage(*context) if err != nil { return err } } /* HACK */ srclist, err := os.OpenFile(path.Join(context.Rootdir, "etc/apt/sources.list"), os.O_RDWR|os.O_CREATE, 0755) if err != nil { return err } _, err = io.WriteString(srclist, fmt.Sprintf("deb %s %s %s\n", d.Mirror, d.Suite, strings.Join(d.Components, " "))) if err != nil { return err } srclist.Close() /* Cleanup resolv.conf after debootstrap */ resolvconf := path.Join(context.Rootdir, "/etc/resolv.conf") if _, err = os.Stat(resolvconf); !os.IsNotExist(err) { if err = os.Remove(resolvconf); err != nil { return err } } c := debos.NewChrootCommandForContext(*context) return c.Run("apt clean", "/usr/bin/apt-get", "clean") } debos-1.1.5/actions/download_action.go000066400000000000000000000075651476454235000177740ustar00rootroot00000000000000/* Download Action Download a single file from Internet and unpack it in place if needed. # Yaml syntax: - action: download url: http://example.domain/path/filename.ext name: firmware filename: output_name unpack: bool compression: gz Mandatory properties: - url -- URL to an object for download - name -- string which allow to use downloaded object in other actions via 'origin' property. If 'unpack' property is set to 'true' name will refer to temporary directory with extracted content. Optional properties: - filename -- use this property as the name for saved file. Useful if URL does not contain file name in path, for example it is possible to download files from URLs without path part. - unpack -- hint for action to extract all files from downloaded archive. See the 'Unpack' action for more information. - compression -- optional hint for unpack allowing to use proper compression method. See the 'Unpack' action for more information. */ package actions import ( "fmt" "github.com/go-debos/debos" "net/url" "path" ) type DownloadAction struct { debos.BaseAction `yaml:",inline"` Url string // URL for downloading Filename string // File name, overrides the name from URL. Unpack bool // Unpack downloaded file to directory dedicated for download Compression string // compression type Name string // exporting path to file or directory(in case of unpack) } // validateUrl checks if supported URL is passed from recipe // Return: // - parsed URL // - nil in case of success func (d *DownloadAction) validateUrl() (*url.URL, error) { url, err := url.Parse(d.Url) if err != nil { return url, err } switch url.Scheme { case "http", "https": // Supported scheme default: return url, fmt.Errorf("Unsupported URL is provided: '%s'", url.String()) } return url, nil } func (d *DownloadAction) validateFilename(context *debos.DebosContext, url *url.URL) (filename string, err error) { if len(d.Filename) == 0 { // Trying to guess the name from URL Path filename = path.Base(url.Path) } else { filename = path.Base(d.Filename) } if len(filename) == 0 || filename == "." 
|| filename == "/" { return "", fmt.Errorf("Incorrect filename is provided for '%s'", d.Url) } filename = path.Join(context.Scratchdir, filename) return filename, nil } func (d *DownloadAction) archive(filename string) (debos.Archive, error) { archive, err := debos.NewArchive(filename) if err != nil { return archive, err } switch archive.Type() { case debos.Tar: if len(d.Compression) > 0 { if err := archive.AddOption("tarcompression", d.Compression); err != nil { return archive, err } } default: } return archive, nil } func (d *DownloadAction) Verify(context *debos.DebosContext) error { var filename string if len(d.Name) == 0 { return fmt.Errorf("Property 'name' is mandatory for download action\n") } url, err := d.validateUrl() if err != nil { return err } filename, err = d.validateFilename(context, url) if err != nil { return err } if d.Unpack == true { if _, err := d.archive(filename); err != nil { return err } } return nil } func (d *DownloadAction) Run(context *debos.DebosContext) error { var filename string url, err := d.validateUrl() if err != nil { return err } filename, err = d.validateFilename(context, url) if err != nil { return err } originPath := filename switch url.Scheme { case "http", "https": err := debos.DownloadHttpUrl(url.String(), filename) if err != nil { return err } default: return fmt.Errorf("Unsupported URL is provided: '%s'", url.String()) } if d.Unpack == true { archive, err := d.archive(filename) if err != nil { return err } targetdir := filename + ".d" err = archive.RelaxedUnpack(targetdir) if err != nil { return err } originPath = targetdir } context.Origins[d.Name] = originPath return nil } debos-1.1.5/actions/filesystem_deploy_action.go000066400000000000000000000074011476454235000217120ustar00rootroot00000000000000/* FilesystemDeploy Action Deploy prepared root filesystem to output image by copying the files from the temporary scratch directory to the mounted image and optionally creates various configuration files for the image: '/etc/fstab' and '/etc/kernel/cmdline'. This action requires 'image-partition' action to be executed before it. After this action has ran, subsequent actions are executed on the mounted output image. # Yaml syntax: - action: filesystem-deploy setup-fstab: bool setup-kernel-cmdline: bool append-kernel-cmdline: arguments Optional properties: - setup-fstab -- generate '/etc/fstab' file according to information provided by 'image-partition' action. By default is 'true'. - setup-kernel-cmdline -- add location of root partition to '/etc/kernel/cmdline' file on target image. By default is 'true'. - append-kernel-cmdline -- additional kernel command line arguments passed to kernel. 
*/ package actions import ( "errors" "fmt" "io" "io/ioutil" "log" "os" "path" "strings" "github.com/go-debos/debos" ) type FilesystemDeployAction struct { debos.BaseAction `yaml:",inline"` SetupFSTab bool `yaml:"setup-fstab"` SetupKernelCmdline bool `yaml:"setup-kernel-cmdline"` AppendKernelCmdline string `yaml:"append-kernel-cmdline"` } func NewFilesystemDeployAction() *FilesystemDeployAction { fd := &FilesystemDeployAction{SetupFSTab: true, SetupKernelCmdline: true} fd.Description = "Deploying filesystem" return fd } func (fd *FilesystemDeployAction) setupFSTab(context *debos.DebosContext) error { if context.ImageFSTab.Len() == 0 { return errors.New("Fstab not generated, missing image-partition action?") } log.Print("Setting up /etc/fstab") err := os.MkdirAll(path.Join(context.Rootdir, "etc"), 0755) if err != nil { return fmt.Errorf("Couldn't create etc in image: %v", err) } fstab := path.Join(context.Rootdir, "etc/fstab") f, err := os.OpenFile(fstab, os.O_RDWR|os.O_CREATE, 0755) defer f.Close() if err != nil { return fmt.Errorf("Couldn't open /etc/fstab: %v", err) } _, err = io.Copy(f, &context.ImageFSTab) if err != nil { return fmt.Errorf("Couldn't write /etc/fstab: %v", err) } return nil } func (fd *FilesystemDeployAction) setupKernelCmdline(context *debos.DebosContext) error { var cmdline []string log.Print("Setting up /etc/kernel/cmdline") err := os.MkdirAll(path.Join(context.Rootdir, "etc", "kernel"), 0755) if err != nil { return fmt.Errorf("Couldn't create etc/kernel in image: %v", err) } path := path.Join(context.Rootdir, "etc/kernel/cmdline") current, _ := ioutil.ReadFile(path) f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0755) defer f.Close() if err != nil { return fmt.Errorf("Couldn't open /etc/kernel/cmdline: %v", err) } cmdline = append(cmdline, strings.TrimSpace(string(current))) cmdline = append(cmdline, context.ImageKernelRoot) if fd.AppendKernelCmdline != "" { cmdline = append(cmdline, fd.AppendKernelCmdline) } _, err = f.WriteString(strings.Join(cmdline, " ") + "\n") if err != nil { return fmt.Errorf("Couldn't write /etc/kernel/cmdline: %v", err) } return nil } func (fd *FilesystemDeployAction) Run(context *debos.DebosContext) error { /* Copying files is actually silly hafd, one has to keep permissions, ACL's * extended attribute, misc, other. Leave it to cp... */ err := debos.Command{}.Run("Deploy to image", "cp", "-a", context.Rootdir+"/.", context.ImageMntDir) if err != nil { return fmt.Errorf("rootfs deploy failed: %v", err) } context.Rootdir = context.ImageMntDir context.Origins["filesystem"] = context.ImageMntDir if fd.SetupFSTab { err = fd.setupFSTab(context) if err != nil { return err } } if fd.SetupKernelCmdline { err = fd.setupKernelCmdline(context) if err != nil { return err } } return nil } debos-1.1.5/actions/image_partition_action.go000066400000000000000000000620011476454235000213220ustar00rootroot00000000000000/* ImagePartition Action This action creates an image file, partitions it and formats the filesystems. Mountpoints can be defined so the created partitions can be mounted during the build, and optionally (but by-default) mounted at boot in the final system. The mountpoints are sorted on their position in the filesystem hierarchy so the order in the recipe does not matter. # Yaml syntax: - action: image-partition imagename: image_name imagesize: size partitiontype: gpt diskid: string gpt_gap: offset partitions: mountpoints: Mandatory properties: - imagename -- the name of the image file, relative to the artifact directory. 
- imagesize -- generated image size in human-readable form, examples: 100MB, 1GB, etc. - partitiontype -- partition table type. Currently only 'gpt' and 'msdos' partition tables are supported. - gpt_gap -- shifting GPT allow to use this gap for bootloaders, for example if U-Boot intersects with original GPT placement. Only works if parted supports an extra argument to mklabel to specify the gpt offset. - partitions -- list of partitions, at least one partition is needed. Partition properties are described below. - mountpoints -- list of mount points for partitions. Properties for mount points are described below. Optional properties: - diskid -- disk unique identifier string. For 'gpt' partition table, 'diskid' should be in GUID format (e.g.: '00002222-4444-6666-AAAA-BBBBCCCCFFFF' where each character is an hexadecimal digit). For 'msdos' partition table, 'diskid' should be a 32 bits hexadecimal number (e.g. '1234ABCD' without any dash separator). # Yaml syntax for partitions: partitions: - name: partition name partlabel: partition label fs: filesystem fslabel: filesystem label start: offset end: offset features: list of filesystem features extendedoptions: list of filesystem extended options flags: list of flags fsck: bool fsuuid: string partuuid: string Mandatory properties: - name -- is used for referencing named partition for mount points configuration (below) and label the filesystem located on this partition. Must be unique. - fs -- filesystem type used for formatting. 'none' fs type should be used for partition without filesystem. - start -- offset from beginning of the disk there the partition starts. - end -- offset from beginning of the disk there the partition ends. For 'start' and 'end' properties offset can be written in human readable form -- '32MB', '1GB' or as disk percentage -- '100%'. Optional properties: - partlabel -- label for the partition in the GPT partition table. Defaults to the `name` property of the partition. May only be used for GPT partitions. - fslabel -- label for the filesystem. Defaults to the `name` property of the partition. The filesystem label can be up to 11 characters long for {v}fat{12|16|32}, 16 characters long for ext2/3/4, 255 characters long for btrfs, 512 characters long for hfs/hfsplus and 12 characters long for xfs. - parttype -- set the partition type in the partition table. The string should be in a hexadecimal format (2-characters) for msdos partition tables and GUID format (36-characters) for GPT partition tables. For instance, "82" for msdos sets the partition type to Linux Swap. Whereas "0657fd6d-a4ab-43c4-84e5-0933c84b4f4f" for GPT sets the partition type to Linux Swap. For msdos partition types hex codes see: https://en.wikipedia.org/wiki/Partition_type For gpt partition type GUIDs see: https://systemd.io/DISCOVERABLE_PARTITIONS/ - features -- list of additional filesystem features which need to be enabled for partition. - flags -- list of additional flags for partition compatible with parted(8) 'set' command. - fsck -- if set to `false` -- then set fs_passno (man fstab) to 0 meaning no filesystem checks in boot time. By default is set to `true` allowing checks on boot. - fsuuid -- file system UUID string. This option is only supported for btrfs, ext2, ext3, ext4 and xfs. - partuuid -- GPT partition UUID string. 
A version 5 UUID can be easily generated using the uuid5 template function {{ uuid5 $namespace $data }} $namespace should be a valid UUID and $data can be any string, to generate reproducible UUID value pass a fixed value of namespace and data. - extendedoptions -- list of additional filesystem extended options which need to be enabled for the partition. # Yaml syntax for mount points: mountpoints: - mountpoint: path partition: partition label options: list of options buildtime: bool Mandatory properties: - partition -- partition name for mounting. The partion must exist under `partitions`. - mountpoint -- path in the target root filesystem where the named partition should be mounted. Must be unique, only one partition can be mounted per mountpoint. Optional properties: - options -- list of options to be added to appropriate entry in fstab file. - buildtime -- if set to true then the mountpoint only used during the debos run. No entry in `/etc/fstab` will be created. The mountpoints directory will be removed from the image, so it is recommended to define a `mountpoint` path which is temporary and unique for the image, for example: `/mnt/temporary_mount`. Defaults to false. # Layout example for Raspberry PI 3: - action: image-partition imagename: "debian-rpi3.img" imagesize: 1GB partitiontype: msdos mountpoints: - mountpoint: / partition: root - mountpoint: /boot/firmware partition: firmware options: [ x-systemd.automount ] partitions: - name: firmware fs: vfat start: 0% end: 64MB - name: root fs: ext4 start: 64MB end: 100% flags: [ boot ] */ package actions import ( "encoding/hex" "errors" "fmt" "github.com/docker/go-units" "github.com/go-debos/fakemachine" "github.com/google/uuid" "github.com/freddierice/go-losetup/v2" "log" "os" "os/exec" "path" "path/filepath" "sort" "strconv" "strings" "syscall" "time" "regexp" "github.com/go-debos/debos" ) type Partition struct { number int Name string PartLabel string FSLabel string PartType string PartUUID string Start string End string FS string Flags []string Features []string ExtendedOptions []string Fsck bool "fsck" FSUUID string } type Mountpoint struct { Mountpoint string Partition string Options []string Buildtime bool part *Partition } type imageLocker struct { fd *os.File } func lockImage(context *debos.DebosContext) (*imageLocker, error) { fd, err := os.Open(context.Image) if err != nil { return nil, err } syscall.Flock(int(fd.Fd()), syscall.LOCK_EX) return &imageLocker{fd: fd}, nil } func (i imageLocker) unlock() { i.fd.Close() } type ImagePartitionAction struct { debos.BaseAction `yaml:",inline"` ImageName string ImageSize string PartitionType string DiskID string GptGap string "gpt_gap" Partitions []Partition Mountpoints []Mountpoint size int64 loopDev losetup.Device usingLoop bool } func (p *Partition) UnmarshalYAML(unmarshal func(interface{}) error) error { type rawPartition Partition part := rawPartition{Fsck: true} if err := unmarshal(&part); err != nil { return err } *p = Partition(part) return nil } func (i *ImagePartitionAction) generateFSTab(context *debos.DebosContext) error { context.ImageFSTab.Reset() for _, m := range i.Mountpoints { options := []string{"defaults"} options = append(options, m.Options...) 
if m.Buildtime == true { /* Do not need to add mount point into fstab */ continue } if m.part.FSUUID == "" { return fmt.Errorf("Missing fs UUID for partition %s!?!", m.part.Name) } fs_passno := 0 if m.part.Fsck { if m.Mountpoint == "/" { fs_passno = 1 } else { fs_passno = 2 } } fsType := m.part.FS switch m.part.FS { case "fat", "fat12", "fat16", "fat32", "msdos": fsType = "vfat" default: break } context.ImageFSTab.WriteString(fmt.Sprintf("UUID=%s\t%s\t%s\t%s\t0\t%d\n", m.part.FSUUID, m.Mountpoint, fsType, strings.Join(options, ","), fs_passno)) } return nil } func (i *ImagePartitionAction) generateKernelRoot(context *debos.DebosContext) error { for _, m := range i.Mountpoints { if m.Mountpoint == "/" { if m.part.FSUUID == "" { return errors.New("No fs UUID for root partition !?!") } context.ImageKernelRoot = fmt.Sprintf("root=UUID=%s", m.part.FSUUID) break } } return nil } func (i ImagePartitionAction) getPartitionDevice(number int, context debos.DebosContext) string { /* Always look up canonical device as udev might not generate the by-id * symlinks while there is an flock on /dev/vda */ device, _ := filepath.EvalSymlinks(context.Image) suffix := "p" /* Check partition naming first: if used 'by-id'i naming convention */ if strings.Contains(device, "/disk/by-id/") { suffix = "-part" } /* If the iamge device has a digit as the last character, the partition * suffix is p else it's just */ last := device[len(device)-1] if last >= '0' && last <= '9' { return fmt.Sprintf("%s%s%d", device, suffix, number) } else { return fmt.Sprintf("%s%d", device, number) } } func (i *ImagePartitionAction) triggerDeviceNodes(context *debos.DebosContext) error { err := debos.Command{}.Run("udevadm", "udevadm", "trigger", "--settle", context.Image) if err != nil { log.Printf("Failed to trigger device nodes") return err } return nil } func (i ImagePartitionAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { imagePath := path.Join(context.Artifactdir, i.ImageName) image, err := m.CreateImage(imagePath, i.size) if err != nil { return err } context.Image = image *args = append(*args, "--internal-image", image) return nil } func (i ImagePartitionAction) formatPartition(p *Partition, context debos.DebosContext) error { label := fmt.Sprintf("Formatting partition %d", p.number) path := i.getPartitionDevice(p.number, context) cmdline := []string{} switch p.FS { case "fat", "fat12", "fat16", "fat32", "msdos", "vfat": cmdline = append(cmdline, "mkfs.vfat", "-n", p.FSLabel) switch p.FS { case "fat12": cmdline = append(cmdline, "-F12") case "fat16": cmdline = append(cmdline, "-F16") case "fat32", "msdos", "vfat": cmdline = append(cmdline, "-F32") default: /* let mkfs.vfat autodetermine FAT type */ break } if len(p.FSUUID) > 0 { cmdline = append(cmdline, "-i", p.FSUUID) } case "btrfs": // Force formatting to prevent failure in case if partition was formatted already cmdline = append(cmdline, "mkfs.btrfs", "-L", p.FSLabel, "-f") if len(p.Features) > 0 { cmdline = append(cmdline, "-O", strings.Join(p.Features, ",")) } if len(p.FSUUID) > 0 { cmdline = append(cmdline, "-U", p.FSUUID) } case "f2fs": cmdline = append(cmdline, "mkfs.f2fs", "-l", p.FSLabel) if len(p.Features) > 0 { cmdline = append(cmdline, "-O", strings.Join(p.Features, ",")) } case "hfs": cmdline = append(cmdline, "mkfs.hfs", "-h", "-v", p.FSLabel) case "hfsplus": cmdline = append(cmdline, "mkfs.hfsplus", "-v", p.FSLabel) case "hfsx": cmdline = append(cmdline, "mkfs.hfsplus", "-s", "-v", p.FSLabel) // hfsx is 
case-insensitive hfs+, should be treated as "normal" hfs+ from now on p.FS = "hfsplus" case "xfs": cmdline = append(cmdline, "mkfs.xfs", "-L", p.FSLabel) if len(p.FSUUID) > 0 { cmdline = append(cmdline, "-m", "uuid="+p.FSUUID) } case "none": default: cmdline = append(cmdline, fmt.Sprintf("mkfs.%s", p.FS), "-L", p.FSLabel) if len(p.Features) > 0 { cmdline = append(cmdline, "-O", strings.Join(p.Features, ",")) } if len(p.ExtendedOptions) > 0 { cmdline = append(cmdline, "-E", strings.Join(p.ExtendedOptions, ",")) } if len(p.FSUUID) > 0 { if p.FS == "ext2" || p.FS == "ext3" || p.FS == "ext4" { cmdline = append(cmdline, "-U", p.FSUUID) } } } if len(cmdline) != 0 { cmdline = append(cmdline, path) cmd := debos.Command{} /* Some underlying device driver, e.g. the UML UBD driver, may manage holes * incorrectly which will prevent to retrieve all useful zero ranges in * filesystem, e.g. when using 'bmaptool create', see patch * http://lists.infradead.org/pipermail/linux-um/2022-January/002074.html * * Adding UNIX_IO_NOZEROOUT environment variable prevent mkfs.ext[234] * utilities to create zero range spaces using fallocate with * FALLOC_FL_ZERO_RANGE or FALLOC_FL_PUNCH_HOLE */ if p.FS == "ext2" || p.FS == "ext3" || p.FS == "ext4" { cmd.AddEnv("UNIX_IO_NOZEROOUT=1") } if err := cmd.Run(label, cmdline...); err != nil { return err } } if p.FS != "none" && p.FSUUID == "" { uuid, err := exec.Command("blkid", "-o", "value", "-s", "UUID", "-p", "-c", "none", path).Output() if err != nil { return fmt.Errorf("Failed to get uuid: %s", err) } p.FSUUID = strings.TrimSpace(string(uuid[:])) } return nil } func (i *ImagePartitionAction) PreNoMachine(context *debos.DebosContext) error { imagePath := path.Join(context.Artifactdir, i.ImageName) img, err := os.OpenFile(imagePath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { return fmt.Errorf("Couldn't open image file: %v", err) } err = img.Truncate(i.size) if err != nil { return fmt.Errorf("Couldn't resize image file: %v", err) } img.Close() // losetup.Attach() can fail due to concurrent attaches in other processes retries := 60 for t := 1; t <= retries; t++ { i.loopDev, err = losetup.Attach(imagePath, 0, false) if err == nil { break } log.Printf("Setup loop device: try %d/%d failed: %v", t, retries, err) time.Sleep(200 * time.Millisecond) } if err != nil { return fmt.Errorf("Failed to setup loop device: %v", err) } // go-losetup doesn't provide a way to change the loop device sector size // see https://github.com/freddierice/go-losetup/pull/10 if context.SectorSize != 512 { command := []string{"losetup", "--sector-size", strconv.Itoa(context.SectorSize), i.loopDev.Path()} err = debos.Command{}.Run("losetup", command...) if err != nil { return err } } context.Image = i.loopDev.Path() i.usingLoop = true return nil } func (i ImagePartitionAction) Run(context *debos.DebosContext) error { /* On certain disk device events udev will call the BLKRRPART ioctl to * re-read the partition table. This will cause the partition devices * (e.g. vda3) to temporarily disappear while the rescanning happens. * udev does this while holding an exclusive flock. This means to avoid partition * devices disappearing while doing operations on them (e.g. formatting * and mounting) we need to do it while holding an exclusive lock */ command := []string{"parted", "-s", context.Image, "mklabel", i.PartitionType} if len(i.GptGap) > 0 { command = append(command, i.GptGap) } err := debos.Command{}.Run("parted", command...) 
if err != nil { return err } if len(i.DiskID) > 0 { command := []string{"sfdisk", "--disk-id", context.Image, i.DiskID} err = debos.Command{}.Run("sfdisk", command...) if err != nil { return err } } for idx, _ := range i.Partitions { p := &i.Partitions[idx] if p.PartLabel == "" { p.PartLabel = p.Name } var name string if i.PartitionType == "msdos" { if len(i.Partitions) <= 4 { name = "primary" } else { if idx < 3 { name = "primary" } else if idx == 3 { name = "extended" } else { name = "logical" } } } else { name = p.PartLabel } command := []string{"parted", "-a", "none", "-s", "--", context.Image, "mkpart", name} switch p.FS { case "fat16": command = append(command, "fat16") case "fat", "fat12", "fat32", "msdos", "vfat": /* TODO: Not sure if this is correct. Perhaps fat12 should be treated the same as fat16 ? */ command = append(command, "fat32") case "hfsplus": command = append(command, "hfs+") case "f2fs": case "none": default: command = append(command, p.FS) } command = append(command, p.Start, p.End) err = debos.Command{}.Run("parted", command...) if err != nil { return err } if p.Flags != nil { for _, flag := range p.Flags { err = debos.Command{}.Run("parted", "parted", "-s", context.Image, "set", fmt.Sprintf("%d", p.number), flag, "on") if err != nil { return err } } } if p.PartType != "" { err = debos.Command{}.Run("sfdisk", "sfdisk", "--part-type", context.Image, fmt.Sprintf("%d", p.number), p.PartType) if err != nil { return err } } /* PartUUID will only be set for gpt partitions */ if len(p.PartUUID) > 0 { err = debos.Command{}.Run("sfdisk", "sfdisk", "--part-uuid", context.Image, fmt.Sprintf("%d", p.number), p.PartUUID) if err != nil { return err } } lock, err := lockImage(context) if err != nil { return err } defer lock.unlock() err = i.formatPartition(p, *context) if err != nil { return err } lock.unlock() devicePath := i.getPartitionDevice(p.number, *context) context.ImagePartitions = append(context.ImagePartitions, debos.Partition{p.Name, devicePath}) } context.ImageMntDir = path.Join(context.Scratchdir, "mnt") os.MkdirAll(context.ImageMntDir, 0755) // sort mountpoints based on position in filesystem hierarchy sort.SliceStable(i.Mountpoints, func(a, b int) bool { mntA := i.Mountpoints[a].Mountpoint mntB := i.Mountpoints[b].Mountpoint // root should always be mounted first if (mntA == "/") { return true } if (mntB == "/") { return false } return strings.Count(mntA, "/") < strings.Count(mntB, "/") }) lock, err := lockImage(context) if err != nil { return err } defer lock.unlock() for _, m := range i.Mountpoints { dev := i.getPartitionDevice(m.part.number, *context) mntpath := path.Join(context.ImageMntDir, m.Mountpoint) os.MkdirAll(mntpath, 0755) fsType := m.part.FS switch m.part.FS { case "fat", "fat12", "fat16", "fat32", "msdos": fsType = "vfat" default: break } err = syscall.Mount(dev, mntpath, fsType, 0, "") if err != nil { return fmt.Errorf("%s mount failed: %v", m.part.Name, err) } } lock.unlock() err = i.generateFSTab(context) if err != nil { return err } err = i.generateKernelRoot(context) if err != nil { return err } /* Now that all partitions are created (re)trigger all udev events for * the image file to make sure everything is in a reasonable state */ i.triggerDeviceNodes(context) return nil } func (i ImagePartitionAction) Cleanup(context *debos.DebosContext) error { for idx := len(i.Mountpoints) - 1; idx >= 0; idx-- { m := i.Mountpoints[idx] mntpath := path.Join(context.ImageMntDir, m.Mountpoint) err := syscall.Unmount(mntpath, 0) if err != nil { 
log.Printf("Warning: Failed to get unmount %s: %s", m.Mountpoint, err) log.Printf("Unmount failure can cause images being incomplete!") return err } if m.Buildtime == true { if err = os.Remove(mntpath); err != nil { log.Printf("Failed to remove temporary mount point %s: %s", m.Mountpoint, err) if err.(*os.PathError).Err.Error() == "read-only file system" { continue } return err } } } if i.usingLoop { err := i.loopDev.Detach() if err != nil { log.Printf("WARNING: Failed to detach loop device: %s", err) return err } for t := 0; t < 60; t++ { err = i.loopDev.Remove() if err == nil { break } time.Sleep(time.Second) } if err != nil { log.Printf("WARNING: Failed to remove loop device: %s", err) return err } } return nil } func (i ImagePartitionAction) PostMachineCleanup(context *debos.DebosContext) error { image := path.Join(context.Artifactdir, i.ImageName) /* Remove the image in case of any action failure */ if context.State != debos.Success { if _, err := os.Stat(image); !os.IsNotExist(err) { if err = os.Remove(image); err != nil { return err } } } return nil } func (i *ImagePartitionAction) Verify(context *debos.DebosContext) error { if i.PartitionType == "msdos" { for idx, _ := range i.Partitions { p := &i.Partitions[idx] if idx == 3 && len(i.Partitions) > 4 { var name string var part Partition name = "extended" part.number = idx + 1 part.Name = name part.Start = p.Start tmp_n := len(i.Partitions) - 1 tmp := &i.Partitions[tmp_n] part.End = tmp.End part.FS = "none" i.Partitions = append(i.Partitions[:idx+1], i.Partitions[idx:]...) i.Partitions[idx] = part num := 1 for idx, _ := range i.Partitions { p := &i.Partitions[idx] p.number = num num++ } } } } if len(i.GptGap) > 0 { log.Println("WARNING: special version of parted is needed for 'gpt_gap' option") if i.PartitionType != "gpt" { return fmt.Errorf("gpt_gap property could be used only with 'gpt' label") } // Just check if it contains correct value _, err := units.FromHumanSize(i.GptGap) if err != nil { return fmt.Errorf("Failed to parse GPT offset: %s", i.GptGap) } } if len(i.DiskID) > 0 { switch i.PartitionType { case "gpt": _, err := uuid.Parse(i.DiskID) if err != nil { return fmt.Errorf("Incorrect disk GUID %s", i.DiskID) } case "msdos": _, err := hex.DecodeString(i.DiskID) if err != nil || len(i.DiskID) != 8 { return fmt.Errorf("Incorrect disk ID %s, should be 32-bit hexadecimal number", i.DiskID) } // Add 0x prefix i.DiskID = "0x" + i.DiskID } } num := 1 for idx, _ := range i.Partitions { var maxLength int = 0 p := &i.Partitions[idx] p.number = num num++ if p.Name == "" { return fmt.Errorf("Partition without a name") } // check for duplicate partition names for j := idx + 1; j < len(i.Partitions); j++ { if i.Partitions[j].Name == p.Name { return fmt.Errorf("Partition %s already exists", p.Name) } } if len(p.FSUUID) > 0 { switch p.FS { case "btrfs", "ext2", "ext3", "ext4", "xfs": _, err := uuid.Parse(p.FSUUID) if err != nil { return fmt.Errorf("Incorrect UUID %s", p.FSUUID) } case "fat", "fat12", "fat16", "fat32", "msdos", "vfat": _, err := hex.DecodeString(p.FSUUID) if err != nil || len(p.FSUUID) != 8 { return fmt.Errorf("Incorrect UUID %s, should be 32-bit hexadecimal number", p.FSUUID) } default: return fmt.Errorf("Setting the UUID is not supported for filesystem %s", p.FS) } } if i.PartitionType != "gpt" && p.PartLabel != "" { return fmt.Errorf("Can only set partition partlabel on GPT filesystem") } if len(p.PartUUID) > 0 { switch i.PartitionType { case "gpt": _, err := uuid.Parse(p.PartUUID) if err != nil { return 
fmt.Errorf("Incorrect partition UUID %s", p.PartUUID) } default: return fmt.Errorf("Setting the partition UUID is not supported for %s", i.PartitionType) } } if p.PartType != "" { var partTypeLen int switch i.PartitionType { case "gpt": partTypeLen = 36 case "msdos": partTypeLen = 2 } if len(p.PartType) != partTypeLen { return fmt.Errorf("incorrect partition type for %s, should be %d characters", p.Name, partTypeLen) } } if p.Start == "" { return fmt.Errorf("Partition %s missing start", p.Name) } if p.End == "" { return fmt.Errorf("Partition %s missing end", p.Name) } if p.FS == "" { return fmt.Errorf("Partition %s missing fs type", p.Name) } if p.FSLabel == "" { p.FSLabel = p.Name } switch p.FS { case "fat", "fat12", "fat16", "fat32", "msdos", "vfat": maxLength = 11 case "ext2", "ext3", "ext4": maxLength = 16 case "btrfs": maxLength = 255 case "f2fs": maxLength = 512 case "hfs", "hfsplus": maxLength = 255 case "xfs": maxLength = 12 case "none": default: log.Printf("Warning: setting a fs label for %s is unsupported", p.FS) } if maxLength > 0 && len(p.FSLabel) > maxLength { return fmt.Errorf("fs label for %s '%s' is too long", p.Name, p.FSLabel) } } for idx, _ := range i.Mountpoints { m := &i.Mountpoints[idx] // check for duplicate mountpoints for j := idx + 1; j < len(i.Mountpoints); j++ { if i.Mountpoints[j].Mountpoint == m.Mountpoint { return fmt.Errorf("Mountpoint %s already exists", m.Mountpoint) } } for pidx, _ := range i.Partitions { p := &i.Partitions[pidx] if m.Partition == p.Name { m.part = p break } } if m.part == nil { return fmt.Errorf("Couldn't find partition for %s", m.Mountpoint) } if strings.ToLower(m.part.FS) == "none" { return fmt.Errorf("Cannot mount %s: filesystem not present", m.Mountpoint) } } // Calculate the size based on the unit (binary or decimal) // binary units are multiples of 1024 - KiB, MiB, GiB, TiB, PiB // decimal units are multiples of 1000 - KB, MB, GB, TB, PB var getSizeValueFunc func(size string) (int64, error) if regexp.MustCompile(`^[0-9.]+[kmgtp]ib+$`).MatchString(strings.ToLower(i.ImageSize)) { getSizeValueFunc = units.RAMInBytes } else { getSizeValueFunc = units.FromHumanSize } size, err := getSizeValueFunc(i.ImageSize) if err != nil { return fmt.Errorf("Failed to parse image size: %s", i.ImageSize) } i.size = size return nil } debos-1.1.5/actions/mmdebstrap_action.go000066400000000000000000000120771476454235000203150ustar00rootroot00000000000000/* mmdebstrap Action Construct the target rootfs with mmdebstrap tool. Please keep in mind -- file `/etc/resolv.conf` will be removed after execution. Most of the OS scripts used by `mmdebstrap` copy `resolv.conf` from the host, and this may lead to incorrect configuration when becoming part of the created rootfs. # Yaml syntax: - action: mmdebstrap mirrors: suite: "name" components: variant: "name" keyring-packages: keyring-files: include: dpkg-opts: apt-opts: Mandatory properties: - suite -- release code name or symbolic name (e.g. "stable") Optional properties: - mirrors -- list of URLs with Debian-compatible repository If no mirror is specified debos will use http://deb.debian.org/debian as default. - variant -- name of the bootstrap script variant to use - components -- list of components to use for packages selection. If no components are specified debos will use main as default. Example: components: [ main, contrib ] - keyring-packages -- list of keyrings for package validation. - keyring-files -- list keyring files for repository validation. 
- merged-usr -- use merged '/usr' filesystem, fallback to distribution default if not set. - include -- list of packages to install during bootstrap. - dpkg-opts -- list of arbitrary options to dpkg. - apt-opts -- list of arbitrary options to apt. */ package actions import ( "fmt" "os" "path" "strings" "github.com/go-debos/debos" "github.com/go-debos/fakemachine" ) type MmdebstrapAction struct { debos.BaseAction `yaml:",inline"` Suite string Mirrors []string Variant string KeyringPackages []string `yaml:"keyring-packages"` KeyringFiles []string `yaml:"keyring-files"` Components []string MergedUsr *bool `yaml:"merged-usr"` Include []string DpkgOpts []string `yaml:"dpkg-opts"` AptOpts []string `yaml:"apt-opts"` } func NewMmdebstrapAction() *MmdebstrapAction { d := MmdebstrapAction{} // Use main as default component d.Components = []string{"main"} return &d } func (d *MmdebstrapAction) listOptionFiles(context *debos.DebosContext) []string { files := []string{} if d.KeyringFiles != nil { for _, file := range d.KeyringFiles { file = debos.CleanPathAt(file, context.RecipeDir) files = append(files, file) } } return files } func (d *MmdebstrapAction) Verify(context *debos.DebosContext) error { if len(d.Suite) == 0 { return fmt.Errorf("suite property not specified") } files := d.listOptionFiles(context) // Check if all needed files exists for _, f := range files { if _, err := os.Stat(f); os.IsNotExist(err) { return err } } return nil } func (d *MmdebstrapAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { mounts := d.listOptionFiles(context) // Mount configuration files outside of recipes directory for _, mount := range mounts { m.AddVolume(path.Dir(mount)) } return nil } func (d *MmdebstrapAction) Run(context *debos.DebosContext) error { cmdline := []string{"mmdebstrap"} if d.MergedUsr != nil { if *d.MergedUsr { cmdline = append(cmdline, "--hook-dir=/usr/share/mmdebstrap/hooks/merged-usr") } else { cmdline = append(cmdline, "--hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr") } } if d.KeyringFiles != nil { s := strings.Join(d.KeyringFiles, ",") cmdline = append(cmdline, fmt.Sprintf("--keyring=%s", s)) } if d.KeyringPackages != nil { s := strings.Join(d.KeyringPackages, ",") cmdline = append(cmdline, fmt.Sprintf("--include=%s", s)) } if d.Components != nil { s := strings.Join(d.Components, ",") cmdline = append(cmdline, fmt.Sprintf("--components=%s", s)) } cmdline = append(cmdline, fmt.Sprintf("--architectures=%s", context.Architecture)) if d.Variant != "" { cmdline = append(cmdline, fmt.Sprintf("--variant=%s", d.Variant)) } if d.Include != nil { s := strings.Join(d.Include, ",") cmdline = append(cmdline, fmt.Sprintf("--include=%s", s)) } if d.DpkgOpts != nil { for _, opt := range d.DpkgOpts { cmdline = append(cmdline, fmt.Sprintf("--dpkgopt=%s", opt)) } } if d.AptOpts != nil { for _, opt := range d.AptOpts { cmdline = append(cmdline, fmt.Sprintf("--aptopt=%s", opt)) } } cmdline = append(cmdline, d.Suite) cmdline = append(cmdline, context.Rootdir) if d.Mirrors != nil { cmdline = append(cmdline, d.Mirrors...) } /* Make sure files in /etc/apt/ exist inside the fakemachine otherwise mmdebstrap prints a warning about the path not existing. */ if fakemachine.InMachine() { if err := os.MkdirAll(path.Join("/etc/apt/apt.conf.d"), os.ModePerm); err != nil { return err } if err := os.MkdirAll(path.Join("/etc/apt/trusted.gpg.d"), os.ModePerm); err != nil { return err } } mmdebstrapErr := debos.Command{}.Run("Mmdebstrap", cmdline...) 
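/* Illustrative only: for an amd64 recipe using the default component, the
 * command line assembled above resembles
 *   mmdebstrap --components=main --architectures=amd64 <suite> <rootdir> [mirrors...]
 * Any error from running it is returned only after the resolv.conf cleanup below. */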
/* Cleanup resolv.conf after mmdebstrap */ resolvconf := path.Join(context.Rootdir, "/etc/resolv.conf") if _, err := os.Stat(resolvconf); !os.IsNotExist(err) { if err = os.Remove(resolvconf); err != nil { return err } } return mmdebstrapErr } debos-1.1.5/actions/ostree_commit_action.go000066400000000000000000000056321476454235000210270ustar00rootroot00000000000000/* OstreeCommit Action Create OSTree commit from rootfs. # Yaml syntax: - action: ostree-commit repository: repository name branch: branch name subject: commit message collection-id: org.apertis.example ref-binding: - branch1 - branch2 metadata: key: value vendor.key: somevalue Mandatory properties: - repository -- path to repository with OSTree structure; the same path is used by 'ostree' tool with '--repo' argument. This path is relative to 'artifact' directory. Please keep in mind -- you will need a root privileges for 'bare' repository type (https://ostree.readthedocs.io/en/latest/manual/repo/#repository-types-and-locations). - branch -- OSTree branch name that should be used for the commit. Optional properties: - subject -- one line message with commit description. - collection-id -- Collection ID ref binding (requires libostree 2018.6). - ref-binding -- enforce that the commit was retrieved from one of the branch names in this array. If 'collection-id' is set and 'ref-binding' is empty, will default to the branch name. - metadata -- key-value pairs of meta information to be added into commit. */ package actions import ( "fmt" "log" "os" "path" "github.com/go-debos/debos" "github.com/sjoerdsimons/ostree-go/pkg/otbuiltin" ) type OstreeCommitAction struct { debos.BaseAction `yaml:",inline"` Repository string Branch string Subject string Command string CollectionID string `yaml:"collection-id"` RefBinding []string `yaml:"ref-binding"` Metadata map[string]string } func emptyDir(dir string) { d, _ := os.Open(dir) defer d.Close() files, err := d.Readdirnames(-1) if err != nil { log.Fatal(err) } for _, f := range files { err := os.RemoveAll(path.Join(dir, f)) if err != nil { log.Fatalf("Failed to remove file: %v", err) } } } func (ot *OstreeCommitAction) Run(context *debos.DebosContext) error { repoPath := path.Join(context.Artifactdir, ot.Repository) emptyDir(path.Join(context.Rootdir, "dev")) repo, err := otbuiltin.OpenRepo(repoPath) if err != nil { return err } _, err = repo.PrepareTransaction() if err != nil { return err } opts := otbuiltin.NewCommitOptions() opts.Subject = ot.Subject for k, v := range ot.Metadata { str := fmt.Sprintf("%s=%s", k, v) opts.AddMetadataString = append(opts.AddMetadataString, str) } if ot.CollectionID != "" { opts.CollectionID = ot.CollectionID if len(ot.RefBinding) == 0 { // Add current branch if not explitely set via 'ref-binding' opts.RefBinding = append(opts.RefBinding, ot.Branch) } } // Add values from 'ref-binding' if any opts.RefBinding = append(opts.RefBinding, ot.RefBinding...) ret, err := repo.Commit(context.Rootdir, ot.Branch, opts) if err != nil { return err } else { log.Printf("Commit: %s\n", ret) } _, err = repo.CommitTransaction() if err != nil { return err } return nil } debos-1.1.5/actions/ostree_deploy_action.go000066400000000000000000000133061476454235000210300ustar00rootroot00000000000000/* OstreeDeploy Action Deploy the OSTree branch to the image. If any preparation has been done for rootfs, it can be overwritten during this step. Action 'image-partition' must be called prior to OSTree deploy. 
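A minimal usage sketch (all values are illustrative; the individual properties
are described under 'Yaml syntax' below, and the repository and branch are
expected to match an earlier 'ostree-commit' action):

 - action: ostree-deploy
   repository: repo
   remote_repository: https://ostree.example.com/repo
   os: debian
   branch: debian/stable/amd64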
# Yaml syntax:

 - action: ostree-deploy
   repository: repository name
   remote_repository: URL
   branch: branch name
   os: os name
   tls-client-cert-path: path to client certificate
   tls-client-key-path: path to client certificate key
   setup-fstab: bool
   setup-kernel-cmdline: bool
   append-kernel-cmdline: arguments
   collection-id: org.apertis.example

Mandatory properties:

- remote_repository -- URL of the remote OSTree repository for pulling the
stateroot branch. Currently not implemented; please prepare a local repository instead.

- repository -- path to the repository with the OSTree structure.
This path is relative to the 'artifact' directory.

- os -- os deployment name, as explained in:
https://ostree.readthedocs.io/en/latest/manual/deployment/

- branch -- branch of the repository to use for populating the image.

Optional properties:

- setup-fstab -- create an '/etc/fstab' file for the image.

- setup-kernel-cmdline -- add the information from the 'image-partition' action
to the configured kernel command line.

- append-kernel-cmdline -- additional kernel command line arguments passed to the kernel.

- tls-client-cert-path -- path to the client certificate to use for the remote repository.

- tls-client-key-path -- path to the client certificate key to use for the remote repository.

- collection-id -- Collection ID ref binding (requires libostree 2018.6).
*/ package actions import ( "fmt" "io" "os" "path" "strings" "github.com/go-debos/debos" ostree "github.com/sjoerdsimons/ostree-go/pkg/otbuiltin" ) type OstreeDeployAction struct { debos.BaseAction `yaml:",inline"` Repository string RemoteRepository string "remote_repository" Branch string Os string SetupFSTab bool `yaml:"setup-fstab"` SetupKernelCmdline bool `yaml:"setup-kernel-cmdline"` AppendKernelCmdline string `yaml:"append-kernel-cmdline"` TlsClientCertPath string `yaml:"tls-client-cert-path"` TlsClientKeyPath string `yaml:"tls-client-key-path"` CollectionID string `yaml:"collection-id"` } func NewOstreeDeployAction() *OstreeDeployAction { ot := &OstreeDeployAction{SetupFSTab: true, SetupKernelCmdline: true} ot.Description = "Deploying from ostree" return ot } func (ot *OstreeDeployAction) setupFSTab(deployment *ostree.Deployment, context *debos.DebosContext) error { deploymentDir := fmt.Sprintf("ostree/deploy/%s/deploy/%s.%d", deployment.Osname(), deployment.Csum(), deployment.Deployserial()) etcDir := path.Join(context.Rootdir, deploymentDir, "etc") err := os.Mkdir(etcDir, 0755) if err != nil && !os.IsExist(err) { return err } dst, err := os.OpenFile(path.Join(etcDir, "fstab"), os.O_WRONLY|os.O_CREATE, 0755) defer dst.Close() if err != nil { return err } _, err = io.Copy(dst, &context.ImageFSTab) return err } func (ot *OstreeDeployAction) Run(context *debos.DebosContext) error { // This is to handle cases where we didn't partition an image
if len(context.ImageMntDir) != 0 { /* First deploy the current rootdir to the image so it can seed e.g.
* bootloader configuration */ err := debos.Command{}.Run("Deploy to image", "cp", "-a", context.Rootdir+"/.", context.ImageMntDir) if err != nil { return fmt.Errorf("rootfs deploy failed: %v", err) } context.Rootdir = context.ImageMntDir context.Origins["filesystem"] = context.ImageMntDir } repoPath := "file://" + path.Join(context.Artifactdir, ot.Repository) sysroot := ostree.NewSysroot(context.Rootdir) err := sysroot.InitializeFS() if err != nil { return err } err = sysroot.InitOsname(ot.Os, nil) if err != nil { return err } /* HACK: Getting the repository form the sysroot gets ostree confused on * whether it should configure /etc/ostree or the repo configuration, so reopen by hand */ /* dstRepo, err := sysroot.Repo(nil) */ dstRepo, err := ostree.OpenRepo(path.Join(context.Rootdir, "ostree/repo")) if err != nil { return err } /* FIXME: add support for gpg signing commits so this is no longer needed */ opts := ostree.RemoteOptions{NoGpgVerify: true, TlsClientCertPath: ot.TlsClientCertPath, TlsClientKeyPath: ot.TlsClientKeyPath, CollectionId: ot.CollectionID, } err = dstRepo.RemoteAdd("origin", ot.RemoteRepository, opts, nil) if err != nil { return err } var options ostree.PullOptions options.OverrideRemoteName = "origin" options.Refs = []string{ot.Branch} err = dstRepo.PullWithOptions(repoPath, options, nil, nil) if err != nil { return err } /* Required by ostree to make sure a bunch of information was pulled in */ sysroot.Load(nil) revision, err := dstRepo.ResolveRev(ot.Branch, false) if err != nil { return err } var kargs []string if ot.SetupKernelCmdline { kargs = append(kargs, context.ImageKernelRoot) } if ot.AppendKernelCmdline != "" { s := strings.Split(ot.AppendKernelCmdline, " ") kargs = append(kargs, s...) } origin := sysroot.OriginNewFromRefspec("origin:" + ot.Branch) deployment, err := sysroot.DeployTree(ot.Os, revision, origin, nil, kargs, nil) if err != nil { return err } if ot.SetupFSTab { err = ot.setupFSTab(deployment, context) if err != nil { return err } } err = sysroot.SimpleWriteDeployment(ot.Os, deployment, nil, 0, nil) if err != nil { return err } /* libostree keeps some information, like repo lock file descriptor, in * thread specific variables. As GC can be run from another thread, it * may not been able to access this, preventing to free them correctly. * To prevent this, explicitly dereference libostree objects. */ dstRepo.Unref() sysroot.Unref() return nil } debos-1.1.5/actions/overlay_action.go000066400000000000000000000033551476454235000176370ustar00rootroot00000000000000/* Overlay Action Recursive copy of directory or file to target filesystem. # Yaml syntax: - action: overlay origin: name source: directory destination: directory Mandatory properties: - source -- relative path to the directory or file located in path referenced by `origin`. In case if this property is absent then pure path referenced by 'origin' will be used. Optional properties: - origin -- reference to named file or directory. - destination -- absolute path in the target rootfs where 'source' will be copied. All existing files will be overwritten. If destination isn't set '/' of the rootfs will be used. 
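For example, a sketch that copies a hypothetical overlay directory shipped next
to the recipe onto the root of the target filesystem (the directory name is
purely illustrative):

 - action: overlay
   source: overlays/base-files
   destination: /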
*/ package actions import ( "fmt" "log" "path" "github.com/go-debos/debos" ) type OverlayAction struct { debos.BaseAction `yaml:",inline"` Origin string // origin of overlay, here the export from other action may be used Source string // external path there overlay is Destination string // path inside of rootfs } func (overlay *OverlayAction) Verify(context *debos.DebosContext) error { if _, err := debos.RestrictedPath(context.Rootdir, overlay.Destination); err != nil { return err } return nil } func (overlay *OverlayAction) Run(context *debos.DebosContext) error { origin := context.RecipeDir //Trying to get a filename from exports first if len(overlay.Origin) > 0 { var found bool if origin, found = context.Origin(overlay.Origin); !found { return fmt.Errorf("Origin not found '%s'", overlay.Origin) } } sourcedir := path.Join(origin, overlay.Source) destination, err := debos.RestrictedPath(context.Rootdir, overlay.Destination) if err != nil { return err } log.Printf("Overlaying %s on %s", sourcedir, destination) return debos.CopyTree(sourcedir, destination) } debos-1.1.5/actions/pack_action.go000066400000000000000000000040631476454235000170710ustar00rootroot00000000000000/* Pack Action Create tarball with filesystem. # Yaml syntax: - action: pack file: filename.ext compression: gz Mandatory properties: - file -- name of the output tarball, relative to the artifact directory. Optional properties: - compression -- compression type to use. Currently only 'gz', 'bzip2' and 'xz' compression types are supported. Use 'none' for uncompressed tarball. The 'gz' compression type will be used by default. */ package actions import ( "fmt" "log" "path" "strings" "os/exec" "github.com/go-debos/debos" ) var tarOpts = map[string]string{ "gz": "z", "bzip2": "j", "xz": "J", "none": "", } type PackAction struct { debos.BaseAction `yaml:",inline"` Compression string File string } func NewPackAction() *PackAction { d := PackAction{} // Use gz by default d.Compression = "gz" return &d } func (pf *PackAction) Verify(context *debos.DebosContext) error { _, compressionAvailable := tarOpts[pf.Compression] if compressionAvailable { return nil } possibleTypes := make([]string, 0, len(tarOpts)) for key := range tarOpts { possibleTypes = append(possibleTypes, key) } return fmt.Errorf("Option 'compression' has an unsupported type: `%s`. Possible types are %s.", pf.Compression, strings.Join(possibleTypes, ", ")) } func (pf *PackAction) Run(context *debos.DebosContext) error { usePigz := false if pf.Compression == "gz" { if _,err := exec.LookPath("pigz"); err == nil { usePigz = true } } outfile := path.Join(context.Artifactdir, pf.File) command := []string{"tar"} if usePigz == true { command = append(command, "cf") } else { command = append(command, "cf" + tarOpts[pf.Compression]) } command = append(command, outfile) command = append(command, "--xattrs") command = append(command, "--xattrs-include=*.*") if usePigz == true { command = append(command, "--use-compress-program=pigz") } command = append(command, "-C", context.Rootdir) command = append(command, ".") log.Printf("Compressing to %s\n", outfile) return debos.Command{}.Run("Packing", command...) } debos-1.1.5/actions/pacman_action.go000066400000000000000000000013071476454235000174100ustar00rootroot00000000000000/* Pacman Action Install packages and their dependencies to the target rootfs with 'pacman'. 
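In an Arch-based recipe this action typically follows pacstrap; a brief sketch
(package names and file names are illustrative, see the syntax description below):

 - action: pacstrap
   config: pacman.conf
   mirror: mirrorlist

 - action: pacman
   packages:
     - openssh
     - sudo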
# Yaml syntax: - action: pacman packages: - package1 - package2 Mandatory properties: - packages -- list of packages to install */ package actions import ( "github.com/go-debos/debos" ) type PacmanAction struct { debos.BaseAction `yaml:",inline"` Packages []string } func (p *PacmanAction) Run(context *debos.DebosContext) error { pacmanOptions := []string{"pacman", "-Syu", "--noconfirm"} pacmanOptions = append(pacmanOptions, p.Packages...) c := debos.NewChrootCommandForContext(*context) if err := c.Run("pacman", pacmanOptions...); err != nil { return err } return nil } debos-1.1.5/actions/pacstrap_action.go000066400000000000000000000062321476454235000177700ustar00rootroot00000000000000/* Pacstrap Action Construct the target rootfs with pacstrap tool. # Yaml syntax: - action: pacstrap config: mirror: Mandatory properties: - config -- the pacman.conf file which will be used through the process - mirror -- the mirrorlist file which will be used through the process */ package actions import ( "fmt" "io/ioutil" "os" "path" "github.com/go-debos/debos" "github.com/go-debos/fakemachine" ) type PacstrapAction struct { debos.BaseAction `yaml:",inline"` Config string `yaml:"config"` Mirror string `yaml:"mirror"` } func (d *PacstrapAction) listOptionFiles(context *debos.DebosContext) ([]string, error) { files := []string{} if d.Config == "" { return nil, fmt.Errorf("No config file set") } d.Config = debos.CleanPathAt(d.Config, context.RecipeDir) files = append(files, d.Config) if d.Mirror == "" { return nil, fmt.Errorf("No mirror file set") } d.Mirror = debos.CleanPathAt(d.Mirror, context.RecipeDir) files = append(files, d.Mirror) return files, nil } func (d *PacstrapAction) Verify(context *debos.DebosContext) error { files, err := d.listOptionFiles(context) if err != nil { return err } // Check if all needed files exists for _, f := range files { if _, err := os.Stat(f); os.IsNotExist(err) { return err } } return nil } func (d *PacstrapAction) PreNoMachine(context *debos.DebosContext) error { return fmt.Errorf("action requires fakemachine") } func (d *PacstrapAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { mounts, err := d.listOptionFiles(context) if err != nil { return err } // Mount configuration files outside of recipes directory for _, mount := range mounts { m.AddVolume(path.Dir(mount)) } return nil } func (d *PacstrapAction) Run(context *debos.DebosContext) error { files := map[string]string{ "/etc/pacman.conf": d.Config, "/etc/pacman.d/mirrorlist": d.Mirror, } // Copy the config/mirrorlist files for dest, src := range files { if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { return err } read, err := ioutil.ReadFile(src) if err != nil { return err } if err = ioutil.WriteFile(dest, read, 0644); err != nil { return err } } // Setup the local keychain, within the fakemachine instance, since we // don't have access to the host one. // Even if we did, blindly copying it might not be a good idea. 
cmdline := []string{"pacman-key", "--init"} if err := (debos.Command{}.Run("pacman-key", cmdline...)); err != nil { return fmt.Errorf("Couldn't init pacman keyring: %v", err) } // When there's no explicit keyring suite we populate all available cmdline = []string{"pacman-key", "--populate"} if err := (debos.Command{}.Run("pacman-key", cmdline...)); err != nil { return fmt.Errorf("Couldn't populate pacman keyring: %v", err) } // Run pacstrap cmdline = []string{"pacstrap", context.Rootdir} if err := (debos.Command{}.Run("pacstrap", cmdline...)); err != nil { log := path.Join(context.Rootdir, "var/log/pacman.log") _ = debos.Command{}.Run("pacstrap.log", "cat", log) return err } return nil } debos-1.1.5/actions/raw_action.go000066400000000000000000000071641476454235000167510ustar00rootroot00000000000000/* Raw Action Directly write a file to the output image at a given offset. This is typically useful for bootloaders. # Yaml syntax: - action: raw origin: name source: filename offset: bytes Mandatory properties: - origin -- reference to named file or directory. - source -- the name of file located in 'origin' to be written into the output image. Optional properties: - offset -- offset in bytes or in sector number e.g 256s. The sector size is either the recipe header 'sectorsize' or the default 512 sector size. Internal templating mechanism will append the 's' suffix, for instance: '{{ sector 256 }}' will be converted to '256s'. Deprecated, use '256s' instead of '{{ sector 256 }}'. The default value is zero. - partition -- named partition to write to */ package actions import ( "errors" "fmt" "io/ioutil" "log" "os" "path" "strconv" "strings" "github.com/go-debos/debos" ) type RawAction struct { debos.BaseAction `yaml:",inline"` Origin string // there the source comes from Offset string Source string // relative path inside of origin Path string // deprecated option (for backward compatibility) Partition string // Partition to write otherwise full image } func (raw *RawAction) checkDeprecatedSyntax() error { // New syntax is based on 'origin' and 'source' // Check if we do not mix new and old syntax // TODO: remove deprecated syntax verification if len(raw.Path) > 0 { // Deprecated syntax based on 'source' and 'path' log.Printf("Usage of 'source' and 'path' properties is deprecated.") log.Printf("Please use 'origin' and 'source' properties.") if len(raw.Origin) > 0 { return errors.New("Can't mix 'origin' and 'path'(deprecated option) properties") } if len(raw.Source) == 0 { return errors.New("'source' and 'path' properties can't be empty") } // Switch to new syntax raw.Origin = raw.Source raw.Source = raw.Path raw.Path = "" } return nil } func (raw *RawAction) Verify(context *debos.DebosContext) error { if err := raw.checkDeprecatedSyntax(); err != nil { return err } if len(raw.Origin) == 0 || len(raw.Source) == 0 { return errors.New("'origin' and 'source' properties can't be empty") } return nil } func (raw *RawAction) Run(context *debos.DebosContext) error { origin, found := context.Origin(raw.Origin) if !found { return fmt.Errorf("Origin `%s` doesn't exist\n", raw.Origin) } s := path.Join(origin, raw.Source) content, err := ioutil.ReadFile(s) if err != nil { return fmt.Errorf("Failed to read %s", s) } var devicePath string if raw.Partition != "" { for _, p := range context.ImagePartitions { if p.Name == raw.Partition { devicePath = p.DevicePath break } } if devicePath == "" { return fmt.Errorf("Failed to find partition named %s", raw.Partition) } } else { devicePath = context.Image } target, 
err := os.OpenFile(devicePath, os.O_WRONLY, 0) if err != nil { return fmt.Errorf("Failed to open %s: %v", devicePath, err) } defer target.Close() var offset int64 = 0 if len(raw.Offset) > 0 { sector := false offs := raw.Offset if strings.HasSuffix(offs, "s") { sector = true offs = strings.TrimSuffix(offs, "s") } offset, err = strconv.ParseInt(offs, 0, 64) if err != nil { return fmt.Errorf("Couldn't parse offset %v", err) } if sector { offset = offset * int64(context.SectorSize) } } bytes, err := target.WriteAt(content, offset) if bytes != len(content) { return fmt.Errorf("Couldn't write complete data %v", err) } err = target.Sync() if err != nil { return fmt.Errorf("Couldn't sync content %v", err) } return nil } debos-1.1.5/actions/recipe.go000066400000000000000000000203561476454235000160700ustar00rootroot00000000000000/* Package 'recipe' implements actions mapping to YAML recipe.

Recipe syntax

A recipe is a YAML file which is pre-processed through the Golang text
templating engine (https://golang.org/pkg/text/template)

A recipe is composed of two parts:

- header

- actions

Comments are allowed and should be prefixed with the '#' symbol.

 # Declare variable 'Var'
 {{- $Var := "Value" -}}

 # Header
 architecture: arm64
 sectorsize: 512

 # Actions are executed in listed order
 actions:
   - action: ActionName1
     property1: true

   - action: ActionName2
     # Use value of variable 'Var' defined above
     property2: {{$Var}}

The following custom template functions are available:

- sector: Returns the argument with the 's' suffix for the raw action (Deprecated)

- escape: Shell escape the argument `{{ escape $var }}`

- uuid5: Generates a fixed UUID value `{{ uuid5 $random-uuid $text }}`

- functions from [slim-sprig](https://go-task.github.io/slim-sprig/)

Mandatory properties for a recipe:

- architecture -- target architecture

- actions -- at least one action should be listed

Optional properties for a recipe:

- sectorsize -- overrides the default 512-byte sector size; mandatory for devices
using a 4k block size such as UFS or NVMe storage.
Setting the sector size to a value other than '512' is not supported by the
'uml' fakemachine backend.
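As an illustration, these functions might be combined in a recipe like this
(the variable names, the uuid5 namespace and the file paths are examples only):

 {{- $user := or .user "debos" -}}
 {{- $seed := uuid5 "9c98e0cc-4c3d-4b8e-9e32-7e1a9b1aa1ab" "rootfs-a" -}}

 architecture: arm64

 actions:
   - action: run
     chroot: true
     command: echo {{ escape $user }} > /etc/default-user

   - action: run
     chroot: true
     command: echo {{ $seed }} > /etc/image-seed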
Supported actions - apt -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Apt_Action - debootstrap -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Debootstrap_Action - mmdebstrap -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Mmdebstrap_Action - download -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Download_Action - filesystem-deploy -- https://godoc.org/github.com/go-debos/debos/actions#hdr-FilesystemDeploy_Action - image-partition -- https://godoc.org/github.com/go-debos/debos/actions#hdr-ImagePartition_Action - ostree-commit -- https://godoc.org/github.com/go-debos/debos/actions#hdr-OstreeCommit_Action - ostree-deploy -- https://godoc.org/github.com/go-debos/debos/actions#hdr-OstreeDeploy_Action - overlay -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Overlay_Action - pack -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Pack_Action - pacman -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Pacman_Action - pacstrap -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Pacstrap_Action - raw -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Raw_Action - recipe -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Recipe_Action - run -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Run_Action - unpack -- https://godoc.org/github.com/go-debos/debos/actions#hdr-Unpack_Action */ package actions import ( "bytes" "fmt" "al.essio.dev/pkg/shellescape" "github.com/go-debos/debos" "gopkg.in/yaml.v2" "github.com/go-task/slim-sprig/v3" "path" "text/template" "log" "strconv" "strings" "reflect" "github.com/google/uuid" ) /* the YamlAction just embed the Action interface and implements the * UnmarshalYAML function so it can select the concrete implementer of a * specific action at unmarshaling time */ type YamlAction struct { debos.Action } type Recipe struct { Architecture string SectorSize int Actions []YamlAction } func (y *YamlAction) UnmarshalYAML(unmarshal func(interface{}) error) error { var aux debos.BaseAction err := unmarshal(&aux) if err != nil { return err } switch aux.Action { case "debootstrap": y.Action = NewDebootstrapAction() case "mmdebstrap": y.Action = NewMmdebstrapAction() case "pacstrap": y.Action = &PacstrapAction{} case "pack": y.Action = NewPackAction() case "unpack": y.Action = &UnpackAction{} case "run": y.Action = &RunAction{} case "apt": y.Action = NewAptAction() case "pacman": y.Action = &PacmanAction{} case "ostree-commit": y.Action = &OstreeCommitAction{} case "ostree-deploy": y.Action = NewOstreeDeployAction() case "overlay": y.Action = &OverlayAction{} case "image-partition": y.Action = &ImagePartitionAction{} case "filesystem-deploy": y.Action = NewFilesystemDeployAction() case "raw": y.Action = &RawAction{} case "download": y.Action = &DownloadAction{} case "recipe": y.Action = &RecipeAction{} default: return fmt.Errorf("Unknown action: %v", aux.Action) } err = unmarshal(y.Action) if err != nil { return err } return nil } // Deprecated we don't know the sector size when processing the template, // the sector size can be defined using yaml // definition. Append a 's' suffix and let the raw // action to calculate the correct value. 
func sector(s int) string { return strconv.Itoa(s) + "s" } func escape(s string) string { return shellescape.Quote(s) } func uuid5(namespace string, data string) string { id := uuid.NewSHA1(uuid.MustParse(namespace), []byte(data)) return id.String() } func DumpActionStruct(iface interface{}) string { var a []string s := reflect.ValueOf(iface) t := reflect.TypeOf(iface) for i := 0; i < t.NumField(); i++ { f := s.Field(i) // Dump only exported entries if f.CanInterface() { str := fmt.Sprintf("%s: %v", s.Type().Field(i).Name, f.Interface()) a = append(a, str) } } return strings.Join(a, ", ") } const tabs = 2 func DumpActions(iface interface{}, depth int) { tab := strings.Repeat(" ", depth * tabs) entries := reflect.ValueOf(iface) for i := 0; i < entries.NumField(); i++ { if entries.Type().Field(i).Name == "Actions" { log.Printf("%s %s:\n", tab, entries.Type().Field(i).Name) actions := reflect.ValueOf(entries.Field(i).Interface()) for j := 0; j < actions.Len(); j++ { yaml := reflect.ValueOf(actions.Index(j).Interface()) DumpActionFields(yaml.Field(0).Interface(), depth + 1) } } else { log.Printf("%s %s: %v\n", tab, entries.Type().Field(i).Name, entries.Field(i).Interface()) } } } func DumpActionFields(iface interface{}, depth int) { tab := strings.Repeat(" ", depth * tabs) entries := reflect.ValueOf(iface).Elem() for i := 0; i < entries.NumField(); i++ { f := entries.Field(i) // Dump only exported entries if f.CanInterface() { switch f.Kind() { case reflect.Struct: if entries.Type().Field(i).Type.String() == "debos.BaseAction" { // BaseAction is the only struct embbed in Action ActionFields // dump it at the same level log.Printf("%s- %s", tab, DumpActionStruct(f.Interface())) } case reflect.Slice: s := reflect.ValueOf(f.Interface()) if s.Len() > 0 && s.Index(0).Kind() == reflect.Struct { log.Printf("%s %s:\n", tab, entries.Type().Field(i).Name) for j := 0; j < s.Len(); j++ { if s.Index(j).Kind() == reflect.Struct { log.Printf("%s { %s }", tab, DumpActionStruct(s.Index(j).Interface())) } } } else { log.Printf("%s %s: %s\n", tab, entries.Type().Field(i).Name, f) } default: log.Printf("%s %s: %v\n", tab, entries.Type().Field(i).Name, f.Interface()) } } } } /* Parse method reads YAML recipe file and map all steps to appropriate actions. - file -- is the path to configuration file - templateVars -- optional argument allowing to use custom map for templating engine. Multiple template maps have no effect; only first map will be used. 
*/ func (r *Recipe) Parse(file string, printRecipe bool, dump bool, templateVars ...map[string]string) error { t := template.New(path.Base(file)) funcs := template.FuncMap{ "sector": sector, "escape": escape, "uuid5": uuid5, } t.Funcs(funcs) /* Add slim-sprig functions to template language */ t.Funcs(sprig.FuncMap()) if _, err := t.ParseFiles(file); err != nil { return err } if len(templateVars) == 0 { templateVars = append(templateVars, make(map[string]string)) } data := new(bytes.Buffer) if err := t.Execute(data, templateVars[0]); err != nil { return err } if printRecipe || dump { log.Printf("Recipe '%s':", file) } if printRecipe { log.Printf("%s", data) } if err := yaml.Unmarshal(data.Bytes(), &r); err != nil { return err } if dump { DumpActions(reflect.ValueOf(*r).Interface(), 0) } if len(r.Architecture) == 0 { return fmt.Errorf("Recipe file must have 'architecture' property") } if len(r.Actions) == 0 { return fmt.Errorf("Recipe file must have at least one action") } if r.SectorSize == 0 { r.SectorSize = 512 } return nil } debos-1.1.5/actions/recipe_action.go000066400000000000000000000070611476454235000174230ustar00rootroot00000000000000/* Recipe Action This action includes the recipe at the given path, and can optionally override or set template variables. To ensure compatibility, both the parent recipe and all included recipes have to be for the same architecture. For convenience the parent architecture is passed in the "architecture" template variable. Limitations of combined recipes are equivalent to limitations within a single recipe (e.g. there can only be one image partition action). # Yaml syntax: - action: recipe recipe: path to recipe variables: key: value Mandatory properties: - recipe -- includes the recipe actions at the given path. Optional properties: - variables -- overrides or adds new template variables. */ package actions import ( "errors" "fmt" "log" "os" "path/filepath" "github.com/go-debos/debos" "github.com/go-debos/fakemachine" ) type RecipeAction struct { debos.BaseAction `yaml:",inline"` Recipe string Variables map[string]string Actions Recipe `yaml:"-"` templateVars map[string]string context debos.DebosContext } func (recipe *RecipeAction) Verify(context *debos.DebosContext) error { if len(recipe.Recipe) == 0 { return errors.New("'recipe' property can't be empty") } recipe.context = *context file := recipe.Recipe if !filepath.IsAbs(file) { file = filepath.Clean(context.RecipeDir + "/" + recipe.Recipe) } recipe.context.RecipeDir = filepath.Dir(file) if _, err := os.Stat(file); os.IsNotExist(err) { return err } // Initialise template vars recipe.templateVars = make(map[string]string) recipe.templateVars["architecture"] = context.Architecture // Add Variables to template vars for k, v := range recipe.Variables { recipe.templateVars[k] = v } if err := recipe.Actions.Parse(file, context.PrintRecipe, context.Verbose, recipe.templateVars); err != nil { return err } if recipe.context.Architecture != recipe.Actions.Architecture { return fmt.Errorf("Expect architecture '%s' but got '%s'", context.Architecture, recipe.Actions.Architecture) } for _, a := range recipe.Actions.Actions { if err := a.Verify(&recipe.context); err != nil { return err } } return nil } func (recipe *RecipeAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { // TODO: check args? 
m.AddVolume(recipe.context.RecipeDir) for _, a := range recipe.Actions.Actions { if err := a.PreMachine(&recipe.context, m, args); err != nil { return err } } return nil } func (recipe *RecipeAction) PreNoMachine(context *debos.DebosContext) error { for _, a := range recipe.Actions.Actions { if err := a.PreNoMachine(&recipe.context); err != nil { return err } } return nil } func (recipe *RecipeAction) Run(context *debos.DebosContext) error { for _, a := range recipe.Actions.Actions { log.Printf("==== %s ====\n", a) if err := a.Run(&recipe.context); err != nil { return err } } return nil } func (recipe *RecipeAction) Cleanup(context *debos.DebosContext) error { for _, a := range recipe.Actions.Actions { if err := a.Cleanup(&recipe.context); err != nil { return err } } return nil } func (recipe *RecipeAction) PostMachine(context *debos.DebosContext) error { for _, a := range recipe.Actions.Actions { if err := a.PostMachine(&recipe.context); err != nil { return err } } return nil } func (recipe *RecipeAction) PostMachineCleanup(context *debos.DebosContext) error { for _, a := range recipe.Actions.Actions { if err := a.PostMachineCleanup(&recipe.context); err != nil { return err } } return nil } debos-1.1.5/actions/recipe_test.go000066400000000000000000000155431476454235000171310ustar00rootroot00000000000000package actions_test import ( "github.com/go-debos/debos" "github.com/go-debos/debos/actions" "github.com/stretchr/testify/assert" "io/ioutil" "os" "testing" "strings" ) type testRecipe struct { recipe string err string } // Test if incorrect file has been passed func TestParse_incorrect_file(t *testing.T) { var err error var tests = []struct { filename string err string }{ { "non-existing.yaml", "open non-existing.yaml: no such file or directory", }, { "/proc", "read /proc: is a directory", }, } for _, test := range tests { r := actions.Recipe{} err = r.Parse(test.filename, false, false) assert.EqualError(t, err, test.err) } } // Check common recipe syntax func TestParse_syntax(t *testing.T) { var tests = []testRecipe{ // Test if all actions are supported {` architecture: arm64 actions: - action: apt - action: debootstrap - action: download - action: filesystem-deploy - action: image-partition - action: ostree-commit - action: ostree-deploy - action: overlay - action: pack - action: raw - action: run - action: unpack - action: recipe `, "", // Do not expect failure }, // Test of unknown action in list {` architecture: arm64 actions: - action: test_unknown_action `, "Unknown action: test_unknown_action", }, // Test if 'architecture' property absence {` actions: - action: raw `, "Recipe file must have 'architecture' property", }, // Test if no actions listed {` architecture: arm64 `, "Recipe file must have at least one action", }, // Test of wrong syntax in Yaml {`wrong`, "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `wrong` into actions.Recipe", }, // Test if no actions listed {` architecture: arm64 `, "Recipe file must have at least one action", }, } for _, test := range tests { runTest(t, test) } } // Check template engine func TestParse_template(t *testing.T) { var test = testRecipe{ // Test template variables ` {{ $action:= or .action "download" }} architecture: arm64 actions: - action: {{ $action }} `, "", // Do not expect failure } { // Test of embedded template r := runTest(t, test) assert.Equalf(t, r.Actions[0].String(), "download", "Fail to use embedded variable definition from recipe:%s\n", test.recipe) } { // Test of user-defined template variable var templateVars = 
map[string]string{ "action": "pack", } r := runTest(t, test, templateVars) assert.Equalf(t, r.Actions[0].String(), "pack", "Fail to redefine variable with user-defined map:%s\n", test.recipe) } } // Test of 'sector' function embedded to recipe package func TestParse_sector(t *testing.T) { var testSector = testRecipe{ // Fail with unknown action ` architecture: arm64 actions: - action: {{ sector 42 }} `, "Unknown action: 42s", } runTest(t, testSector) } func runTest(t *testing.T, test testRecipe, templateVars ...map[string]string) actions.Recipe { file, err := ioutil.TempFile(os.TempDir(), "recipe") assert.Empty(t, err) defer os.Remove(file.Name()) file.WriteString(test.recipe) file.Close() r := actions.Recipe{} if len(templateVars) == 0 { err = r.Parse(file.Name(), false, false) } else { err = r.Parse(file.Name(), false, false, templateVars[0]) } failed := false if len(test.err) > 0 { // Expected error? failed = !assert.EqualError(t, err, test.err) } else { // Unexpected error failed = !assert.Empty(t, err) } if failed { t.Logf("Failed recipe:%s\n", test.recipe) } return r } type subRecipe struct { name string recipe string } type testSubRecipe struct { recipe string subrecipe subRecipe err string parseErr string } func TestSubRecipe(t *testing.T) { // Embedded recipes var recipeAmd64 = subRecipe { "amd64.yaml", ` architecture: amd64 actions: - action: run command: ok.sh `, } var recipeInheritedArch = subRecipe { "inherited.yaml", ` {{- $architecture := or .architecture "armhf" }} architecture: {{ $architecture }} actions: - action: run command: ok.sh `, } var recipeArmhf = subRecipe { "armhf.yaml", ` architecture: armhf actions: - action: run command: ok.sh `, } // test recipes var tests = []testSubRecipe { { // Test recipe same architecture OK ` architecture: amd64 actions: - action: recipe recipe: amd64.yaml `, recipeAmd64, "", // Do not expect failure "", // Do not expect parse failure }, { // Test recipe with inherited architecture OK ` architecture: amd64 actions: - action: recipe recipe: inherited.yaml `, recipeInheritedArch, "", // Do not expect failure "", // Do not expect parse failure }, { // Fail with unknown recipe ` architecture: amd64 actions: - action: recipe recipe: unknown_recipe.yaml `, recipeAmd64, "stat /tmp/unknown_recipe.yaml: no such file or directory", "", // Do not expect parse failure }, { // Fail with different architecture recipe ` architecture: amd64 actions: - action: recipe recipe: armhf.yaml `, recipeArmhf, "Expect architecture 'amd64' but got 'armhf'", "", // Do not expect parse failure }, { // Fail with type mismatch during parse ` architecture: armhf actions: - action: recipe recipe: armhf.yaml variables: - foo `, recipeArmhf, "", "yaml: unmarshal errors:\n line 8: cannot unmarshal !!seq into map[string]string", }, } for _, test := range tests { runTestWithSubRecipes(t, test) } } func runTestWithSubRecipes(t *testing.T, test testSubRecipe, templateVars ...map[string]string) actions.Recipe { context := debos.DebosContext { &debos.CommonContext{}, "", "", 512 } dir, err := ioutil.TempDir("", "go-debos") assert.Empty(t, err) defer os.RemoveAll(dir) file, err := ioutil.TempFile(dir, "recipe") assert.Empty(t, err) defer os.Remove(file.Name()) file.WriteString(test.recipe) file.Close() file_subrecipe, err := os.Create(dir + "/" + test.subrecipe.name) assert.Empty(t, err) defer os.Remove(file_subrecipe.Name()) file_subrecipe.WriteString(test.subrecipe.recipe) file_subrecipe.Close() failed := false r := actions.Recipe{} if len(templateVars) == 0 { err = 
r.Parse(file.Name(), false, false) } else { err = r.Parse(file.Name(), false, false, templateVars[0]) } if len(test.parseErr) > 0 { // Expected parse error? failed = !assert.EqualError(t, err, test.parseErr) } else { // Unexpected error failed = !assert.Empty(t, err) } if err == nil { context.Architecture = r.Architecture context.SectorSize = r.SectorSize context.RecipeDir = dir for _, a := range r.Actions { if err = a.Verify(&context); err != nil { break } } if len(test.err) > 0 { // Expected error? failed = !assert.EqualError(t, err, strings.Replace(test.err, "/tmp", dir, 1)) } else { // Unexpected error failed = !assert.Empty(t, err) } } if failed { t.Logf("Failed recipe:%s\n", test.recipe) } return r } debos-1.1.5/actions/run_action.go000066400000000000000000000107721476454235000167630ustar00rootroot00000000000000/* Run Action Allows to run any available command or script in the filesystem or in build process host environment: specifically inside the fakemachine created by Debos. # Yaml syntax: - action: run chroot: bool postprocess: bool script: script name command: command line label: string Properties 'command' and 'script' are mutually exclusive. - command -- command with arguments; the command expected to be accessible in host's or chrooted environment -- depending on 'chroot' property. - script -- script with arguments; script must be located in recipe directory. Optional properties: - chroot -- run script or command in target filesystem if set to true. Otherwise the command or script is executed within the build process, with access to the filesystem ($ROOTDIR), the image if any ($IMAGE), the recipe directory ($RECIPEDIR), the artifact directory ($ARTIFACTDIR) and the directory where the image is mounted ($IMAGEMNTDIR). In both cases it is run with root privileges. If unset, chroot is set to false and the command or script is run in the host environment. - label -- if non-empty, this string is used to label output. If empty, a label is derived from the command or script. - postprocess -- if set script or command is executed after all other commands and has access to the recipe directory ($RECIPEDIR) and the artifact directory ($ARTIFACTDIR). The working directory will be set to the artifact directory. Properties 'chroot' and 'postprocess' are mutually exclusive. 
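Example (an illustrative sketch only, not an exhaustive reference; 'scripts/setup.sh' below is a hypothetical script assumed to live in the recipe directory):

- action: run
  description: set the hostname inside the target filesystem
  chroot: true
  command: echo debian > /etc/hostname

- action: run
  description: run a helper script from the recipe directory after all other actions
  postprocess: true
  script: scripts/setup.sh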
*/ package actions import ( "errors" "github.com/go-debos/fakemachine" "path" "strings" "github.com/go-debos/debos" ) const ( maxLabelLength = 40 ) type RunAction struct { debos.BaseAction `yaml:",inline"` Chroot bool PostProcess bool Script string Command string Label string } func (run *RunAction) Verify(context *debos.DebosContext) error { if run.PostProcess && run.Chroot { return errors.New("Cannot run postprocessing in the chroot") } if run.Script == "" && run.Command == "" { return errors.New("Script and Command both cannot be empty") } return nil } func (run *RunAction) PreMachine(context *debos.DebosContext, m *fakemachine.Machine, args *[]string) error { if run.Script == "" { return nil } run.Script = debos.CleanPathAt(run.Script, context.RecipeDir) // Expect we have no blank spaces in path scriptpath := strings.Split(run.Script, " ") if !run.PostProcess { m.AddVolume(path.Dir(scriptpath[0])) } return nil } func (run *RunAction) doRun(context debos.DebosContext) error { var cmdline []string var label string var cmd debos.Command if run.Chroot { cmd = debos.NewChrootCommandForContext(context) } else { cmd = debos.Command{} } if run.Script != "" { script := strings.SplitN(run.Script, " ", 2) script[0] = debos.CleanPathAt(script[0], context.RecipeDir) if run.Chroot { scriptpath := path.Dir(script[0]) cmd.AddBindMount(scriptpath, "/tmp/script") script[0] = strings.Replace(script[0], scriptpath, "/tmp/script", 1) } cmdline = []string{strings.Join(script, " ")} label = path.Base(run.Script) } else { cmdline = []string{run.Command} // Remove leading and trailing spaces and — importantly — newlines // before splitting, so that single-line scripts split into an array // of a single string only. commands := strings.Split(strings.TrimSpace(run.Command), "\n") label = commands[0] // Make it clear a long or a multi-line command is being run if len(label) > maxLabelLength { label = label[:maxLabelLength] label = strings.TrimSpace(label) label += "..." } else if len(commands) > 1 { label += "..." } } if run.Label != "" { label = run.Label } // Command/script with options passed as single string cmdline = append([]string{"sh", "-c"}, cmdline...) if !run.Chroot { cmd.AddEnvKey("RECIPEDIR", context.RecipeDir) cmd.AddEnvKey("ARTIFACTDIR", context.Artifactdir) } if !run.PostProcess { if !run.Chroot { cmd.AddEnvKey("ROOTDIR", context.Rootdir) if context.ImageMntDir != "" { cmd.AddEnvKey("IMAGEMNTDIR", context.ImageMntDir) } } if context.Image != "" { cmd.AddEnvKey("IMAGE", context.Image) } } return cmd.Run(label, cmdline...) } func (run *RunAction) Run(context *debos.DebosContext) error { if run.PostProcess { /* This runs in postprocessing instead */ return nil } return run.doRun(*context) } func (run *RunAction) PostMachine(context *debos.DebosContext) error { if !run.PostProcess { return nil } return run.doRun(*context) } debos-1.1.5/actions/unpack_action.go000066400000000000000000000046701476454235000174400ustar00rootroot00000000000000/* Unpack Action Unpack files from archive to the filesystem. Useful for creating target rootfs from saved tarball with prepared file structure. Only (compressed) tar archives are supported currently. # Yaml syntax: - action: unpack origin: name file: file.ext compression: gz Mandatory properties: - file -- archive's file name. It is possible to skip this property if 'origin' referenced to downloaded file. One of the mandatory properties may be omitted with limitations mentioned above. 
It is expected to find archive with name pointed in `file` property inside of `origin` in case if both properties are used. Optional properties: - origin -- reference to a named file or directory. The default value is 'artifacts' directory in case if this property is omitted. - compression -- optional hint for unpack allowing to use proper compression method. Currently only 'gz', bzip2' and 'xz' compression types are supported. If not provided an attempt to autodetect the compression type will be done. */ package actions import ( "fmt" "github.com/go-debos/debos" ) type UnpackAction struct { debos.BaseAction `yaml:",inline"` Compression string Origin string File string } func (pf *UnpackAction) Verify(context *debos.DebosContext) error { if len(pf.Origin) == 0 && len(pf.File) == 0 { return fmt.Errorf("Filename can't be empty. Please add 'file' and/or 'origin' property.") } archive, err := debos.NewArchive(pf.File) if err != nil { return err } if len(pf.Compression) > 0 { if archive.Type() != debos.Tar { return fmt.Errorf("Option 'compression' is supported for Tar archives only.") } if err := archive.AddOption("tarcompression", pf.Compression); err != nil { return fmt.Errorf("'%s': %s", pf.File, err) } } return nil } func (pf *UnpackAction) Run(context *debos.DebosContext) error { var origin string if len(pf.Origin) > 0 { var found bool //Trying to get a filename from origins first origin, found = context.Origin(pf.Origin) if !found { return fmt.Errorf("Origin not found '%s'", pf.Origin) } } else { origin = context.Artifactdir } infile, err := debos.RestrictedPath(origin, pf.File) if err != nil { return err } archive, err := debos.NewArchive(infile) if err != nil { return err } if len(pf.Compression) > 0 { if err := archive.AddOption("tarcompression", pf.Compression); err != nil { return err } } return archive.Unpack(context.Rootdir) } debos-1.1.5/archiver.go000066400000000000000000000125031476454235000147570ustar00rootroot00000000000000package debos import ( "fmt" "os" "path/filepath" "strings" "os/exec" ) type ArchiveType int // Supported types const ( _ ArchiveType = iota // Guess archive type from file extension Tar Zip Deb ) type ArchiveBase struct { file string // Path to archive file atype ArchiveType options map[interface{}]interface{} // Archiver-depending map with additional hints } type ArchiveTar struct { ArchiveBase } type ArchiveZip struct { ArchiveBase } type ArchiveDeb struct { ArchiveBase } type Unpacker interface { Unpack(destination string) error RelaxedUnpack(destination string) error } type Archiver interface { Type() ArchiveType AddOption(key, value interface{}) error Unpacker } type Archive struct { Archiver } // Unpack archive as is func (arc *ArchiveBase) Unpack(destination string) error { return fmt.Errorf("Unpack is not supported for '%s'", arc.file) } /* RelaxedUnpack unpack archive in relaxed mode allowing to ignore or avoid minor issues with unpacker tool or framework. */ func (arc *ArchiveBase) RelaxedUnpack(destination string) error { return arc.Unpack(destination) } func (arc *ArchiveBase) AddOption(key, value interface{}) error { if arc.options == nil { arc.options = make(map[interface{}]interface{}) } arc.options[key] = value return nil } func (arc *ArchiveBase) Type() ArchiveType { return arc.atype } // Helper function for unpacking with external tool func unpack(command []string, destination string) error { if err := os.MkdirAll(destination, 0755); err != nil { return err } return Command{}.Run("unpack", command...) 
} // Helper function for checking allowed compression types // Returns empty string for unknown func tarOptions(compression string) string { unpackTarOpts := map[string]string{ "gz": "-z", "bzip2": "-j", "xz": "-J", } // Trying to guess all other supported compression types return unpackTarOpts[compression] } func (tar *ArchiveTar) Unpack(destination string) error { command := []string{"tar"} usePigz := false if compression, ok := tar.options["tarcompression"]; ok && compression == "gz" { if _, err := exec.LookPath("pigz"); err == nil { usePigz = true } } if options, ok := tar.options["taroptions"].([]string); ok { for _, option := range options { command = append(command, option) } } command = append(command, "-C", destination) command = append(command, "-x") command = append(command, "--xattrs") command = append(command, "--xattrs-include=*.*") if compression, ok := tar.options["tarcompression"]; ok { if unpackTarOpt := tarOptions(compression.(string)); len(unpackTarOpt) > 0 { if usePigz == true { command = append(command, "--use-compress-program=pigz") } else { command = append(command, unpackTarOpt) } } } command = append(command, "-f", tar.file) return unpack(command, destination) } func (tar *ArchiveTar) RelaxedUnpack(destination string) error { taroptions := []string{"--no-same-owner", "--no-same-permissions"} options, ok := tar.options["taroptions"].([]string) defer func() { tar.options["taroptions"] = options }() if ok { for _, option := range options { taroptions = append(taroptions, option) } } tar.options["taroptions"] = taroptions return tar.Unpack(destination) } func (tar *ArchiveTar) AddOption(key, value interface{}) error { switch key { case "taroptions": // expect a slice options, ok := value.([]string) if !ok { return fmt.Errorf("Wrong type for value") } tar.options["taroptions"] = options case "tarcompression": compression, ok := value.(string) if !ok { return fmt.Errorf("Wrong type for value") } option := tarOptions(compression) if len(option) == 0 { return fmt.Errorf("Compression '%s' is not supported", compression) } tar.options["tarcompression"] = compression default: return fmt.Errorf("Option '%v' is not supported for tar archive type", key) } return nil } func (zip *ArchiveZip) Unpack(destination string) error { command := []string{"unzip", zip.file, "-d", destination} return unpack(command, destination) } func (zip *ArchiveZip) RelaxedUnpack(destination string) error { return zip.Unpack(destination) } func (deb *ArchiveDeb) Unpack(destination string) error { command := []string{"dpkg", "-x", deb.file, destination} return unpack(command, destination) } func (deb *ArchiveDeb) RelaxedUnpack(destination string) error { return deb.Unpack(destination) } /* NewArchive associate correct structure and methods according to archive type. If ArchiveType is omitted -- trying to guess the type. Return ArchiveType or nil in case of error. */ func NewArchive(file string, arcType ...ArchiveType) (Archive, error) { var archive Archive var atype ArchiveType if len(arcType) == 0 { ext := filepath.Ext(file) ext = strings.ToLower(ext) switch ext { case ".deb": atype = Deb case ".zip": atype = Zip default: //FIXME: guess Tar maybe? 
atype = Tar } } else { atype = arcType[0] } common := ArchiveBase{} common.file = file common.atype = atype common.options = make(map[interface{}]interface{}) switch atype { case Tar: archive = Archive{&ArchiveTar{ArchiveBase: common}} case Zip: archive = Archive{&ArchiveZip{ArchiveBase: common}} case Deb: archive = Archive{&ArchiveDeb{ArchiveBase: common}} default: return archive, fmt.Errorf("Unsupported archive '%s'", file) } return archive, nil } debos-1.1.5/archiver_test.go000066400000000000000000000104431476454235000160170ustar00rootroot00000000000000package debos_test import ( _ "fmt" "github.com/go-debos/debos" "github.com/stretchr/testify/assert" _ "reflect" _ "strings" "testing" ) func TestBase(t *testing.T) { // New archive // Expect Tar by default _, err := debos.NewArchive("test.base", 0) assert.EqualError(t, err, "Unsupported archive 'test.base'") // Test base archive := debos.ArchiveBase{} arcType := archive.Type() assert.Equal(t, 0, int(arcType)) // Add option err = archive.AddOption("someoption", "somevalue") assert.Empty(t, err) err = archive.Unpack("/tmp/test") assert.EqualError(t, err, "Unpack is not supported for ''") err = archive.RelaxedUnpack("/tmp/test") assert.EqualError(t, err, "Unpack is not supported for ''") } func TestTar_default(t *testing.T) { // New archive // Expect Tar by default archive, err := debos.NewArchive("test.tar.gz") assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be Tar by default arcType := archive.Type() assert.Equal(t, debos.Tar, arcType) // Test unpack err = archive.Unpack("/tmp/test") // Expect unpack failure assert.EqualError(t, err, "exit status 2") // Expect failure for RelaxedUnpack err = archive.RelaxedUnpack("/tmp/test") assert.EqualError(t, err, "exit status 2") // Check options err = archive.AddOption("taroptions", []string{"--option1"}) assert.Empty(t, err) err = archive.Unpack("/tmp/test") assert.EqualError(t, err, "exit status 64") err = archive.Unpack("/proc/debostest") assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory") err = archive.RelaxedUnpack("/tmp/test") assert.EqualError(t, err, "exit status 64") // Add wrong option err = archive.AddOption("someoption", "somevalue") assert.EqualError(t, err, "Option 'someoption' is not supported for tar archive type") } // Check supported compression types func TestTar_compression(t *testing.T) { compressions := map[string]string{ "gz": "tar -C test -x -z -f test.tar.gz", "bzip2": "tar -C test -x -j -f test.tar.gz", "xz": "tar -C test -x -J -f test.tar.gz", } // Force type archive, err := debos.NewArchive("test.tar.gz", debos.Tar) assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be Tar arcType := archive.Type() assert.Equal(t, debos.Tar, arcType) for compression, _ := range compressions { err = archive.AddOption("tarcompression", compression) assert.Empty(t, err) err := archive.Unpack("test") assert.EqualError(t, err, "exit status 2") } // Check of unsupported compression type err = archive.AddOption("tarcompression", "fake") assert.EqualError(t, err, "Compression 'fake' is not supported") // Pass incorrect type err = archive.AddOption("taroptions", nil) assert.EqualError(t, err, "Wrong type for value") err = archive.AddOption("tarcompression", nil) assert.EqualError(t, err, "Wrong type for value") } func TestDeb(t *testing.T) { // Guess Deb archive, err := debos.NewArchive("test.deb") assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be guessed as Deb arcType := archive.Type() assert.Equal(t, debos.Deb, arcType) // Force 
Deb type archive, err = debos.NewArchive("test.deb", debos.Deb) assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be Deb arcType = archive.Type() assert.Equal(t, debos.Deb, arcType) // Expect unpack failure err = archive.Unpack("/tmp/test") assert.EqualError(t, err, "exit status 2") err = archive.Unpack("/proc/debostest") assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory") err = archive.RelaxedUnpack("/tmp/test") assert.EqualError(t, err, "exit status 2") } func TestZip(t *testing.T) { // Guess zip archive, err := debos.NewArchive("test.ZiP") assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be guessed as Zip arcType := archive.Type() assert.Equal(t, debos.Zip, arcType) // Force Zip type archive, err = debos.NewArchive("test.zip", debos.Zip) assert.NotEmpty(t, archive) assert.Empty(t, err) // Type must be Zip arcType = archive.Type() assert.Equal(t, debos.Zip, arcType) // Expect unpack failure err = archive.Unpack("/tmp/test") assert.EqualError(t, err, "exit status 9") err = archive.Unpack("/proc/debostest") assert.EqualError(t, err, "mkdir /proc/debostest: no such file or directory") err = archive.RelaxedUnpack("/tmp/test") assert.EqualError(t, err, "exit status 9") } debos-1.1.5/cmd/000077500000000000000000000000001476454235000133675ustar00rootroot00000000000000debos-1.1.5/cmd/debos/000077500000000000000000000000001476454235000144635ustar00rootroot00000000000000debos-1.1.5/cmd/debos/debos.go000066400000000000000000000256271476454235000161220ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "log" "os" "path" "strings" "github.com/docker/go-units" "github.com/go-debos/debos" "github.com/go-debos/debos/actions" "github.com/go-debos/fakemachine" "github.com/jessevdk/go-flags" ) func checkError(context *debos.DebosContext, err error, a debos.Action, stage string) int { if err == nil { return 0 } context.State = debos.Failed log.Printf("Action `%s` failed at stage %s, error: %s", a, stage, err) debos.DebugShell(*context) return 1 } func do_run(r actions.Recipe, context *debos.DebosContext) int { for _, a := range r.Actions { log.Printf("==== %s ====\n", a) err := a.Run(context) // This does not stop the call of stacked Cleanup methods for other Actions // Stack Cleanup methods defer a.Cleanup(context) // Check the state of Run method if exitcode := checkError(context, err, a, "Run"); exitcode != 0 { return exitcode } } return 0 } func warnLocalhost(variable string, value string) { message := `WARNING: Environment variable %[1]s contains a reference to localhost. This may not work when running from fakemachine. 
Consider using an address that is valid on your network.` if strings.Contains(value, "localhost") || strings.Contains(value, "127.0.0.1") || strings.Contains(value, "::1") { log.Printf(message, variable) } } func main() { context := debos.DebosContext { &debos.CommonContext{}, "", "", 512 } var options struct { Backend string `short:"b" long:"fakemachine-backend" description:"Fakemachine backend to use" default:"auto"` ArtifactDir string `long:"artifactdir" description:"Directory for packed archives and ostree repositories (default: current directory)"` InternalImage string `long:"internal-image" hidden:"true"` TemplateVars map[string]string `short:"t" long:"template-var" description:"Template variables (use -t VARIABLE:VALUE syntax)"` DebugShell bool `long:"debug-shell" description:"Fall into interactive shell on error"` Shell string `short:"s" long:"shell" description:"Redefine interactive shell binary (default: bash)" optionsl:"" default:"/bin/bash"` ScratchSize string `long:"scratchsize" description:"Size of disk-backed scratch space (parsed with human-readable suffix; assumed bytes if no suffix)"` CPUs int `short:"c" long:"cpus" description:"Number of CPUs to use for build VM (default: 2)"` Memory string `short:"m" long:"memory" description:"Amount of memory for build VM (parsed with human-readable suffix; assumed bytes if no suffix. default: 2Gb)"` ShowBoot bool `long:"show-boot" description:"Show boot/console messages from the fake machine"` EnvironVars map[string]string `short:"e" long:"environ-var" description:"Environment variables (use -e VARIABLE:VALUE syntax)"` Verbose bool `short:"v" long:"verbose" description:"Verbose output"` PrintRecipe bool `long:"print-recipe" description:"Print final recipe"` DryRun bool `long:"dry-run" description:"Compose final recipe to build but without any real work started"` DisableFakeMachine bool `long:"disable-fakemachine" description:"Do not use fakemachine."` } // These are the environment variables that will be detected on the // host and propagated to fakemachine. These are listed lower case, but // they are detected and configured in both lower case and upper case. 
var environ_vars = [...]string { "http_proxy", "https_proxy", "ftp_proxy", "rsync_proxy", "all_proxy", "no_proxy", } var exitcode int = 0 // Allow to run all deferred calls prior to os.Exit() defer func() { os.Exit(exitcode) }() parser := flags.NewParser(&options, flags.Default) fakemachineBackends := parser.FindOptionByLongName("fakemachine-backend") fakemachineBackends.Choices = fakemachine.BackendNames() args, err := parser.Parse() if err != nil { flagsErr, ok := err.(*flags.Error) if ok && flagsErr.Type == flags.ErrHelp { return } else { exitcode = 1 return } } if len(args) != 1 { log.Println("No recipe given!") exitcode = 1 return } if options.DisableFakeMachine && options.Backend != "auto" { log.Println("--disable-fakemachine and --fakemachine-backend are mutually exclusive") exitcode = 1 return } // Set interactive shell binary only if '--debug-shell' options passed if options.DebugShell { context.DebugShell = options.Shell } if options.PrintRecipe { context.PrintRecipe = options.PrintRecipe } if options.Verbose { context.Verbose = options.Verbose } file := args[0] file = debos.CleanPath(file) r := actions.Recipe{} if _, err := os.Stat(file); os.IsNotExist(err) { log.Println(err) exitcode = 1 return } if err := r.Parse(file, options.PrintRecipe, options.Verbose, options.TemplateVars); err != nil { log.Println(err) exitcode = 1 return } /* If fakemachine is used the outer fake machine will never use the * scratchdir, so just set it to /scratch as a dummy to prevent the * outer debos creating a temporary directory */ context.Scratchdir = "/scratch" var runInFakeMachine = true var m *fakemachine.Machine if options.DisableFakeMachine || fakemachine.InMachine() { runInFakeMachine = false } else { // attempt to create a fakemachine m, err = fakemachine.NewMachineWithBackend(options.Backend) if err != nil { log.Printf("error creating fakemachine: %v", err) /* fallback to running on the host unless the user has chosen * a specific backend */ if options.Backend == "auto" { runInFakeMachine = false } else { exitcode = 1 return } } } // if running on the host create a scratchdir if !runInFakeMachine && !fakemachine.InMachine() { log.Printf("fakemachine not supported, running on the host!") cwd, _ := os.Getwd() context.Scratchdir, err = ioutil.TempDir(cwd, ".debos-") defer os.RemoveAll(context.Scratchdir) } context.Rootdir = path.Join(context.Scratchdir, "root") context.Image = options.InternalImage context.RecipeDir = path.Dir(file) context.Artifactdir = options.ArtifactDir if context.Artifactdir == "" { context.Artifactdir, _ = os.Getwd() } context.Artifactdir = debos.CleanPath(context.Artifactdir) // Initialise origins map context.Origins = make(map[string]string) context.Origins["artifacts"] = context.Artifactdir context.Origins["filesystem"] = context.Rootdir context.Origins["recipe"] = context.RecipeDir context.Architecture = r.Architecture context.SectorSize = r.SectorSize context.State = debos.Success // Initialize environment variables map context.EnvironVars = make(map[string]string) // First add variables from host for _, e := range environ_vars { lowerVar := strings.ToLower(e) // lowercase not really needed lowerVal := os.Getenv(lowerVar) if lowerVal != "" { context.EnvironVars[lowerVar] = lowerVal } upperVar := strings.ToUpper(e) upperVal := os.Getenv(upperVar) if upperVal != "" { context.EnvironVars[upperVar] = upperVal } } // Then add/overwrite with variables from command line for k, v := range options.EnvironVars { // Allows the user to unset environ variables with -e if v == "" 
{ delete(context.EnvironVars, k) } else { context.EnvironVars[k] = v } } for _, a := range r.Actions { err = a.Verify(&context) if exitcode = checkError(&context, err, a, "Verify"); exitcode != 0 { return } } if options.DryRun { log.Printf("==== Recipe done (Dry run) ====") return } if runInFakeMachine { var args []string if options.Memory == "" { // Set default memory size for fakemachine options.Memory = "2Gb" } memsize, err := units.RAMInBytes(options.Memory) if err != nil { fmt.Printf("Couldn't parse memory size: %v\n", err) exitcode = 1 return } memsizeMB := int(memsize / 1024 / 1024) if memsizeMB < 256 { log.Printf("WARNING: Memory size of %dMB is less than recommended minimum 256MB\n", memsizeMB) } m.SetMemory(memsizeMB) if options.CPUs == 0 { // Set default CPU count for fakemachine options.CPUs = 2 } m.SetNumCPUs(options.CPUs) m.SetSectorSize(r.SectorSize) if options.ScratchSize != "" { size, err := units.FromHumanSize(options.ScratchSize) if err != nil { fmt.Printf("Couldn't parse scratch size: %v\n", err) exitcode = 1 return } scratchsizeMB := int(size / 1000 / 1000) if scratchsizeMB < 512 { log.Printf("WARNING: Scratch size of %dMB is less than recommended minimum 512MB\n", scratchsizeMB) } m.SetScratch(size, "") } m.SetShowBoot(options.ShowBoot) // Puts in a format that is compatible with output of os.Environ() if context.EnvironVars != nil { EnvironString := []string{} for k, v := range context.EnvironVars { warnLocalhost(k, v) EnvironString = append(EnvironString, fmt.Sprintf("%s=%s", k, v)) } m.SetEnviron(EnvironString) // And save the resulting environ vars on m } m.AddVolume(context.Artifactdir) args = append(args, "--artifactdir", context.Artifactdir) for k, v := range options.TemplateVars { args = append(args, "--template-var", fmt.Sprintf("%s:%s", k, v)) } for k, v := range options.EnvironVars { args = append(args, "--environ-var", fmt.Sprintf("%s:%s", k, v)) } m.AddVolume(context.RecipeDir) args = append(args, file) if options.DebugShell { args = append(args, "--debug-shell") args = append(args, "--shell", fmt.Sprintf("%s", options.Shell)) } for _, a := range r.Actions { // Stack PostMachineCleanup methods defer a.PostMachineCleanup(&context) err = a.PreMachine(&context, m, &args) if exitcode = checkError(&context, err, a, "PreMachine"); exitcode != 0 { return } } // Silence extra output from fakemachine unless the --verbose flag was passed. 
m.SetQuiet(!options.Verbose) exitcode, err = m.RunInMachineWithArgs(args) if err != nil { fmt.Println(err) return } if exitcode != 0 { context.State = debos.Failed return } for _, a := range r.Actions { err = a.PostMachine(&context) if exitcode = checkError(&context, err, a, "Postmachine"); exitcode != 0 { return } } log.Printf("==== Recipe done ====") return } if !fakemachine.InMachine() { for _, a := range r.Actions { // Stack PostMachineCleanup methods defer a.PostMachineCleanup(&context) err = a.PreNoMachine(&context) if exitcode = checkError(&context, err, a, "PreNoMachine"); exitcode != 0 { return } } } // Create Rootdir if _, err = os.Stat(context.Rootdir); os.IsNotExist(err) { err = os.Mkdir(context.Rootdir, 0755) if err != nil && os.IsNotExist(err) { exitcode = 1 return } } exitcode = do_run(r, &context) if exitcode != 0 { return } if !fakemachine.InMachine() { for _, a := range r.Actions { err = a.PostMachine(&context) if exitcode = checkError(&context, err, a, "PostMachine"); exitcode != 0 { return } } log.Printf("==== Recipe done ====") } } debos-1.1.5/commands.go000066400000000000000000000176261476454235000147700ustar00rootroot00000000000000package debos import ( "bytes" "crypto/sha256" "fmt" "io" "io/ioutil" "log" "os" "os/exec" "path" "runtime" ) type ChrootEnterMethod int const ( CHROOT_METHOD_NONE = iota // No chroot in use CHROOT_METHOD_NSPAWN // use nspawn to create the chroot environment CHROOT_METHOD_CHROOT // use chroot to create the chroot environment ) type Command struct { Architecture string // Architecture of the chroot, nil if same as host Dir string // Working dir to run command in Chroot string // Run in the chroot at path ChrootMethod ChrootEnterMethod // Method to enter the chroot bindMounts []string /// Items to bind mount extraEnv []string // Extra environment variables to set } type commandWrapper struct { label string buffer *bytes.Buffer } func newCommandWrapper(label string) *commandWrapper { b := bytes.Buffer{} return &commandWrapper{label, &b} } func (w commandWrapper) out(atEOF bool) { for { s, err := w.buffer.ReadString('\n') if err == nil { log.Printf("%s | %v", w.label, s) } else { if len(s) > 0 { if atEOF && err == io.EOF { log.Printf("%s | %v\n", w.label, s) } else { w.buffer.WriteString(s) } } break } } } func (w commandWrapper) Write(p []byte) (n int, err error) { n, err = w.buffer.Write(p) w.out(false) return } func (w *commandWrapper) flush() { w.out(true) } func NewChrootCommandForContext(context DebosContext) Command { c := Command{Architecture: context.Architecture, Chroot: context.Rootdir, ChrootMethod: CHROOT_METHOD_NSPAWN} if context.EnvironVars != nil { for k, v := range context.EnvironVars { c.AddEnv(fmt.Sprintf("%s=%s", k, v)) } } if context.Image != "" { path, err := RealPath(context.Image) if err == nil { c.AddBindMount(path, "") } else { log.Printf("Failed to get realpath for %s, %v", context.Image, err) } for _, p := range context.ImagePartitions { path, err := RealPath(p.DevicePath) if err != nil { log.Printf("Failed to get realpath for %s, %v", p.DevicePath, err) continue } c.AddBindMount(path, "") } c.AddBindMount("/dev/disk", "") } return c } func (cmd *Command) AddEnv(env string) { cmd.extraEnv = append(cmd.extraEnv, env) } func (cmd *Command) AddEnvKey(key, value string) { cmd.extraEnv = append(cmd.extraEnv, fmt.Sprintf("%s=%s", key, value)) } func (cmd *Command) AddBindMount(source, target string) { var mount string if target != "" { mount = fmt.Sprintf("%s:%s", source, target) } else { mount = source } cmd.bindMounts = 
append(cmd.bindMounts, mount) } func (cmd *Command) saveResolvConf() (*[sha256.Size]byte, error) { hostconf := "/etc/resolv.conf" chrootedconf := path.Join(cmd.Chroot, hostconf) savedconf := chrootedconf + ".debos" var sum [sha256.Size]byte if cmd.ChrootMethod == CHROOT_METHOD_NONE { return nil, nil } // There may not be an existing resolv.conf if _, err := os.Lstat(chrootedconf); !os.IsNotExist(err) { if err = os.Rename(chrootedconf, savedconf); err != nil { return nil, err } } /* Expect a relatively small file here */ data, err := ioutil.ReadFile(hostconf) if err != nil { return nil, err } out := []byte("# Automatically generated by Debos\n") out = append(out, data...) sum = sha256.Sum256(out) err = ioutil.WriteFile(chrootedconf, out, 0644) if err != nil { return nil, err } return &sum, nil } func (cmd *Command) restoreResolvConf(sum *[sha256.Size]byte) error { hostconf := "/etc/resolv.conf" chrootedconf := path.Join(cmd.Chroot, hostconf) savedconf := chrootedconf + ".debos" if cmd.ChrootMethod == CHROOT_METHOD_NONE || sum == nil { return nil } // Remove the original copy anyway defer os.Remove(savedconf) fi, err := os.Lstat(chrootedconf) // resolv.conf was removed during the command call // Nothing to do with it -- file has been changed anyway if os.IsNotExist(err) { return nil } mode := fi.Mode() switch { case mode.IsRegular(): // Try to calculate checksum data, err := ioutil.ReadFile(chrootedconf) if err != nil { return err } currentsum := sha256.Sum256(data) // Leave the changed resolv.conf untouched if bytes.Compare(currentsum[:], (*sum)[:]) == 0 { // Remove the generated version if err := os.Remove(chrootedconf); err != nil { return err } if _, err := os.Lstat(savedconf); !os.IsNotExist(err) { // Restore the original version if err = os.Rename(savedconf, chrootedconf); err != nil { return err } } } case mode&os.ModeSymlink != 0: // If the 'resolv.conf' is a symlink // Nothing to do with it -- file has been changed anyway default: // File is not regular or symlink // Let's get out here with verbose message log.Printf("Warning: /etc/resolv.conf inside the chroot is not a regular file") } return nil } func (cmd Command) Run(label string, cmdline ...string) error { q := newQemuHelper(cmd) q.Setup() defer q.Cleanup() var options []string switch cmd.ChrootMethod { case CHROOT_METHOD_NONE: options = cmdline case CHROOT_METHOD_CHROOT: options = append(options, "chroot") options = append(options, cmd.Chroot) options = append(options, cmdline...) case CHROOT_METHOD_NSPAWN: // We use own resolv.conf handling options = append(options, "systemd-nspawn", "-q") options = append(options, "--resolv-conf=off") options = append(options, "--timezone=off") options = append(options, "--register=no") options = append(options, "--keep-unit") options = append(options, "--console=pipe") for _, e := range cmd.extraEnv { options = append(options, "--setenv", e) } for _, b := range cmd.bindMounts { options = append(options, "--bind", b) } options = append(options, "-D", cmd.Chroot) options = append(options, cmdline...) } exe := exec.Command(options[0], options[1:]...) w := newCommandWrapper(label) exe.Stdin = nil exe.Stdout = w exe.Stderr = w defer w.flush() if len(cmd.extraEnv) > 0 && cmd.ChrootMethod != CHROOT_METHOD_NSPAWN { exe.Env = append(os.Environ(), cmd.extraEnv...) 
} // Disable services start/stop for commands running in chroot if cmd.ChrootMethod != CHROOT_METHOD_NONE { services := ServiceHelper{cmd.Chroot} services.Deny() defer services.Allow() } // Save the original resolv.conf and copy version from host resolvsum, err := cmd.saveResolvConf() if err != nil { return err } if err = exe.Run(); err != nil { return err } // Restore the original resolv.conf if not changed if err = cmd.restoreResolvConf(resolvsum); err != nil { return err } return nil } type qemuHelper struct { qemusrc string qemutarget string } func newQemuHelper(c Command) qemuHelper { q := qemuHelper{} if c.Chroot == "" || c.Architecture == "" { return q } switch c.Architecture { case "armhf", "armel", "arm": if runtime.GOARCH != "arm64" && runtime.GOARCH != "arm" { q.qemusrc = "/usr/bin/qemu-arm-static" } case "arm64": if runtime.GOARCH != "arm64" { q.qemusrc = "/usr/bin/qemu-aarch64-static" } case "mips": q.qemusrc = "/usr/bin/qemu-mips-static" case "mipsel": if runtime.GOARCH != "mips64le" && runtime.GOARCH != "mipsle" { q.qemusrc = "/usr/bin/qemu-mipsel-static" } case "mips64el": if runtime.GOARCH != "mips64le" { q.qemusrc = "/usr/bin/qemu-mips64el-static" } case "riscv64": if runtime.GOARCH != "riscv64" { q.qemusrc = "/usr/bin/qemu-riscv64-static" } case "i386": if runtime.GOARCH != "amd64" && runtime.GOARCH != "386" { q.qemusrc = "/usr/bin/qemu-i386-static" } case "amd64": if runtime.GOARCH != "amd64" { q.qemusrc = "/usr/bin/qemu-x86_64-static" } default: log.Panicf("Don't know qemu for Architecture %s", c.Architecture) } if q.qemusrc != "" { q.qemutarget = path.Join(c.Chroot, q.qemusrc) } return q } func (q qemuHelper) Setup() error { if q.qemusrc == "" { return nil } return CopyFile(q.qemusrc, q.qemutarget, 0755) } func (q qemuHelper) Cleanup() { if q.qemusrc != "" { os.Remove(q.qemutarget) } } debos-1.1.5/commands_test.go000066400000000000000000000001601476454235000160100ustar00rootroot00000000000000package debos import ( "testing" ) func TestBasicCommand(t *testing.T) { Command{}.Run("out", "ls", "-l") } debos-1.1.5/debug.go000066400000000000000000000010671476454235000142450ustar00rootroot00000000000000package debos import ( "fmt" "log" "os" ) /* DebugShell function launches an interactive shell for debug and problems investigation. */ func DebugShell(context DebosContext) { if len(context.DebugShell) == 0 { return } pa := os.ProcAttr{ Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}, Dir: context.Scratchdir, } // Start an interactive shell for debug. 
log.Printf(">>> Starting a debug shell") if proc, err := os.StartProcess(context.DebugShell, []string{}, &pa); err != nil { fmt.Printf("Failed: %s\n", err) } else { proc.Wait() } } debos-1.1.5/doc/000077500000000000000000000000001476454235000133715ustar00rootroot00000000000000debos-1.1.5/doc/examples/000077500000000000000000000000001476454235000152075ustar00rootroot00000000000000debos-1.1.5/doc/examples/debian-example-ospack.yaml000066400000000000000000000013561476454235000222310ustar00rootroot00000000000000{{ $architecture := or .architecture "arm64" }} {{ $suite := or .suite "bullseye" }} {{ $image := or .image (printf "debian-example-ospack-%s-%s.tar.gz" $suite $architecture) }} architecture: {{ $architecture }} actions: - action: debootstrap suite: {{ $suite }} components: - main mirror: https://deb.debian.org/debian variant: minbase - action: apt description: Install some packages packages: - sudo - openssh-server - adduser - systemd-sysv - action: run chroot: true script: scripts/setup-user.sh - action: overlay source: overlays/sudo - action: run command: echo debian > ${ROOTDIR}/etc/hostname - action: pack file: {{ $image }} compression: gz debos-1.1.5/doc/examples/overlays/000077500000000000000000000000001476454235000170535ustar00rootroot00000000000000debos-1.1.5/doc/examples/overlays/sudo/000077500000000000000000000000001476454235000200255ustar00rootroot00000000000000debos-1.1.5/doc/examples/overlays/sudo/etc/000077500000000000000000000000001476454235000206005ustar00rootroot00000000000000debos-1.1.5/doc/examples/overlays/sudo/etc/sudoers.d/000077500000000000000000000000001476454235000225065ustar00rootroot00000000000000debos-1.1.5/doc/examples/overlays/sudo/etc/sudoers.d/user000066400000000000000000000000511476454235000234030ustar00rootroot00000000000000%sudo ALL=(ALL) NOPASSWD: /usr/bin/su debos-1.1.5/doc/examples/scripts/000077500000000000000000000000001476454235000166765ustar00rootroot00000000000000debos-1.1.5/doc/examples/scripts/setup-user.sh000077500000000000000000000002421476454235000213470ustar00rootroot00000000000000#!/bin/sh set -e echo "I: create user" adduser --gecos User --disabled-password user echo "I: set user password" echo "user:user" | chpasswd adduser user sudo debos-1.1.5/doc/man/000077500000000000000000000000001476454235000141445ustar00rootroot00000000000000debos-1.1.5/doc/man/create_manpage.sh000077500000000000000000000007241476454235000174410ustar00rootroot00000000000000#!/bin/bash # Create a manpage from the README.md # Add header echo '''% debos(1) # NAME debos - Debian OS images builder ''' > debos.md # Add README.md tail -n +2 ../../README.md >> debos.md # Some tweaks to the markdown # Uppercase titles sed -i 's/^\(##.*\)$/\U\1/' debos.md # Remove double # sed -i 's/^\##/#/' debos.md # Create the manpage pandoc -s -t man debos.md -o debos.1 # Resulting manpage can be browsed with groff: #groff -man -Tascii debos.1 debos-1.1.5/doc/man/debos.1000066400000000000000000000207741476454235000153340ustar00rootroot00000000000000'\" t .\" Automatically generated by Pandoc 3.1.3 .\" .\" Define V font for inline verbatim, using C font in formats .\" that render this, and otherwise B font. .ie "\f[CB]x\f[]"x" \{\ . ftr V B . ftr VI BI . ftr VB B . ftr VBI BI .\} .el \{\ . ftr V CR . ftr VI CI . ftr VB CB . 
ftr VBI CBI .\} .TH "debos" "1" "" "" "" .hy .SH NAME .PP debos - Debian OS images builder .SH SYNOPSIS .IP .nf \f[C] debos [options] debos [--help] \f[R] .fi .PP Application Options: .IP .nf \f[C] -b, --fakemachine-backend= Fakemachine backend to use (default: auto) --artifactdir= Directory for packed archives and ostree repositories (default: current directory) -t, --template-var= Template variables (use -t VARIABLE:VALUE syntax) --debug-shell Fall into interactive shell on error -s, --shell= Redefine interactive shell binary (default: bash) (default: /bin/bash) --scratchsize= Size of disk backed scratch space -c, --cpus= Number of CPUs to use for build VM (default: 2) -m, --memory= Amount of memory for build VM (default: 2048MB) --show-boot Show boot/console messages from the fake machine -e, --environ-var= Environment variables (use -e VARIABLE:VALUE syntax) -v, --verbose Verbose output --print-recipe Print final recipe --dry-run Compose final recipe to build but without any real work started --disable-fakemachine Do not use fakemachine. \f[R] .fi .SH DESCRIPTION .PP debos is a tool to make the creation of various Debian-based OS images simpler. While most other tools focus on specific use-cases, debos is designed to be a toolchain making common actions trivial while providing enough rope to do whatever tweaking which might be required behind the scenes. .PP debos expects a YAML file as input and runs the actions listed in the file sequentially. These actions should be self-contained and independent of each other. .PP Some of the actions provided by debos to customise and produce images are: .IP \[bu] 2 apt: install packages and their dependencies with `apt' .IP \[bu] 2 debootstrap: construct the target rootfs with debootstrap .IP \[bu] 2 download: download a single file from the internet .IP \[bu] 2 filesystem-deploy: deploy a root filesystem to an image previously created .IP \[bu] 2 image-partition: create an image file, make partitions and format them .IP \[bu] 2 ostree-commit: create an OSTree commit from rootfs .IP \[bu] 2 ostree-deploy: deploy an OSTree branch to the image .IP \[bu] 2 overlay: do a recursive copy of directories or files to the target filesystem .IP \[bu] 2 pack: create a tarball with the target filesystem .IP \[bu] 2 pacman: install packages and their dependencies with pacman .IP \[bu] 2 pacstrap: construct the target rootfs with pacstrap .IP \[bu] 2 raw: directly write a file to the output image at a given offset .IP \[bu] 2 recipe: includes the recipe actions at the given path .IP \[bu] 2 run: allows to run a command or script in the filesystem or in the host .IP \[bu] 2 unpack: unpack files from archive in the filesystem .PP A full syntax description of all the debos actions can be found at: https://godoc.org/github.com/go-debos/debos/actions .SH INSTALLATION (DOCKER CONTAINER) .PP An official debos container is available: .IP .nf \f[C] docker pull godebos/debos \f[R] .fi .PP See docker/README.md (https://github.com/go-debos/debos/blob/master/docker/README.md) for usage. 
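.PP
As a rough sketch, mirroring the invocation documented in docker/README.md (\f[V]recipe.yaml\f[R] stands in for your own recipe file), a recipe in the current directory can be built with:
.IP
.nf
\f[C]
docker run --rm --interactive --tty --device /dev/kvm --user $(id -u) \[rs]
    --workdir /recipes --mount \[dq]type=bind,source=$(pwd),destination=/recipes\[dq] \[rs]
    --security-opt label=disable godebos/debos recipe.yaml
\f[R]
.fi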
.SH INSTALLATION FROM SOURCE (UNDER DEBIAN) .IP .nf \f[C] sudo apt install golang git libglib2.0-dev libostree-dev qemu-system-x86 \[rs] qemu-user-static debootstrap systemd-container export GOPATH=/opt/src/gocode # or whatever suits your needs go install -v github.com/go-debos/debos/cmd/debos\[at]latest /opt/src/gocode/bin/debos --help \f[R] .fi .SH SIMPLE EXAMPLE .PP The following example will create an arm64 image, install several packages in it, change the file /etc/hostname to \[lq]debian\[rq] and finally make a tarball of the complete system. .IP .nf \f[C] {{- $image := or .image \[dq]debian.tgz\[dq] -}} architecture: arm64 actions: - action: debootstrap suite: bookworm components: - main - non-free-firmware mirror: https://deb.debian.org/debian variant: minbase - action: apt packages: [ sudo, openssh-server, adduser, systemd-sysv, firmware-linux ] - action: run chroot: true command: echo debian > /etc/hostname - action: pack file: {{ $image }} compression: gz \f[R] .fi .PP To run it, create a file named \f[V]example.yaml\f[R] and run: .IP .nf \f[C] debos example.yaml \f[R] .fi .PP The final tarball will be named \[lq]debian.tgz\[rq]. If you would like to modify the filename, you can provide a different name for the variable image like this: .IP .nf \f[C] debos -t image:\[dq]debian-arm64.tgz\[dq] example.yaml \f[R] .fi .SH OTHER EXAMPLES .PP Example recipes are collected in a separate repository: .PP https://github.com/go-debos/debos-recipes .SH ENVIRONMENT VARIABLES .PP debos reads a predefined list of environment variables from the host and propagates them to the fakemachine build environment. The set of environment variables is defined by \f[V]environ_vars\f[R] in \f[V]cmd/debos/debos.go\f[R]. Currently the list of environment variables includes the proxy environment variables documented at: .PP https://wiki.archlinux.org/index.php/proxy_settings .PP The list of environment variables currently exported to fakemachine is: .IP .nf \f[C] http_proxy, https_proxy, ftp_proxy, rsync_proxy, all_proxy, no_proxy \f[R] .fi .PP While the elements of \f[V]environ_vars\f[R] are in lower case, for each element both lower and upper case variants are probed on the host and if found propagated to fakemachine. So if the host has the environment variables HTTP_PROXY and no_proxy defined, both will be propagated to fakemachine respecting the case. .PP The command line options \f[V]--environ-var\f[R] and \f[V]-e\f[R] can be used to specify, overwrite and unset environment variables for fakemachine with the syntax: .IP .nf \f[C] $ debos -e ENVIRONVAR:VALUE ... \f[R] .fi .PP To unset an environment variable, or in other words, to prevent an environment variable being propagated to fakemachine, use the same syntax without a value. debos accepts multiple -e simultaneously. .SH PROXY CONFIGURATION .PP While the proxy-related environment variables are exported from the host to fakemachine, there are two known sources of issues: .IP \[bu] 2 Using localhost will not work from fakemachine. Use an address which is valid on your network. debos will warn if the environment variables contain localhost. .IP \[bu] 2 If you are running applications and/or scripts inside fakemachine, you may need to check which proxy environment variables they use. Different apps are known to use different environment variable names and different case for environment variable names.
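.PP
For example, assuming a proxy reachable at \f[V]http://10.0.0.1:3128/\f[R] (an illustrative address, not a default), the variable can be set or overridden for the fakemachine build with:
.IP
.nf
\f[C]
$ debos -e http_proxy:http://10.0.0.1:3128/ example.yaml
\f[R]
.fi
.PP
Passing the same option with an empty value (for example \f[V]-e http_proxy:\f[R]) prevents the host variable from being propagated.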
.SH FAKEMACHINE BACKEND .PP debos (unless running debos with the \f[V]--disable-fakemachine\f[R] argument) creates and spawns a virtual machine using fakemachine (https://github.com/go-debos/fakemachine) and executes the actions defined by the recipe inside the virtual machine. This helps ensure recipes are reproducible no matter the host environment. .PP Fakemachine can use different virtualisation backends to spawn the virtual machine, for more information see the fakemachine documentation (https://github.com/go-debos/fakemachine). .PP By default the backend will automatically be selected based on what is supported by the host machine, but this can be overridden using the \f[V]--fakemachine-backend\f[R] / \f[V]-b\f[R] option. If no backends are supported, debos reverts to running the recipe on the host without creating a fakemachine. .PP Performance of the backends is roughly as follows: \f[V]kvm\f[R] is faster than \f[V]uml\f[R] is faster than \f[V]qemu\f[R]. Using \f[V]--disable-fakemachine\f[R] is slightly faster than \f[V]kvm\f[R], but requires root permissions. .PP Benchmark times for running pine-a64-plus/debian.yaml (https://github.com/go-debos/debos-recipes/blob/9a25b4be6c9136f4a27e542f39ab7e419fc852c9/pine-a64-plus/debian.yaml) on an Intel Pentium G4560T with SSD: .PP .TS tab(@); l l l. T{ Backend T}@T{ Wall Time T}@T{ Prerequisites T} _ T{ \f[V]--disable-fakemachine\f[R] T}@T{ 8 min T}@T{ root permissions T} T{ \f[V]-b kvm\f[R] T}@T{ 9 min T}@T{ access to \f[V]/dev/kvm\f[R] T} T{ \f[V]-b uml\f[R] T}@T{ 18 min T}@T{ package \f[V]user-mode-linux\f[R] installed T} T{ \f[V]-b qemu\f[R] T}@T{ 166 min T}@T{ none T} .TE debos-1.1.5/docker/000077500000000000000000000000001476454235000140735ustar00rootroot00000000000000debos-1.1.5/docker/Dockerfile000066400000000000000000000074321476454235000160730ustar00rootroot00000000000000# Global ARGs shared by all stages ARG DEBIAN_FRONTEND=noninteractive ARG GOPATH=/usr/local/go ### first stage - builder ### FROM debian:bookworm-slim AS builder ARG DEBIAN_FRONTEND ARG GOPATH ENV GOPATH=${GOPATH} # install debos build and unit-test dependencies RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates \ curl \ gcc \ git \ golang-go \ libc6-dev \ libostree-dev \ unzip && \ rm -rf /var/lib/apt/lists/* # Build debos COPY . $GOPATH/src/github.com/go-debos/debos WORKDIR $GOPATH/src/github.com/go-debos/debos/cmd/debos RUN go install ./... # Install the latest archlinux-keyring, since the one in Debian is bound # to get outdated sooner or later. 
# WARNING: returning to the debian package will break the pacstrap action COPY docker/get-archlinux-keyring.sh / RUN /get-archlinux-keyring.sh /arch-keyring ### second stage - runner ### FROM debian:bookworm-slim AS runner-amd64 RUN apt-get update && \ apt-get install -y --no-install-recommends \ linux-image-amd64 \ qemu-system-x86 \ user-mode-linux && \ rm -rf /var/lib/apt/lists/* FROM debian:bookworm-slim AS runner-arm64 RUN apt-get update && \ apt-get install -y --no-install-recommends \ linux-image-arm64 \ qemu-system-arm \ # fixes: qemu-system-aarch64: failed to find romfile "efi-virtio.rom" ipxe-qemu && \ rm -rf /var/lib/apt/lists/* FROM runner-${TARGETARCH} AS runner ARG DEBIAN_FRONTEND ARG GOPATH # Set HOME to a writable directory in case something wants to cache things ENV HOME=/tmp LABEL org.label-schema.name "debos" LABEL org.label-schema.description "Debian OS builder" LABEL org.label-schema.vcs-url = "https://github.com/go-debos/debos" LABEL org.label-schema.docker.cmd 'docker run \ --rm \ --interactive \ --tty \ --device /dev/kvm \ --user $(id -u) \ --workdir /recipes \ --mount "type=bind,source=$(pwd),destination=/recipes" \ --security-opt label=disable' # debos runtime dependencies # ca-certificates is required to validate HTTPS certificates when getting debootstrap release file RUN apt-get update && \ apt-get install -y --no-install-recommends \ apt-transport-https \ binfmt-support \ bmap-tools \ btrfs-progs \ busybox \ bzip2 \ ca-certificates \ debian-ports-archive-keyring \ debootstrap \ mmdebstrap \ dosfstools \ e2fsprogs \ equivs \ fdisk \ f2fs-tools \ git \ gzip \ pigz \ libostree-1-1 \ libslirp-helper \ openssh-client \ parted \ pkg-config \ qemu-user-static \ qemu-utils \ rsync \ systemd \ systemd-container \ systemd-resolved \ u-boot-tools \ unzip \ xfsprogs \ xz-utils \ zip \ zstd \ makepkg \ pacman-package-manager \ arch-install-scripts \ arch-test && \ rm -rf /var/lib/apt/lists/* # debian's qemu-user-static package no longer registers binfmts # if running inside a virtualmachine; dockerhub builds are inside a vm RUN for arch in aarch64 alpha arm armeb cris hexagon hppa m68k microblaze mips mips64 mips64el mipsel mipsn32 mipsn32el ppc ppc64 ppc64le riscv32 riscv64 s390x sh4 sh4eb sparc sparc32plus sparc64 xtensa xtensaeb; do \ update-binfmts --import qemu-$arch; \ done COPY --from=builder $GOPATH/bin/debos /usr/local/bin/debos # Install the latest archlinux-keyring, since the one in Debian is bound # to get outdated sooner or later. # WARNING: returning to the debian package will break the pacstrap action COPY --from=builder /arch-keyring /usr/share/keyrings ENTRYPOINT ["/usr/local/bin/debos"] debos-1.1.5/docker/README.md000066400000000000000000000032111476454235000153470ustar00rootroot00000000000000# debos Docker container for ['debos' tool](https://github.com/go-debos/debos). ## Installation ``` docker pull godebos/debos ``` Debos needs virtualization to be enabled on the host and shared with the container. Check that `kvm` is enabled and writable by the user running the docker container by running ```ls /dev/kvm``` ## Usage /!\ This container should be used as an executable, i.e. there is no need to add `debos` after `godebos/debos`. 
To build `recipe.yaml`: ``` cd docker run --rm --interactive --tty --device /dev/kvm --user $(id -u) --workdir /recipes --mount "type=bind,source=$(pwd),destination=/recipes" --security-opt label=disable godebos/debos ``` If debos fails to run the KVM fakemachine backend and the `/dev/kvm` device exists on your host, you may need to add the owning group of the device as a supplementary group of the container. This will work if `ls -l /dev/kvm` indicates that the owning group has read-write access to the device. Adding the supplementary group may be unsafe depending on the owning group of `/dev/kvm`, but it could be required depending on your login provider. To add the group, add `--group-add "$(stat -c '%g' /dev/kvm)"` to your `docker run` command before `godebos/debos`. See [Docker run reference -- Additional Groups](https://docs.docker.com/engine/reference/run/#additional-groups) for more information. ## Container build To build the debos container image from current git branch: ``` docker build -f docker/Dockerfile -t godebos/debos . ``` ## Tests ### Unit tests Run unit tests: ``` docker-compose -f docker/unit-tests.test.yml up --build --exit-code-from=sut ``` debos-1.1.5/docker/get-archlinux-keyring.sh000077500000000000000000000006631476454235000206570ustar00rootroot00000000000000#!/bin/bash LINK=https://gitlab.archlinux.org/archlinux/archlinux-keyring/-/releases/permalink/latest TARGET="${1:-.}" RELEASE=$(curl -s -I -o /dev/null -w '%header{location}\n' \ ${LINK} \ | sed 's/.*\///') echo Arch keyring release ${RELEASE} echo Installing to ${TARGET} mkdir -p ${TARGET} curl -s -L ${LINK}/downloads/archlinux-keyring-${RELEASE}.tar.gz \ | tar xvz --strip-components=1 -C ${TARGET} debos-1.1.5/docker/unit-tests.test.yml000066400000000000000000000003621476454235000177140ustar00rootroot00000000000000version: '3.4' services: sut: build: context: .. dockerfile: docker/Dockerfile target: builder image: debos-builder working_dir: /usr/local/go/src/github.com/go-debos/debos command: go test -v ./... 
debos-1.1.5/filesystem.go000066400000000000000000000042671476454235000153500ustar00rootroot00000000000000package debos import ( "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strings" ) func CleanPathAt(path, at string) string { if filepath.IsAbs(path) { return filepath.Clean(path) } return filepath.Join(at, path) } func CleanPath(path string) string { cwd, _ := os.Getwd() return CleanPathAt(path, cwd) } func CopyFile(src, dst string, mode os.FileMode) error { in, err := os.Open(src) if err != nil { return err } defer in.Close() tmp, err := ioutil.TempFile(filepath.Dir(dst), "") if err != nil { return err } _, err = io.Copy(tmp, in) if err != nil { tmp.Close() os.Remove(tmp.Name()) return err } if err = tmp.Close(); err != nil { os.Remove(tmp.Name()) return err } if err = os.Chmod(tmp.Name(), mode); err != nil { os.Remove(tmp.Name()) return err } if err = os.Rename(tmp.Name(), dst); err != nil { os.Remove(tmp.Name()) return err } return nil } func CopyTree(sourcetree, desttree string) error { walker := func(p string, info os.FileInfo, err error) error { if err != nil { return err } suffix, _ := filepath.Rel(sourcetree, p) target := path.Join(desttree, suffix) switch info.Mode() & os.ModeType { case 0: err := CopyFile(p, target, info.Mode()) if err != nil { return fmt.Errorf("Failed to copy file %s: %w", p, err) } case os.ModeDir: os.Mkdir(target, info.Mode()) case os.ModeSymlink: link, err := os.Readlink(p) if err != nil { return fmt.Errorf("Failed to read symlink %s: %w", suffix, err) } os.Symlink(link, target) default: return fmt.Errorf("File %s with mode %v not handled", p, info.Mode()) } return nil } return filepath.Walk(sourcetree, walker) } func RealPath(path string) (string, error) { p, err := filepath.EvalSymlinks(path) if err != nil { return "", err } return filepath.Abs(p) } func RestrictedPath(prefix, dest string) (string, error) { var err error destination := path.Join(prefix, dest) destination, err = filepath.Abs(destination) if err != nil { return "", err } if !strings.HasPrefix(destination, prefix) { return "", fmt.Errorf("The resulting path points outside of prefix '%s': '%s'\n", prefix, destination) } return destination, nil } debos-1.1.5/go.mod000066400000000000000000000016141476454235000137340ustar00rootroot00000000000000module github.com/go-debos/debos go 1.23 require ( al.essio.dev/pkg/shellescape v1.5.1 github.com/docker/go-units v0.5.0 github.com/freddierice/go-losetup/v2 v2.0.1 github.com/go-debos/fakemachine v0.0.11 github.com/go-task/slim-sprig/v3 v3.0.0 github.com/google/uuid v1.6.0 github.com/jessevdk/go-flags v1.6.1 github.com/sjoerdsimons/ostree-go v0.0.0-20201014091107-8fae757256f8 github.com/stretchr/testify v1.10.0 gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect github.com/alessio/shellescape v1.4.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/surma/gocpio v1.1.0 // indirect github.com/ulikunitz/xz v0.5.12 // indirect golang.org/x/sys v0.22.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) debos-1.1.5/go.sum000066400000000000000000000112751476454235000137650ustar00rootroot00000000000000al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 
h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0= github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/freddierice/go-losetup/v2 v2.0.1 h1:wPDx/Elu9nDV8y/CvIbEDz5Xi5Zo80y4h7MKbi3XaAI= github.com/freddierice/go-losetup/v2 v2.0.1/go.mod h1:TEyBrvlOelsPEhfWD5rutNXDmUszBXuFnwT1kIQF4J8= github.com/go-debos/fakemachine v0.0.11 h1:pXDtOEOjzeK2b3YnrcOx+hJp1B3rbRmxy8v+iD4H6lQ= github.com/go-debos/fakemachine v0.0.11/go.mod h1:RVnBUeGO0YCXEjBnFhTFO/deqSCyYeddP/bYQZZSF+o= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sjoerdsimons/ostree-go v0.0.0-20201014091107-8fae757256f8 h1:fLxnJNJ++tkunS7BATed+mFqhA8KZYG7kT+WYEarYU4= github.com/sjoerdsimons/ostree-go v0.0.0-20201014091107-8fae757256f8/go.mod h1:f9gMvY6srFTBixsEIcm3zd3q7Y6btDDE4DKYtn8yDII= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/surma/gocpio v1.1.0 h1:RUWT+VqJ8GSodSv7Oh5xjIxy7r24CV1YvothHFfPxcQ= github.com/surma/gocpio v1.1.0/go.mod h1:zaLNaN+EDnfSnNdWPJJf9OZxWF817w5dt8JNzF9LCVI= 
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= debos-1.1.5/net.go000066400000000000000000000016671476454235000137530ustar00rootroot00000000000000package debos import ( "fmt" "io" "log" "net/http" "os" ) // Function for downloading single file object with http(s) protocol func DownloadHttpUrl(url, filename string) error { log.Printf("Download started: '%s' -> '%s'\n", url, filename) // TODO: Proxy support? // Check if file object already exists. fi, err := os.Stat(filename) if !os.IsNotExist(err) && !fi.Mode().IsRegular() { return fmt.Errorf("Failed to download '%s': '%s' exists and it is not a regular file\n", url, filename) } resp, err := http.Get(url) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("Url '%s' returned status code %d (%s)\n", url, resp.StatusCode, http.StatusText(resp.StatusCode)) } // Output file output, err := os.Create(filename) if err != nil { return err } defer output.Close() if _, err := io.Copy(output, resp.Body); err != nil { return err } return nil } debos-1.1.5/os.go000066400000000000000000000024431476454235000135770ustar00rootroot00000000000000package debos import ( "fmt" "os" "path" ) const debianPolicyHelper = "/usr/sbin/policy-rc.d" /* ServiceHelper is used to manage services. Currently supports only debian-based family. */ type ServiceHelper struct { Rootdir string } type ServicesManager interface { Allow() error Deny() error } /* Allow() allows to start/stop services on OS level. */ func (s *ServiceHelper) Allow() error { helperFile := path.Join(s.Rootdir, debianPolicyHelper) if _, err := os.Stat(helperFile); os.IsNotExist(err) { return nil } if err := os.Remove(helperFile); err != nil { return err } return nil } /* Deny() prohibits to start/stop services on OS level. 
*/ func (s *ServiceHelper) Deny() error { helperFile := path.Join(s.Rootdir, debianPolicyHelper) var helper = []byte(`#!/bin/sh exit 101 `) if _, err := os.Stat(helperFile); os.IsExist(err) { return fmt.Errorf("Policy helper file '%s' exists already", debianPolicyHelper) } if _, err := os.Stat(path.Dir(helperFile)); os.IsNotExist(err) { // do not try to do something if ".../usr/sbin" is not exists return nil } pf, err := os.Create(helperFile) if err != nil { return err } defer pf.Close() if _, err := pf.Write(helper); err != nil { return err } if err := pf.Chmod(0755); err != nil { return err } return nil } debos-1.1.5/tests/000077500000000000000000000000001476454235000137665ustar00rootroot00000000000000debos-1.1.5/tests/apertis/000077500000000000000000000000001476454235000154355ustar00rootroot00000000000000debos-1.1.5/tests/apertis/apertis-archive-keyring.gpg000066400000000000000000000022171476454235000226720ustar00rootroot00000000000000 T}>MƧY(e(X$2]2tNEbX8oN U5۹!D7 ;n"7{>Y<x e'^^tΒXJ3"uZj %SWL=zDjgߞs7\ĸ̥N <|"N $Vmf:2HwTHbZ\d}Ih 3dvzSbC OI2BRe#n\P ФۺEX-M-h 59vXNҍAu}qY  8giC j6cbZV=#-ߙ4a4ؠԆQzIM4%}aC o,L zlɘZˈ_k5J '. vA"_hOKsڤNX9ڣ =}u_VQew̆ļ(Y3d"Cm]rVCApertis Archive Automatic Signing Key 7 !W8    ,NV twwƒ\-XX"ꫜfh:G~ڟj}DjP[{g|i5qkgCKӬܷ%G}^넹ȭPw(._H\ˁ0EX-CF4L.KIvwv("$.>LgFJS٨]eqf[db,bX l p'Nf~) :ŚrWW,5aG-JdvFB06]7OdRwOKk2./ 32 E ?v5S U:Ldebos-1.1.5/tests/apertis/test.yaml000066400000000000000000000010641476454235000173010ustar00rootroot00000000000000--- # Test building a non-debian distribution such as apertis to ensure # bootstrapping suites that debootstrap won't internally know about works {{- $architecture := or .architecture "amd64"}} architecture: {{$architecture}} actions: - action: debootstrap suite: v2022 components: - target mirror: https://repositories.apertis.org/apertis/ variant: minbase keyring-package: apertis-archive-keyring keyring-file: apertis-archive-keyring.gpg - action: apt description: Install some base packages packages: - procps debos-1.1.5/tests/arch/000077500000000000000000000000001476454235000147035ustar00rootroot00000000000000debos-1.1.5/tests/arch/mirrorlist000066400000000000000000000002651476454235000170370ustar00rootroot00000000000000Server = https://geo.mirror.pkgbuild.com/$repo/os/$arch Server = https://mirror.rackspace.com/archlinux/$repo/os/$arch Server = https://mirror.leaseweb.net/archlinux/$repo/os/$arch debos-1.1.5/tests/arch/pacman.conf000066400000000000000000000054311476454235000170140ustar00rootroot00000000000000# # /etc/pacman.conf # # See the pacman.conf(5) manpage for option and repository directives # # GENERAL OPTIONS # [options] # The following paths are commented out with their default values listed. # If you wish to use different paths, uncomment and update the paths. #RootDir = / #DBPath = /var/lib/pacman/ #CacheDir = /var/cache/pacman/pkg/ #LogFile = /var/log/pacman.log #GPGDir = /etc/pacman.d/gnupg/ #HookDir = /etc/pacman.d/hooks/ HoldPkg = pacman glibc #XferCommand = /usr/bin/curl -L -C - -f -o %o %u #XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u #CleanMethod = KeepInstalled Architecture = auto # Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup #IgnorePkg = #IgnoreGroup = #NoUpgrade = #NoExtract = # Misc options #UseSyslog #Color #NoProgressBar CheckSpace #VerbosePkgLists #ParallelDownloads = 5 # By default, pacman accepts packages signed by keys that its local keyring # trusts (see pacman-key and its man page), as well as unsigned packages. 
SigLevel = Required DatabaseOptional LocalFileSigLevel = Optional #RemoteFileSigLevel = Required # NOTE: You must run `pacman-key --init` before first using pacman; the local # keyring can then be populated with the keys of all official Arch Linux # packagers with `pacman-key --populate archlinux`. # # REPOSITORIES # - can be defined here or included from another file # - pacman will search repositories in the order defined here # - local/custom mirrors can be added here or in separate files # - repositories listed first will take precedence when packages # have identical names, regardless of version number # - URLs will have $repo replaced by the name of the current repo # - URLs will have $arch replaced by the name of the architecture # # Repository entries are of the format: # [repo-name] # Server = ServerName # Include = IncludePath # # The header [repo-name] is crucial - it must be present and # uncommented to enable the repo. # # The testing repositories are disabled by default. To enable, uncomment the # repo name header and Include lines. You can add preferred servers immediately # after the header, and they will be used before the default mirrors. #[testing] #Include = /etc/pacman.d/mirrorlist [core] Include = /etc/pacman.d/mirrorlist [extra] Include = /etc/pacman.d/mirrorlist # If you want to run 32 bit applications on your x86_64 system, # enable the multilib repositories as required here. #[multilib-testing] #Include = /etc/pacman.d/mirrorlist #[multilib] #Include = /etc/pacman.d/mirrorlist # An example of a custom package repository. See the pacman manpage for # tips on creating your own repositories. #[custom] #SigLevel = Required DatabaseTrustedOnly #SigLevel = Optional TrustAll #Server = file:///home/custompkgs debos-1.1.5/tests/arch/test.yaml000066400000000000000000000003771476454235000165550ustar00rootroot00000000000000--- {{- $architecture := or .architecture "amd64"}} architecture: {{$architecture}} actions: - action: pacstrap config: pacman.conf mirror: mirrorlist - action: pacman description: Install some base packages packages: - procps debos-1.1.5/tests/debian/000077500000000000000000000000001476454235000152105ustar00rootroot00000000000000debos-1.1.5/tests/debian/test.yaml000066400000000000000000000007631476454235000170610ustar00rootroot00000000000000--- {{- $architecture := or .architecture "amd64"}} {{- $suite := or .suite "bookworm"}} {{- $tool := or .tool "debootstrap" }} architecture: {{$architecture}} actions: - action: {{ $tool }} suite: {{ $suite }} variant: minbase merged-usr: true {{- if eq $tool "mmdebstrap" }} dpkg-opts: - path-exclude=/usr/share/man/* apt-opts: - Apt::Install-Recommends "true" {{- end }} - action: apt description: Install some base packages packages: - procps debos-1.1.5/tests/msdos/000077500000000000000000000000001476454235000151135ustar00rootroot00000000000000debos-1.1.5/tests/msdos/expected.json000066400000000000000000000025201476454235000176060ustar00rootroot00000000000000{ "partitiontable": { "label": "dos", "id": "0xdeadbeef", "device": "test.img", "unit": "sectors", "sectorsize": 512, "partitions": [ { "node": "test.img1", "start": 1, "size": 500000, "type": "83" },{ "node": "test.img2", "start": 500001, "size": 3406250, "type": "83" },{ "node": "test.img3", "start": 3906251, "size": 1953125, "type": "83" },{ "node": "test.img4", "start": 5859376, "size": 9765624, "type": "f" },{ "node": "test.img5", "start": 5859377, "size": 1953124, "type": "83" },{ "node": "test.img6", "start": 7812502, "size": 1953124, "type": "83" },{ 
"node": "test.img7", "start": 9765627, "size": 1953124, "type": "83" },{ "node": "test.img8", "start": 11718752, "size": 1953124, "type": "83" },{ "node": "test.img9", "start": 13671877, "size": 1953123, "type": "83" } ] } } debos-1.1.5/tests/msdos/test.yaml000066400000000000000000000021711476454235000167570ustar00rootroot00000000000000architecture: amd64 actions: - action: image-partition description: Partition the image imagename: test.img imagesize: 8G partitiontype: msdos diskid: deadbeef mountpoints: - mounpoint: / partition: system partitions: - name: boot fs: ext2 start: 0% end: 256M - name: system fs: ext4 start: 256m end: 2G - name: data0 fs: ext4 start: 2G end: 3G - name: data1 fs: ext4 start: 3G end: 4G - name: data2 fs: ext4 start: 4G end: 5G - name: data3 fs: ext4 start: 5G end: 6G - name: data4 fs: ext4 start: 6G end: 7G - name: data5 fs: ext4 start: 7G end: 8G - action: run chroot: false command: > cd ${ARTIFACTDIR}; sfdisk -J test.img | tee ${RECIPEDIR}/actual.json - action: run description: Compare expected and actual chroot: false command: bash -c 'diff -u <(jq . ${RECIPEDIR}/expected.json) <(jq . ${RECIPEDIR}/actual.json)' debos-1.1.5/tests/overlay-non-existent-destination/000077500000000000000000000000001476454235000224175ustar00rootroot00000000000000debos-1.1.5/tests/overlay-non-existent-destination/overlay-non-existent-destination.yaml000066400000000000000000000010731476454235000317350ustar00rootroot00000000000000architecture: amd64 actions: # This overlay action is expected to error out here because the destination # doesn't exist in the filesystem. - action: overlay description: Overlay file into a non-existent destination source: overlay-non-existent-destination.yaml destination: /this/path/does/not/exist/overlay-non-existent-destination.yaml - action: run description: Check if path exists command: "[ -e /this/path/does/not/exist/overlay-non-existent-destination.yaml ] || exit 1" - action: run postprocess: true command: echo Test debos-1.1.5/tests/partitioning-sector-size/000077500000000000000000000000001476454235000207425ustar00rootroot00000000000000debos-1.1.5/tests/partitioning-sector-size/test.yaml000066400000000000000000000014351476454235000226100ustar00rootroot00000000000000{{ $sectorsize := or .sectorsize 512 }} architecture: amd64 sectorsize: {{ $sectorsize }} actions: - action: image-partition description: Create a {{ $sectorsize }} sector size image imagename: test.img imagesize: 10M partitiontype: gpt partitions: - name: system fs: ext4 start: 2m end: 100% # # Would more elegant to check the sector size with # sfdisk --sector-size {{ $sectorsize }} -J test.img | jq .partitiontable.sectorsize # once we have an updated sfdisk that supports this option see: # https://www.kernel.org/pub/linux/utils/util-linux/v2.41/v2.41-ReleaseNotes - action: run description: Print partition table chroot: false command: bash -c 'PARTED_SECTOR_SIZE={{ $sectorsize }} parted -s ${ARTIFACTDIR}/test.img print' debos-1.1.5/tests/partitioning/000077500000000000000000000000001476454235000164755ustar00rootroot00000000000000debos-1.1.5/tests/partitioning/expected.json000066400000000000000000000043631476454235000211770ustar00rootroot00000000000000{ "partitiontable": { "label": "gpt", "id": "12345678-1234-1234-1234-123456789012", "device": "test.img", "unit": "sectors", "firstlba": 34, "lastlba": 15624966, "sectorsize": 512, "partitions": [ { "node": "test.img1", "start": 34, "size": 499967, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "7BA1B99D-7942-450A-921B-F394A0A065AF", 
"name": "boot" },{ "node": "test.img2", "start": 500001, "size": 3406250, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "87654321-1234-5678-9012-345678901234", "name": "system" },{ "node": "test.img3", "start": 3906251, "size": 1953125, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "713CA942-5DB4-41F8-A365-E8F1CA967D7A", "name": "data0" },{ "node": "test.img4", "start": 5859376, "size": 1953125, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "05D13CA0-7EA3-48D2-9824-8C94263E5692", "name": "data1" },{ "node": "test.img5", "start": 7812501, "size": 1953125, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "C86924DB-49D2-47D8-BBB7-BB56426A8934", "name": "data2" },{ "node": "test.img6", "start": 9765626, "size": 1953125, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "B0F4D633-B66A-44B6-933A-E77A0144B275", "name": "data3" },{ "node": "test.img7", "start": 11718751, "size": 1953125, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "8BDAE85E-E473-4D77-B09D-43E1D6F741DA", "name": "data4" },{ "node": "test.img8", "start": 13671876, "size": 1953091, "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4", "uuid": "F9272482-3F7E-44E3-8D99-C24023EB3317", "name": "data5" } ] } } debos-1.1.5/tests/partitioning/test.yaml000066400000000000000000000023121476454235000203360ustar00rootroot00000000000000architecture: amd64 actions: - action: image-partition description: Partition the image imagename: test.img imagesize: 8G partitiontype: gpt diskid: 12345678-1234-1234-1234-123456789012 mountpoints: - mounpoint: / partition: system partitions: - name: boot fs: ext2 start: 0% end: 256M - name: system fs: ext4 start: 256m end: 2G partuuid: 87654321-1234-5678-9012-345678901234 - name: data0 fs: ext4 start: 2G end: 3G - name: data1 fs: ext4 start: 3G end: 4G - name: data2 fs: ext4 start: 4G end: 5G - name: data3 fs: ext4 start: 5G end: 6G - name: data4 fs: ext4 start: 6G end: 7G - name: data5 fs: ext4 start: 7G end: 8G - action: run chroot: false command: > cd ${ARTIFACTDIR}; sfdisk -J test.img | tee ${RECIPEDIR}/actual.json - action: run description: Compare expected and actual chroot: false command: bash -c 'diff -u <(jq . ${RECIPEDIR}/expected.json) <(jq . 
${RECIPEDIR}/actual.json)' debos-1.1.5/tests/raw/000077500000000000000000000000001476454235000145575ustar00rootroot00000000000000debos-1.1.5/tests/raw/blobs/000077500000000000000000000000001476454235000156605ustar00rootroot00000000000000debos-1.1.5/tests/raw/blobs/blob1000066400000000000000000000000101476454235000165710ustar00rootroot00000000000000ް] debos-1.1.5/tests/raw/blobs/blob2000066400000000000000000000000101476454235000165720ustar00rootroot00000000000000ް] debos-1.1.5/tests/raw/blobs/blob3000066400000000000000000000000101476454235000165730ustar00rootroot00000000000000ް] debos-1.1.5/tests/raw/test.yaml000066400000000000000000000025351476454235000164270ustar00rootroot00000000000000{{ $sectorsize := or .sectorsize 512 }} architecture: amd64 sectorsize: {{ $sectorsize }} actions: - action: image-partition description: Partition the image imagename: test.img imagesize: 2M partitiontype: msdos - action: overlay source: blobs - action: raw description: Copying blob1 origin: filesystem source: blob1 offset: {{ sector 40 }} - action: raw description: Copying blob2 origin: filesystem source: blob2 offset: 42s - action: raw description: Copying blob3 origin: filesystem source: blob3 # Far enough to not overwrite previous one offset: 1048576 - action: run chroot: false command: > cd ${ARTIFACTDIR}; dd if=test.img bs=1 skip=$(( 40 * {{ $sectorsize }} )) count=8 of=blob1.res; dd if=test.img bs=1 skip=$(( 42 * {{ $sectorsize }} )) count=8 of=blob2.res; dd if=test.img bs=1 skip=1048576 count=8 of=blob3.res - action: run chroot: false description: Verifying blob1 command: diff "${RECIPEDIR}"/blobs/blob1 "${ARTIFACTDIR}"/blob1.res; - action: run chroot: false description: Verifying blob2 command: diff "${RECIPEDIR}"/blobs/blob2 "${ARTIFACTDIR}"/blob2.res; - action: run chroot: false description: Verifying blob3 command: diff "${RECIPEDIR}"/blobs/blob3 "${ARTIFACTDIR}"/blob3.res; debos-1.1.5/tests/recipes/000077500000000000000000000000001476454235000154205ustar00rootroot00000000000000debos-1.1.5/tests/recipes/separatedirs/000077500000000000000000000000001476454235000201065ustar00rootroot00000000000000debos-1.1.5/tests/recipes/separatedirs/base.tar.gz000066400000000000000000000011671476454235000221540ustar00rootroot00000000000000_\k0 n BiX c)mINܸYY ڍ}rt=0:d$"x'82=H^_ZiV.7JqRq:oF'#yX|':qh<{0xIOg4O=F`-䪶i4|9AG`*/ݚim-U*(CtA5mYfV:H4h)K8w!yIm| +VB YF- ] ( z3 ͮJA.tSײ ( ]䢜l6"XbFR-r]Vԋ9^/?\/?]/Wl/t?Z+V͖eûaEm-z| Ѣ_O"/ui+7q;[}ٝOYt`UZ꼘MpiZ a'l)5M٭_vw^ax]'mf>W~ûW~ûW~û test.a - action: run description: escaping via heredoc chroot: false command: |- cat << 'EOF' > test.b {{ $escaped }} EOF - action: run description: validate escaping chroot: false command: diff -u test.a test.b - action: run description: sector custom function chroot: false command: test "8s" = {{ sector 8 }} - action: run description: sprig string functions chroot: false command: test "HELLOHELLO" = {{ "hello" | upper | repeat 2 }} - action: run description: sprig integer math functions chroot: false command: test 42 -eq {{ mul 4 10 | add 2 }} - action: run description: sprig conversion function chroot: false command: test 42 -eq {{ 0x2a | int }} # expected.uuid5 contains value generated with $namespace and $text1 {{ $namespace := or .namespace "6c9d1418-8fa9-11ee-b4ca-325096b39f47" }} {{ $text1 := or .text1 "ver1.0" }} {{ $text2 := or .text2 "ver2.0" }} {{ $expected := or .expected "3deb2a0e-692c-5a37-93c0-1123948bf5ae" }} - action: run description: test uuid5 generate same value when input is same chroot: false command: test {{ uuid5 $namespace 
$text1 }} = {{ $expected }} - action: run description: test uuid5 generates a different value when input is different chroot: false command: test {{ uuid5 $namespace $text2 }} != {{ $expected }}